diff --git a/.github/workflows/staging-deployment.yaml b/.github/workflows/staging-deployment.yaml index 9b7a5121b2..718eda47db 100644 --- a/.github/workflows/staging-deployment.yaml +++ b/.github/workflows/staging-deployment.yaml @@ -9,34 +9,46 @@ jobs: name: Deploy latest develop branch to staging runs-on: ubuntu-latest environment: staging + permissions: + contents: 'read' + id-token: 'write' steps: - - name: Executing remote ssh commands using ssh key - uses: appleboy/ssh-action@v1.0.3 - env: - GITHUB_BRANCH: develop - GITHUB_SHA: ${{ github.sha }} + - id: 'auth' + uses: 'google-github-actions/auth@v2' with: - host: ${{ secrets.HOST_DNS }} - username: ${{ secrets.USERNAME }} - key: ${{ secrets.SSH_KEY }} - envs: GITHUB_BRANCH,GITHUB_SHA - command_timeout: 60m - script: | - echo "GITHUB_BRANCH: ${GITHUB_BRANCH}" - echo "GITHUB_SHA: ${GITHUB_SHA}" - export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it - export OTELCOL_TAG="main" - export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work - docker system prune --force - docker pull signoz/signoz-otel-collector:main - docker pull signoz/signoz-schema-migrator:main - cd ~/signoz - git status - git add . - git stash push -m "stashed on $(date --iso-8601=seconds)" - git fetch origin - git checkout ${GITHUB_BRANCH} - git pull - make build-ee-query-service-amd64 - make build-frontend-amd64 - make run-signoz \ No newline at end of file + workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }} + service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }} + + - name: 'sdk' + uses: 'google-github-actions/setup-gcloud@v2' + + - name: 'ssh' + shell: bash + env: + GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }} + GITHUB_SHA: ${{ github.sha }} + GCP_PROJECT: ${{ secrets.GCP_PROJECT }} + GCP_ZONE: ${{ secrets.GCP_ZONE }} + GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }} + run: | + read -r -d '' COMMAND < \ No newline at end of file diff --git a/frontend/src/assets/Dashboard/PromQl.tsx b/frontend/src/assets/Dashboard/PromQl.tsx new file mode 100644 index 0000000000..8a942ae74e --- /dev/null +++ b/frontend/src/assets/Dashboard/PromQl.tsx @@ -0,0 +1,27 @@ +import { CSSProperties } from 'react'; + +function PromQLIcon({ + fillColor, +}: { + fillColor: CSSProperties['color']; +}): JSX.Element { + return ( + + + + ); +} + +export default PromQLIcon; diff --git a/frontend/src/components/Logs/ListLogView/index.tsx b/frontend/src/components/Logs/ListLogView/index.tsx index 47b108e944..fa8a2fb608 100644 --- a/frontend/src/components/Logs/ListLogView/index.tsx +++ b/frontend/src/components/Logs/ListLogView/index.tsx @@ -16,6 +16,7 @@ import { useCallback, useMemo, useState } from 'react'; // interfaces import { IField } from 'types/api/logs/fields'; import { ILog } from 'types/api/logs/log'; +import { FORBID_DOM_PURIFY_TAGS } from 'utils/app'; // components import AddToQueryHOC, { AddToQueryHOCProps } from '../AddToQueryHOC'; @@ -50,7 +51,11 @@ function LogGeneralField({ }: LogFieldProps): JSX.Element { const html = useMemo( () => ({ - __html: convert.toHtml(dompurify.sanitize(fieldValue)), + __html: convert.toHtml( + dompurify.sanitize(fieldValue, { + FORBID_TAGS: [...FORBID_DOM_PURIFY_TAGS], + }), + ), }), [fieldValue], ); diff --git a/frontend/src/components/Logs/RawLogView/index.tsx b/frontend/src/components/Logs/RawLogView/index.tsx index 712b97723d..fcb8beeeec 100644 --- a/frontend/src/components/Logs/RawLogView/index.tsx +++ b/frontend/src/components/Logs/RawLogView/index.tsx @@ -21,6 +21,7 @@ import { useMemo, 
useState, } from 'react'; +import { FORBID_DOM_PURIFY_TAGS } from 'utils/app'; import LogLinesActionButtons from '../LogLinesActionButtons/LogLinesActionButtons'; import LogStateIndicator from '../LogStateIndicator/LogStateIndicator'; @@ -144,7 +145,9 @@ function RawLogView({ const html = useMemo( () => ({ - __html: convert.toHtml(dompurify.sanitize(text)), + __html: convert.toHtml( + dompurify.sanitize(text, { FORBID_TAGS: [...FORBID_DOM_PURIFY_TAGS] }), + ), }), [text], ); diff --git a/frontend/src/components/Logs/TableView/useTableView.tsx b/frontend/src/components/Logs/TableView/useTableView.tsx index be34e998ef..fd37132110 100644 --- a/frontend/src/components/Logs/TableView/useTableView.tsx +++ b/frontend/src/components/Logs/TableView/useTableView.tsx @@ -8,6 +8,7 @@ import dompurify from 'dompurify'; import { useIsDarkMode } from 'hooks/useDarkMode'; import { FlatLogData } from 'lib/logs/flatLogData'; import { useMemo } from 'react'; +import { FORBID_DOM_PURIFY_TAGS } from 'utils/app'; import LogStateIndicator from '../LogStateIndicator/LogStateIndicator'; import { getLogIndicatorTypeForTable } from '../LogStateIndicator/utils'; @@ -107,7 +108,11 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => { children: ( {{- end}} + *RelatedTraces:* {{ if gt (len .Annotations.related_traces) 0 -}} View in <{{ .Annotations.related_traces }}|traces explorer> {{- end}} *Details:* {{ range .Labels.SortedPairs }} • *{{ .Name }}:* {{ .Value }} diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss b/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss index 7f6ac6a4dd..8af1e4ad0a 100644 --- a/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss +++ b/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss @@ -64,6 +64,10 @@ .view-options, .actions { + .info-icon { + padding: 8px; + } + .hidden { display: none; } @@ -252,6 +256,10 @@ color: var(--bg-ink-200); background-color: var(--bg-vanilla-300); } + + .info-icon { + color: var(--bg-ink-200); + } } } diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx b/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx index 0e9fd3704b..7253b45b94 100644 --- a/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx +++ b/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx @@ -1,6 +1,7 @@ /* eslint-disable react/jsx-props-no-spreading */ import './ExplorerOptions.styles.scss'; +import { InfoCircleOutlined } from '@ant-design/icons'; import { Color } from '@signozhq/design-tokens'; import { Button, @@ -402,6 +403,28 @@ function ExplorerOptions({
+ + {sourcepage === DataSource.LOGS + ? 'Learn more about Logs explorer ' + : 'Learn more about Traces explorer '} + + {' '} + here + {' '} +
+ } + > + + ), key: EQueryType.PROM, }, ], - [], + [isDarkMode], ); const { registerShortcut, deregisterShortcut } = useKeyboardHotkeys(); diff --git a/frontend/src/container/FormAlertRules/UserGuide/index.tsx b/frontend/src/container/FormAlertRules/UserGuide/index.tsx index 86992d7226..ab4c091a5e 100644 --- a/frontend/src/container/FormAlertRules/UserGuide/index.tsx +++ b/frontend/src/container/FormAlertRules/UserGuide/index.tsx @@ -147,7 +147,7 @@ function UserGuide({ queryType }: UserGuideProps): JSX.Element { diff --git a/frontend/src/container/ListAlertRules/ListAlert.tsx b/frontend/src/container/ListAlertRules/ListAlert.tsx index 4b7c2f4cb9..39465e28a8 100644 --- a/frontend/src/container/ListAlertRules/ListAlert.tsx +++ b/frontend/src/container/ListAlertRules/ListAlert.tsx @@ -338,7 +338,8 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element { diff --git a/frontend/src/container/ListOfDashboard/DashboardsList.tsx b/frontend/src/container/ListOfDashboard/DashboardsList.tsx index ac3b835092..a0a31c3142 100644 --- a/frontend/src/container/ListOfDashboard/DashboardsList.tsx +++ b/frontend/src/container/ListOfDashboard/DashboardsList.tsx @@ -322,7 +322,8 @@ function DashboardsList(): JSX.Element { diff --git a/frontend/src/container/NewWidget/LeftContainer/QuerySection/QuerySection.styles.scss b/frontend/src/container/NewWidget/LeftContainer/QuerySection/QuerySection.styles.scss index 968da1459a..d6ae43ac9a 100644 --- a/frontend/src/container/NewWidget/LeftContainer/QuerySection/QuerySection.styles.scss +++ b/frontend/src/container/NewWidget/LeftContainer/QuerySection/QuerySection.styles.scss @@ -12,6 +12,7 @@ .prom-ql-icon { height: 14px; width: 14px; + color: var(--bg-vanilla-200); } } .ant-btn-default { @@ -54,6 +55,10 @@ .ant-tabs-tab-active { .nav-btns { background: var(--bg-vanilla-300) !important; + + .prom-ql-icon { + color: var(--bg-ink-400); + } } } } diff --git a/frontend/src/container/NewWidget/LeftContainer/QuerySection/index.tsx b/frontend/src/container/NewWidget/LeftContainer/QuerySection/index.tsx index 8819cbf0f9..11f01b402f 100644 --- a/frontend/src/container/NewWidget/LeftContainer/QuerySection/index.tsx +++ b/frontend/src/container/NewWidget/LeftContainer/QuerySection/index.tsx @@ -1,6 +1,8 @@ import './QuerySection.styles.scss'; +import { Color } from '@signozhq/design-tokens'; import { Button, Tabs, Tooltip, Typography } from 'antd'; +import PromQLIcon from 'assets/Dashboard/PromQl'; import TextToolTip from 'components/TextToolTip'; import { PANEL_TYPES } from 'constants/queryBuilder'; import { QBShortcuts } from 'constants/shortcuts/QBShortcuts'; @@ -11,6 +13,7 @@ import { useKeyboardHotkeys } from 'hooks/hotkeys/useKeyboardHotkeys'; import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder'; import { useShareBuilderUrl } from 'hooks/queryBuilder/useShareBuilderUrl'; import { updateStepInterval } from 'hooks/queryBuilder/useStepInterval'; +import { useIsDarkMode } from 'hooks/useDarkMode'; import useUrlQuery from 'hooks/useUrlQuery'; import { defaultTo } from 'lodash-es'; import { Atom, Play, Terminal } from 'lucide-react'; @@ -53,6 +56,8 @@ function QuerySection({ const { selectedDashboard, setSelectedDashboard } = useDashboard(); + const isDarkMode = useIsDarkMode(); + const { widgets } = selectedDashboard?.data || {}; const getWidget = useCallback(() => { @@ -196,7 +201,9 @@ function QuerySection({ label: ( ), @@ -228,7 +235,10 @@ function QuerySection({ onChange={handleQueryCategoryChange} tabBarExtraContent={ - + - + + Add 
New Formula + + {' '} +
+ Learn more +
+ + } + > diff --git a/frontend/src/container/QueryBuilder/components/QBEntityOptions/QBEntityOptions.tsx b/frontend/src/container/QueryBuilder/components/QBEntityOptions/QBEntityOptions.tsx index a2ec473921..652518f50d 100644 --- a/frontend/src/container/QueryBuilder/components/QBEntityOptions/QBEntityOptions.tsx +++ b/frontend/src/container/QueryBuilder/components/QBEntityOptions/QBEntityOptions.tsx @@ -66,21 +66,25 @@ export default function QBEntityOptions({
- - + + + + + + {entityType === 'query' && ( diff --git a/frontend/src/container/QueryBuilder/components/Query/Query.tsx b/frontend/src/container/QueryBuilder/components/Query/Query.tsx index fb8b0e1561..074443364e 100644 --- a/frontend/src/container/QueryBuilder/components/Query/Query.tsx +++ b/frontend/src/container/QueryBuilder/components/Query/Query.tsx @@ -1,7 +1,7 @@ /* eslint-disable sonarjs/cognitive-complexity */ import './Query.styles.scss'; -import { Col, Input, Row } from 'antd'; +import { Col, Input, Row, Tooltip, Typography } from 'antd'; import { ENTITY_VERSION_V4 } from 'constants/app'; // ** Constants import { ATTRIBUTE_TYPES, PANEL_TYPES } from 'constants/queryBuilder'; @@ -367,11 +367,29 @@ export const Query = memo(function Query({ {version && version === 'v3' && ( - + + Select Aggregate Operator + + {' '} +
+ Learn more +
+
+ } + > + + )} @@ -388,12 +406,30 @@ export const Query = memo(function Query({ Array.isArray(operators) && operators.length > 0 && ( - + + Select Aggregate Operator + + {' '} +
+ Learn more +
+
+ } + > + +
)} @@ -422,11 +458,28 @@ export const Query = memo(function Query({ - + + Select Aggregate Operator + + {' '} +
+ Learn more +
+ + } + > + +
- + + Name of legend + + {' '} +
+ Learn more +
+ + } + > + +
)} diff --git a/frontend/src/container/QueryBuilder/components/QueryFunctions/QueryFunctions.tsx index 7dfdd99c9c..017047b507 100644 --- a/frontend/src/container/QueryBuilder/components/QueryFunctions/QueryFunctions.tsx +++ b/frontend/src/container/QueryBuilder/components/QueryFunctions/QueryFunctions.tsx @@ -1,6 +1,6 @@ import './QueryFunctions.styles.scss'; -import { Button, Tooltip } from 'antd'; +import { Button, Tooltip, Typography } from 'antd'; import cx from 'classnames'; import { useIsDarkMode } from 'hooks/useDarkMode'; import { cloneDeep, pullAt } from 'lodash-es'; @@ -180,9 +180,22 @@ export default function QueryFunctions({ = 3 - ? 'Functions are in early access. You can add a maximum of 3 function as of now.' - : '' + functions && functions.length >= 3 ? ( + 'Functions are in early access. You can add a maximum of 3 functions as of now.' + ) : (
+ Add new function + + {' '} +
+ Learn more +
+
+ ) } placement="right" > diff --git a/frontend/src/pages/SaveView/SaveView.styles.scss b/frontend/src/pages/SaveView/SaveView.styles.scss index 292a0b8d06..1e4b7bf0f6 100644 --- a/frontend/src/pages/SaveView/SaveView.styles.scss +++ b/frontend/src/pages/SaveView/SaveView.styles.scss @@ -25,6 +25,9 @@ line-height: 20px; /* 142.857% */ letter-spacing: -0.07px; } + .learn-more { + font-size: 14px; + } .ant-input-affix-wrapper { margin-top: 16px; diff --git a/frontend/src/pages/SaveView/index.tsx b/frontend/src/pages/SaveView/index.tsx index 2ba5535fc4..86a511291e 100644 --- a/frontend/src/pages/SaveView/index.tsx +++ b/frontend/src/pages/SaveView/index.tsx @@ -282,7 +282,14 @@ function SaveView(): JSX.Element {
Views - Manage your saved views for {ROUTES_VS_SOURCEPAGE[pathname]}. + Manage your saved views for {ROUTES_VS_SOURCEPAGE[pathname]}.{' '} + + Learn more + = 1.4.0 < 2", statuses@~1.5.0: +"statuses@>= 1.4.0 < 2": version "1.5.0" resolved "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz" integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA== @@ -16464,11 +15821,6 @@ string-convert@^0.2.0: resolved "https://registry.npmjs.org/string-convert/-/string-convert-0.2.1.tgz" integrity sha512-u/1tdPl4yQnPBjnVrmdLo9gtuLvELKsAoRapekWggdiQNvvvum+jYF329d84NAa660KQw7pB2n36KrIKVoXa3A== -string-hash@^1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/string-hash/-/string-hash-1.1.3.tgz#e8aafc0ac1855b4666929ed7dd1275df5d6c811b" - integrity sha512-kJUvRUFK49aub+a7T1nNE66EJbZBMnBgoC1UbCZ5n6bsZKBRga4KgBRTMn/pFkeCZSYtNeSyMxPDM0AXWELk2A== - string-length@^4.0.1: version "4.0.2" resolved "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz" @@ -16477,7 +15829,7 @@ string-length@^4.0.1: char-regex "^1.0.2" strip-ansi "^6.0.0" -string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: +string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: version "4.2.3" resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -16608,11 +15960,6 @@ strip-json-comments@^3.1.0, strip-json-comments@^3.1.1, strip-json-comments@~3.1 resolved "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz" integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== -strip-json-comments@~2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" - integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ== - style-dictionary@3.8.0: version "3.8.0" resolved "https://registry.yarnpkg.com/style-dictionary/-/style-dictionary-3.8.0.tgz#7cb8d64360c53431f768d44def665f61e971a73e" @@ -16732,11 +16079,6 @@ supports-preserve-symlinks-flag@^1.0.0: resolved "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz" integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== -svg-parser@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/svg-parser/-/svg-parser-2.0.4.tgz#fdc2e29e13951736140b76cb122c8ee6630eb6b5" - integrity sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ== - svgo@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/svgo/-/svgo-3.0.2.tgz#5e99eeea42c68ee0dc46aa16da093838c262fe0a" @@ -16956,11 +16298,6 @@ to-fast-properties@^2.0.0: resolved "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz" integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog== -to-readable-stream@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/to-readable-stream/-/to-readable-stream-1.0.0.tgz#ce0aa0c2f3df6adf852efb404a783e77c0475771" - integrity sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q== - to-regex-range@^5.0.1: version "5.0.1" resolved 
"https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz" @@ -16983,11 +16320,6 @@ totalist@^1.0.0: resolved "https://registry.npmjs.org/totalist/-/totalist-1.1.0.tgz" integrity sha512-gduQwd1rOdDMGxFG1gEvhV88Oirdo2p+KjoYFU7k2g+i7n6AFFbDQ5kMPUsW0pNbfQsB/cwXvT1i4Bue0s9g5g== -totalist@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/totalist/-/totalist-3.0.1.tgz#ba3a3d600c915b1a97872348f79c127475f6acf8" - integrity sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ== - tough-cookie@^4.0.0: version "4.1.3" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-4.1.3.tgz#97b9adb0728b42280aa3d814b6b999b2ff0318bf" @@ -17299,13 +16631,6 @@ unified@^10.0.0, unified@^10.1.2, unified@~10.1.1: trough "^2.0.0" vfile "^5.0.0" -unique-string@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-2.0.0.tgz#39c6451f81afb2749de2b233e3f7c5e8843bd89d" - integrity sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg== - dependencies: - crypto-random-string "^2.0.0" - unist-util-filter@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/unist-util-filter/-/unist-util-filter-4.0.1.tgz#fd885dd48adaad345de5f5dc706ec4ff44a8d074" @@ -17465,26 +16790,6 @@ update-browserslist-db@^1.0.13: escalade "^3.1.1" picocolors "^1.0.0" -update-notifier@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-5.1.0.tgz#4ab0d7c7f36a231dd7316cf7729313f0214d9ad9" - integrity sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw== - dependencies: - boxen "^5.0.0" - chalk "^4.1.0" - configstore "^5.0.1" - has-yarn "^2.1.0" - import-lazy "^2.1.0" - is-ci "^2.0.0" - is-installed-globally "^0.4.0" - is-npm "^5.0.0" - is-yarn-global "^0.3.0" - latest-version "^5.1.0" - pupa "^2.1.1" - semver "^7.3.4" - semver-diff "^3.1.1" - xdg-basedir "^4.0.0" - uplot@1.6.24: version "1.6.24" resolved "https://registry.yarnpkg.com/uplot/-/uplot-1.6.24.tgz#dfa213fa7da92763261920ea972ed1a5f9f6af12" @@ -17516,13 +16821,6 @@ uri-js@^4.2.2: dependencies: punycode "^2.1.0" -url-parse-lax@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" - integrity sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ== - dependencies: - prepend-http "^2.0.0" - url-parse@^1.5.3: version "1.5.10" resolved "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz" @@ -18149,7 +17447,7 @@ which-typed-array@^1.1.9: has-tostringtag "^1.0.0" is-typed-array "^1.1.10" -which@^1.2.12, which@^1.2.14, which@^1.2.9: +which@^1.2.12, which@^1.2.9: version "1.3.1" resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== @@ -18163,13 +17461,6 @@ which@^2.0.1, which@^2.0.2: dependencies: isexe "^2.0.0" -widest-line@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-3.1.0.tgz#8292333bbf66cb45ff0de1603b136b7ae1496eca" - integrity sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg== - dependencies: - string-width "^4.0.0" - wildcard@^2.0.0: version "2.0.1" resolved "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz" @@ -18236,16 +17527,6 @@ ws@^8.13.0: 
resolved "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz" integrity sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA== -ws@^8.5.0: - version "8.16.0" - resolved "https://registry.yarnpkg.com/ws/-/ws-8.16.0.tgz#d1cd774f36fbc07165066a60e40323eab6446fd4" - integrity sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ== - -xdg-basedir@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-4.0.0.tgz#4bc8d9984403696225ef83a1573cbbcb4e79db13" - integrity sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q== - xhr-request@^1.0.1: version "1.1.0" resolved "https://registry.npmjs.org/xhr-request/-/xhr-request-1.1.0.tgz" @@ -18345,11 +17626,6 @@ yaml@^1.10.0, yaml@^1.10.2: resolved "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz" integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== -yaml@^2.3.4: - version "2.3.4" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.3.4.tgz#53fc1d514be80aabf386dc6001eb29bf3b7523b2" - integrity sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA== - yargs-parser@20.x, yargs-parser@^20.2.2, yargs-parser@^20.2.3: version "20.2.9" resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz" diff --git a/go.mod b/go.mod index 5ea9a547ae..51d72be2f8 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/ClickHouse/clickhouse-go/v2 v2.20.0 github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd - github.com/SigNoz/signoz-otel-collector v0.88.22 + github.com/SigNoz/signoz-otel-collector v0.88.24 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974 github.com/antonmedv/expr v1.15.3 diff --git a/go.sum b/go.sum index 513d62c409..e0a7e14e85 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,8 @@ github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkb github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc= github.com/SigNoz/prometheus v1.11.0 h1:toX7fU2wqY1TnzvPzDglIYx6OxpqrZ0NNlM/H5S5+u8= github.com/SigNoz/prometheus v1.11.0/go.mod h1:MffmFu2qFILQrOHehx3D0XjYtaZMVfI+Ppeiv98x4Ww= -github.com/SigNoz/signoz-otel-collector v0.88.22 h1:PW9TpdQ8b8vWnUKWVe/w1bX8/Rq2MUUHGDIsx+KA+o0= -github.com/SigNoz/signoz-otel-collector v0.88.22/go.mod h1:sT1EM9PFDaOJLbAz5npWpgXK6OhpWJ9PpSwyhHWs9rU= +github.com/SigNoz/signoz-otel-collector v0.88.24 h1:6ESLmQtYPHmik9ZZFSJSbfuj4VQ1/0IC3v1qV9hm5Nk= +github.com/SigNoz/signoz-otel-collector v0.88.24/go.mod h1:sT1EM9PFDaOJLbAz5npWpgXK6OhpWJ9PpSwyhHWs9rU= github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc= github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo= github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY= diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index efad720f74..b7e1ec6a67 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -51,6 +51,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/common" "go.signoz.io/signoz/pkg/query-service/constants" 
"go.signoz.io/signoz/pkg/query-service/dao" + chErrors "go.signoz.io/signoz/pkg/query-service/errors" am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager" "go.signoz.io/signoz/pkg/query-service/interfaces" "go.signoz.io/signoz/pkg/query-service/model" @@ -71,11 +72,17 @@ const ( signozTraceTableName = "distributed_signoz_index_v2" signozTraceLocalTableName = "signoz_index_v2" signozMetricDBName = "signoz_metrics" - signozSampleLocalTableName = "samples_v2" - signozSampleTableName = "distributed_samples_v2" - signozTSTableName = "distributed_time_series_v2" - signozTSTableNameV4 = "distributed_time_series_v4" - signozTSTableNameV41Day = "distributed_time_series_v4_1day" + signozSampleLocalTableName = "samples_v4" + signozSampleTableName = "distributed_samples_v4" + + signozTSLocalTableNameV4 = "time_series_v4" + signozTSTableNameV4 = "distributed_time_series_v4" + + signozTSLocalTableNameV46Hrs = "time_series_v4_6hrs" + signozTSTableNameV46Hrs = "distributed_time_series_v4_6hrs" + + signozTSLocalTableNameV41Day = "time_series_v4_1day" + signozTSTableNameV41Day = "distributed_time_series_v4_1day" minTimespanForProgressiveSearch = time.Hour minTimespanForProgressiveSearchMargin = time.Minute @@ -2381,15 +2388,17 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, } case constants.MetricsTTL: - tableName := signozMetricDBName + "." + signozSampleLocalTableName - statusItem, err := r.checkTTLStatusItem(ctx, tableName) - if err != nil { - return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing ttl_status check sql query")} + tableNames := []string{signozMetricDBName + "." + signozSampleLocalTableName, signozMetricDBName + "." + signozTSLocalTableNameV4, signozMetricDBName + "." + signozTSLocalTableNameV46Hrs, signozMetricDBName + "." + signozTSLocalTableNameV41Day} + for _, tableName := range tableNames { + statusItem, err := r.checkTTLStatusItem(ctx, tableName) + if err != nil { + return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")} + } + if statusItem.Status == constants.StatusPending { + return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")} + } } - if statusItem.Status == constants.StatusPending { - return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")} - } - go func(tableName string) { + metricTTL := func(tableName string) { _, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration) if dbErr != nil { zap.L().Error("Error in inserting to ttl_status table", zap.Error(dbErr)) @@ -2433,7 +2442,10 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) return } - }(tableName) + } + for _, tableName := range tableNames { + go metricTTL(tableName) + } case constants.LogsTTL: tableName := r.logsDB + "." 
+ r.logsLocalTable statusItem, err := r.checkTTLStatusItem(ctx, tableName) @@ -3258,7 +3270,7 @@ func (r *ClickHouseReader) FetchTemporality(ctx context.Context, metricNames []s func (r *ClickHouseReader) GetTimeSeriesInfo(ctx context.Context) (map[string]interface{}, error) { - queryStr := fmt.Sprintf("SELECT count() as count from %s.%s where metric_name not like 'signoz_%%' group by metric_name order by count desc;", signozMetricDBName, signozTSTableName) + queryStr := fmt.Sprintf("SELECT countDistinct(fingerprint) as count from %s.%s where metric_name not like 'signoz_%%' group by metric_name order by count desc;", signozMetricDBName, signozTSTableNameV41Day) rows, _ := r.db.Query(ctx, queryStr) @@ -4570,6 +4582,11 @@ func readRowsForTimeSeriesResult(rows driver.Rows, vars []interface{}, columnNam return nil, err } groupBy, groupAttributes, groupAttributesArray, metricPoint := readRow(vars, columnNames) + // skip the point if the value is NaN or Inf + // are they ever useful enough to be returned? + if math.IsNaN(metricPoint.Value) || math.IsInf(metricPoint.Value, 0) { + continue + } sort.Strings(groupBy) key := strings.Join(groupBy, "") if _, exists := seriesToAttrs[key]; !exists { @@ -4700,11 +4717,11 @@ func getPersonalisedError(err error) error { } zap.L().Error("error while reading result", zap.Error(err)) if strings.Contains(err.Error(), "code: 307") { - return errors.New("query is consuming too much resources, please reach out to the team") + return chErrors.ErrResourceBytesLimitExceeded } if strings.Contains(err.Error(), "code: 159") { - return errors.New("Query is taking too long to run, please reach out to the team") + return chErrors.ErrResourceTimeLimitExceeded } return err } diff --git a/pkg/query-service/app/clickhouseReader/wrapper.go b/pkg/query-service/app/clickhouseReader/wrapper.go index 2f08167534..c21fde0ceb 100644 --- a/pkg/query-service/app/clickhouseReader/wrapper.go +++ b/pkg/query-service/app/clickhouseReader/wrapper.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "regexp" - "strings" "github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" @@ -43,12 +42,6 @@ func (c clickhouseConnWrapper) addClickHouseSettings(ctx context.Context, query settings["log_comment"] = logComment } - // don't add resource restrictions traces - if strings.Contains(query, "signoz_traces") { - ctx = clickhouse.Context(ctx, clickhouse.WithSettings(settings)) - return ctx - } - if c.settings.MaxBytesToRead != "" { settings["max_bytes_to_read"] = c.settings.MaxBytesToRead } diff --git a/pkg/query-service/app/dashboards/model.go b/pkg/query-service/app/dashboards/model.go index c69f30a6bd..e7f48f8f87 100644 --- a/pkg/query-service/app/dashboards/model.go +++ b/pkg/query-service/app/dashboards/model.go @@ -326,7 +326,15 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface if existingTotal > newTotal && existingTotal-newTotal > 1 { // if the total count of panels has reduced by more than 1, // return error - return nil, model.BadRequest(fmt.Errorf("deleting more than one panel is not supported")) + existingIds := getWidgetIds(dashboard.Data) + newIds := getWidgetIds(data) + + differenceIds := getIdDifference(existingIds, newIds) + + if len(differenceIds) > 1 { + return nil, model.BadRequest(fmt.Errorf("deleting more than one panel is not supported")) + } + } dashboard.UpdatedAt = time.Now() @@ -714,3 +722,52 @@ func countTraceAndLogsPanel(data map[string]interface{}) (int64, int64) { } return count, totalPanels } + +func 
getWidgetIds(data map[string]interface{}) []string { + widgetIds := []string{} + if data != nil && data["widgets"] != nil { + widgets, ok := data["widgets"].(interface{}) + if ok { + data, ok := widgets.([]interface{}) + if ok { + for _, widget := range data { + sData, ok := widget.(map[string]interface{}) + if ok && sData["query"] != nil && sData["id"] != nil { + id, ok := sData["id"].(string) + + if ok { + widgetIds = append(widgetIds, id) + } + + } + } + } + } + } + return widgetIds +} + +func getIdDifference(existingIds []string, newIds []string) []string { + // Convert newIds array to a map for faster lookups + newIdsMap := make(map[string]bool) + for _, id := range newIds { + newIdsMap[id] = true + } + + // Initialize a map to keep track of elements in the difference array + differenceMap := make(map[string]bool) + + // Initialize the difference array + difference := []string{} + + // Iterate through existingIds + for _, id := range existingIds { + // If the id is not found in newIds, and it's not already in the difference array + if _, found := newIdsMap[id]; !found && !differenceMap[id] { + difference = append(difference, id) + differenceMap[id] = true // Mark the id as seen in the difference array + } + } + + return difference +} diff --git a/pkg/query-service/app/formula.go b/pkg/query-service/app/formula.go index 619ae15cb3..f1f10e4499 100644 --- a/pkg/query-service/app/formula.go +++ b/pkg/query-service/app/formula.go @@ -4,6 +4,7 @@ import ( "fmt" "math" "sort" + "time" "github.com/SigNoz/govaluate" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" @@ -89,6 +90,7 @@ func joinAndCalculate(results []*v3.Result, uniqueLabelSet map[string]string, ex resultSeries := &v3.Series{ Labels: uniqueLabelSet, + Points: make([]v3.Point, 0), } timestamps := make([]int64, 0) for timestamp := range uniqueTimestamps { @@ -158,7 +160,7 @@ func processResults(results []*v3.Result, expression *govaluate.EvaluableExpress }, nil } -var SupportedFunctions = []string{"exp", "log", "ln", "exp2", "log2", "exp10", "log10", "sqrt", "cbrt", "erf", "erfc", "lgamma", "tgamma", "sin", "cos", "tan", "asin", "acos", "atan", "degrees", "radians"} +var SupportedFunctions = []string{"exp", "log", "ln", "exp2", "log2", "exp10", "log10", "sqrt", "cbrt", "erf", "erfc", "lgamma", "tgamma", "sin", "cos", "tan", "asin", "acos", "atan", "degrees", "radians", "now", "toUnixTimestamp"} func evalFuncs() map[string]govaluate.ExpressionFunction { GoValuateFuncs := make(map[string]govaluate.ExpressionFunction) @@ -247,5 +249,21 @@ func evalFuncs() map[string]govaluate.ExpressionFunction { GoValuateFuncs["radians"] = func(args ...interface{}) (interface{}, error) { return args[0].(float64) * math.Pi / 180, nil } + + GoValuateFuncs["now"] = func(args ...interface{}) (interface{}, error) { + return time.Now().Unix(), nil + } + + GoValuateFuncs["toUnixTimestamp"] = func(args ...interface{}) (interface{}, error) { + if len(args) != 1 { + return nil, fmt.Errorf("toUnixTimestamp requires exactly one argument") + } + t, err := time.Parse(time.RFC3339, args[0].(string)) + if err != nil { + return nil, err + } + return t.Unix(), nil + } + return GoValuateFuncs } diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index c7adc95416..e3f7c5a165 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -483,49 +483,7 @@ func (aH *APIHandler) getRule(w http.ResponseWriter, r *http.Request) { aH.Respond(w, ruleResponse) } -func (aH *APIHandler) addTemporality(ctx 
context.Context, qp *v3.QueryRangeParamsV3) error { - - metricNames := make([]string, 0) - metricNameToTemporality := make(map[string]map[v3.Temporality]bool) - if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 { - for _, query := range qp.CompositeQuery.BuilderQueries { - if query.DataSource == v3.DataSourceMetrics && query.Temporality == "" { - metricNames = append(metricNames, query.AggregateAttribute.Key) - if _, ok := metricNameToTemporality[query.AggregateAttribute.Key]; !ok { - metricNameToTemporality[query.AggregateAttribute.Key] = make(map[v3.Temporality]bool) - } - } - } - } - - var err error - - if aH.preferDelta { - zap.L().Debug("fetching metric temporality") - metricNameToTemporality, err = aH.reader.FetchTemporality(ctx, metricNames) - if err != nil { - return err - } - } - - if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 { - for name := range qp.CompositeQuery.BuilderQueries { - query := qp.CompositeQuery.BuilderQueries[name] - if query.DataSource == v3.DataSourceMetrics && query.Temporality == "" { - if aH.preferDelta && metricNameToTemporality[query.AggregateAttribute.Key][v3.Delta] { - query.Temporality = v3.Delta - } else if metricNameToTemporality[query.AggregateAttribute.Key][v3.Cumulative] { - query.Temporality = v3.Cumulative - } else { - query.Temporality = v3.Unspecified - } - } - } - } - return nil -} - -// populateTemporality same as addTemporality but for v4 and better +// populateTemporality adds the temporality to the query if it is not present func (aH *APIHandler) populateTemporality(ctx context.Context, qp *v3.QueryRangeParamsV3) error { missingTemporality := make([]string, 0) @@ -2347,13 +2305,28 @@ func (ah *APIHandler) calculateConnectionStatus( func (ah *APIHandler) calculateLogsConnectionStatus( ctx context.Context, - logsConnectionTest *v3.FilterSet, + logsConnectionTest *integrations.LogsConnectionTest, lookbackSeconds int64, ) (*integrations.SignalConnectionStatus, *model.ApiError) { if logsConnectionTest == nil { return nil, nil } + logsConnTestFilter := &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: logsConnectionTest.AttributeKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: "=", + Value: logsConnectionTest.AttributeValue, + }, + }, + } + qrParams := &v3.QueryRangeParamsV3{ Start: time.Now().UnixMilli() - (lookbackSeconds * 1000), End: time.Now().UnixMilli(), @@ -2363,7 +2336,7 @@ func (ah *APIHandler) calculateLogsConnectionStatus( BuilderQueries: map[string]*v3.BuilderQuery{ "A": { PageSize: 1, - Filters: logsConnectionTest, + Filters: logsConnTestFilter, QueryName: "A", DataSource: v3.DataSourceLogs, Expression: "A", @@ -2892,7 +2865,7 @@ func (aH *APIHandler) autoCompleteAttributeValues(w http.ResponseWriter, r *http aH.Respond(w, response) } -func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries map[string]string) ([]*v3.Result, error, map[string]string) { +func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries map[string]string) ([]*v3.Result, error, map[string]error) { type channelResult struct { Series []*v3.Series Err error @@ -2922,13 +2895,13 @@ func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries ma close(ch) var errs []error - errQuriesByName := make(map[string]string) + errQuriesByName := make(map[string]error) res := make([]*v3.Result, 0) // read values from the channel for r := range ch { if r.Err != nil { errs = 
append(errs, r.Err) - errQuriesByName[r.Name] = r.Query + errQuriesByName[r.Name] = r.Err continue } res = append(res, &v3.Result{ @@ -2942,7 +2915,7 @@ func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries ma return res, nil, nil } -func (aH *APIHandler) execClickHouseListQueries(ctx context.Context, queries map[string]string) ([]*v3.Result, error, map[string]string) { +func (aH *APIHandler) execClickHouseListQueries(ctx context.Context, queries map[string]string) ([]*v3.Result, error, map[string]error) { type channelResult struct { List []*v3.Row Err error @@ -2971,13 +2944,13 @@ func (aH *APIHandler) execClickHouseListQueries(ctx context.Context, queries map close(ch) var errs []error - errQuriesByName := make(map[string]string) + errQuriesByName := make(map[string]error) res := make([]*v3.Result, 0) // read values from the channel for r := range ch { if r.Err != nil { errs = append(errs, r.Err) - errQuriesByName[r.Name] = r.Query + errQuriesByName[r.Name] = r.Err continue } res = append(res, &v3.Result{ @@ -2991,7 +2964,7 @@ func (aH *APIHandler) execClickHouseListQueries(ctx context.Context, queries map return res, nil, nil } -func (aH *APIHandler) execPromQueries(ctx context.Context, metricsQueryRangeParams *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]string) { +func (aH *APIHandler) execPromQueries(ctx context.Context, metricsQueryRangeParams *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]error) { type channelResult struct { Series []*v3.Series Err error @@ -3051,13 +3024,13 @@ func (aH *APIHandler) execPromQueries(ctx context.Context, metricsQueryRangePara close(ch) var errs []error - errQuriesByName := make(map[string]string) + errQuriesByName := make(map[string]error) res := make([]*v3.Result, 0) // read values from the channel for r := range ch { if r.Err != nil { errs = append(errs, r.Err) - errQuriesByName[r.Name] = r.Query + errQuriesByName[r.Name] = r.Err continue } res = append(res, &v3.Result{ @@ -3155,7 +3128,7 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que var result []*v3.Result var err error - var errQuriesByName map[string]string + var errQuriesByName map[string]error var spanKeys map[string]v3.AttributeKey if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder { // check if any enrichment is required for logs if yes then enrich them @@ -3305,8 +3278,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) { } // add temporality for each metric - - temporalityErr := aH.addTemporality(r.Context(), queryRangeParams) + temporalityErr := aH.populateTemporality(r.Context(), queryRangeParams) if temporalityErr != nil { zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr)) RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil) @@ -3412,7 +3384,7 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que var result []*v3.Result var err error - var errQuriesByName map[string]string + var errQuriesByName map[string]error var spanKeys map[string]v3.AttributeKey if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder { // check if any enrichment is required for logs if yes then enrich them diff --git a/pkg/query-service/app/integrations/builtin.go b/pkg/query-service/app/integrations/builtin.go index cf98b3ff9d..00810cacc1 100644 --- a/pkg/query-service/app/integrations/builtin.go +++ b/pkg/query-service/app/integrations/builtin.go @@ -1,6 +1,7 @@ package 
integrations import ( + "bytes" "context" "embed" "strings" @@ -120,7 +121,9 @@ func readBuiltInIntegration(dirpath string) ( } var integration IntegrationDetails - err = json.Unmarshal(hydratedSpecJson, &integration) + decoder := json.NewDecoder(bytes.NewReader(hydratedSpecJson)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&integration) if err != nil { return nil, fmt.Errorf( "couldn't parse hydrated JSON spec read from %s: %w", diff --git a/pkg/query-service/app/integrations/builtin_integrations/clickhouse/config/collect-query-logs.md b/pkg/query-service/app/integrations/builtin_integrations/clickhouse/config/collect-query-logs.md index 373b1016d6..24e20a7815 100644 --- a/pkg/query-service/app/integrations/builtin_integrations/clickhouse/config/collect-query-logs.md +++ b/pkg/query-service/app/integrations/builtin_integrations/clickhouse/config/collect-query-logs.md @@ -78,3 +78,5 @@ Make the collector config file available to your otel collector and use it by ad ``` Note: the collector can use multiple config files, specified by multiple occurrences of the --config flag. +Also note that only one collector instance should be configured to collect query logs. +Using multiple collector instances or replicas with this config will lead to duplicate logs. diff --git a/pkg/query-service/app/integrations/builtin_integrations/clickhouse/config/prerequisites.md b/pkg/query-service/app/integrations/builtin_integrations/clickhouse/config/prerequisites.md index 7f1019666d..f2c667edbc 100644 --- a/pkg/query-service/app/integrations/builtin_integrations/clickhouse/config/prerequisites.md +++ b/pkg/query-service/app/integrations/builtin_integrations/clickhouse/config/prerequisites.md @@ -30,7 +30,7 @@ To configure metrics and logs collection for a Clickhouse server, you need the f - **Ensure that an OTEL collector is running in your deployment environment** If needed, please [install SigNoz OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) If already installed, ensure that the collector version is v0.88.0 or newer. - If collecting logs from system.query_log table, ensure that the collector version is v0.88.22 or newer. + If collecting logs from system.query_log table, ensure that the collector version is v0.88.23 or newer. Also ensure that you can provide config files to the collector and that you can set environment variables and command line flags used for running it.
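For reference, the multi-config invocation that collect-query-logs.md describes above would look roughly like this; a minimal sketch, where the binary name and the config file paths are illustrative and not part of this patch:

```sh
# Each occurrence of --config contributes one file; the collector merges them.
# Keep the query_log receiver config on exactly one collector instance,
# since multiple instances tailing system.query_log produce duplicate logs.
otelcol-contrib \
  --config /etc/otelcol/base-config.yaml \
  --config /etc/otelcol/clickhouse-query-logs.yaml
```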
diff --git a/pkg/query-service/app/integrations/builtin_integrations/clickhouse/integration.json b/pkg/query-service/app/integrations/builtin_integrations/clickhouse/integration.json index 99bf16c72d..3135ce402e 100644 --- a/pkg/query-service/app/integrations/builtin_integrations/clickhouse/integration.json +++ b/pkg/query-service/app/integrations/builtin_integrations/clickhouse/integration.json @@ -41,18 +41,8 @@ }, "connection_tests": { "logs": { - "op": "AND", - "items": [ - { - "key": { - "type": "tag", - "key": "source", - "dataType": "string" - }, - "op": "=", - "value": "clickhouse" - } - ] + "attribute_key": "source", + "attribute_value": "clickhouse" } }, "data_collected": "file://data-collected.json" diff --git a/pkg/query-service/app/integrations/builtin_integrations/mongo/integration.json b/pkg/query-service/app/integrations/builtin_integrations/mongo/integration.json index b9543e0757..d5e24eb4fc 100644 --- a/pkg/query-service/app/integrations/builtin_integrations/mongo/integration.json +++ b/pkg/query-service/app/integrations/builtin_integrations/mongo/integration.json @@ -37,18 +37,8 @@ }, "connection_tests": { "logs": { - "op": "AND", - "items": [ - { - "key": { - "type": "tag", - "key": "source", - "dataType": "string" - }, - "op": "=", - "value": "mongo" - } - ] + "attribute_key": "source", + "attribute_value": "mongodb" } }, "data_collected": { diff --git a/pkg/query-service/app/integrations/builtin_integrations/nginx/integration.json b/pkg/query-service/app/integrations/builtin_integrations/nginx/integration.json index 16f03bbed3..7789a5ae90 100644 --- a/pkg/query-service/app/integrations/builtin_integrations/nginx/integration.json +++ b/pkg/query-service/app/integrations/builtin_integrations/nginx/integration.json @@ -32,18 +32,8 @@ }, "connection_tests": { "logs": { - "op": "AND", - "items": [ - { - "key": { - "type": "tag", - "key": "source", - "dataType": "string" - }, - "op": "=", - "value": "nginx" - } - ] + "attribute_key": "source", + "attribute_value": "nginx" } }, "data_collected": { diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json b/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json index 823ba61223..2040a5c946 100644 --- a/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json +++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json @@ -37,18 +37,8 @@ }, "connection_tests": { "logs": { - "op": "AND", - "items": [ - { - "key": { - "type": "tag", - "key": "source", - "dataType": "string" - }, - "op": "=", - "value": "postgres" - } - ] + "attribute_key": "source", + "attribute_value": "postgres" } }, "data_collected": { diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json b/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json index e3f5ef2e3c..c0da666181 100644 --- a/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json +++ b/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json @@ -37,18 +37,8 @@ }, "connection_tests": { "logs": { - "op": "AND", - "items": [ - { - "key": { - "type": "tag", - "key": "source", - "dataType": "string" - }, - "op": "=", - "value": "redis" - } - ] + "attribute_key": "source", + "attribute_value": "redis" } }, "data_collected": { diff --git a/pkg/query-service/app/integrations/manager.go b/pkg/query-service/app/integrations/manager.go index c3ebd21cc2..6cd5a0c853 100644 --- 
a/pkg/query-service/app/integrations/manager.go +++ b/pkg/query-service/app/integrations/manager.go @@ -12,7 +12,6 @@ import ( "go.signoz.io/signoz/pkg/query-service/app/dashboards" "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline" "go.signoz.io/signoz/pkg/query-service/model" - v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/rules" "go.signoz.io/signoz/pkg/query-service/utils" ) @@ -60,9 +59,10 @@ type CollectedLogAttribute struct { } type CollectedMetric struct { - Name string `json:"name"` - Type string `json:"type"` - Unit string `json:"unit"` + Name string `json:"name"` + Type string `json:"type"` + Unit string `json:"unit"` + Description string `json:"description"` } type SignalConnectionStatus struct { @@ -75,9 +75,14 @@ type IntegrationConnectionStatus struct { Metrics *SignalConnectionStatus `json:"metrics"` } +// log attribute value to use for finding logs for the integration. +type LogsConnectionTest struct { + AttributeKey string `json:"attribute_key"` + AttributeValue string `json:"attribute_value"` +} + type IntegrationConnectionTests struct { - // Filter to use for finding logs for the integration. - Logs *v3.FilterSet `json:"logs"` + Logs *LogsConnectionTest `json:"logs"` // Metric names expected to have been received for the integration. Metrics []string `json:"metrics"` @@ -253,7 +258,7 @@ func (m *Manager) UninstallIntegration( func (m *Manager) GetPipelinesForInstalledIntegrations( ctx context.Context, ) ([]logparsingpipeline.Pipeline, *model.ApiError) { - installedIntegrations, apiErr := m.getDetailsForInstalledIntegrations(ctx) + installedIntegrations, apiErr := m.getInstalledIntegrations(ctx) if apiErr != nil { return nil, apiErr } @@ -322,10 +327,15 @@ func (m *Manager) GetInstalledIntegrationDashboardById( if dId, exists := dd["id"]; exists { if id, ok := dId.(string); ok && id == dashboardId { isLocked := 1 + author := "integration" return &dashboards.Dashboard{ - Uuid: m.dashboardUuid(integrationId, string(dashboardId)), - Locked: &isLocked, - Data: dd, + Uuid: m.dashboardUuid(integrationId, string(dashboardId)), + Locked: &isLocked, + Data: dd, + CreatedAt: integration.Installation.InstalledAt, + CreateBy: &author, + UpdatedAt: integration.Installation.InstalledAt, + UpdateBy: &author, }, nil } } @@ -339,7 +349,7 @@ func (m *Manager) GetInstalledIntegrationDashboardById( func (m *Manager) GetDashboardsForInstalledIntegrations( ctx context.Context, ) ([]dashboards.Dashboard, *model.ApiError) { - installedIntegrations, apiErr := m.getDetailsForInstalledIntegrations(ctx) + installedIntegrations, apiErr := m.getInstalledIntegrations(ctx) if apiErr != nil { return nil, apiErr } @@ -351,10 +361,15 @@ func (m *Manager) GetDashboardsForInstalledIntegrations( if dId, exists := dd["id"]; exists { if dashboardId, ok := dId.(string); ok { isLocked := 1 + author := "integration" result = append(result, dashboards.Dashboard{ - Uuid: m.dashboardUuid(ii.IntegrationSummary.Id, dashboardId), - Locked: &isLocked, - Data: dd, + Uuid: m.dashboardUuid(ii.IntegrationSummary.Id, dashboardId), + Locked: &isLocked, + Data: dd, + CreatedAt: ii.Installation.InstalledAt, + CreateBy: &author, + UpdatedAt: ii.Installation.InstalledAt, + UpdateBy: &author, }) } } @@ -413,10 +428,10 @@ func (m *Manager) getInstalledIntegration( return &installation, nil } -func (m *Manager) getDetailsForInstalledIntegrations( +func (m *Manager) getInstalledIntegrations( ctx context.Context, ) ( - map[string]IntegrationDetails, *model.ApiError, + 
map[string]Integration, *model.ApiError, ) { installations, apiErr := m.installedIntegrationsRepo.list(ctx) if apiErr != nil { @@ -426,5 +441,24 @@ func (m *Manager) getDetailsForInstalledIntegrations( installedIds := utils.MapSlice(installations, func(i InstalledIntegration) string { return i.IntegrationId }) - return m.availableIntegrationsRepo.get(ctx, installedIds) + integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedIds) + if apiErr != nil { + return nil, apiErr + } + + result := map[string]Integration{} + for _, ii := range installations { + iDetails, exists := integrationDetails[ii.IntegrationId] + if !exists { + return nil, model.InternalError(fmt.Errorf( + "couldn't find integration details for %s", ii.IntegrationId, + )) + } + + result[ii.IntegrationId] = Integration{ + Installation: &ii, + IntegrationDetails: iDetails, + } + } + return result, nil } diff --git a/pkg/query-service/app/integrations/test_utils.go b/pkg/query-service/app/integrations/test_utils.go index 1ff964b3e6..adb667b96e 100644 --- a/pkg/query-service/app/integrations/test_utils.go +++ b/pkg/query-service/app/integrations/test_utils.go @@ -96,19 +96,9 @@ func (t *TestAvailableIntegrationsRepo) list( Alerts: []rules.PostableRule{}, }, ConnectionTests: &IntegrationConnectionTests{ - Logs: &v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - { - Key: v3.AttributeKey{ - Key: "source", - DataType: v3.AttributeKeyDataTypeString, - Type: v3.AttributeKeyTypeTag, - }, - Operator: "=", - Value: "nginx", - }, - }, + Logs: &LogsConnectionTest{ + AttributeKey: "source", + AttributeValue: "nginx", }, }, }, { @@ -174,19 +164,9 @@ func (t *TestAvailableIntegrationsRepo) list( Alerts: []rules.PostableRule{}, }, ConnectionTests: &IntegrationConnectionTests{ - Logs: &v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - { - Key: v3.AttributeKey{ - Key: "source", - DataType: v3.AttributeKeyDataTypeString, - Type: v3.AttributeKeyTypeTag, - }, - Operator: "=", - Value: "nginx", - }, - }, + Logs: &LogsConnectionTest{ + AttributeKey: "source", + AttributeValue: "nginx", }, }, }, diff --git a/pkg/query-service/app/limit.go b/pkg/query-service/app/limit.go index 6b8faecea2..3ace3c687c 100644 --- a/pkg/query-service/app/limit.go +++ b/pkg/query-service/app/limit.go @@ -40,12 +40,13 @@ func applyMetricLimit(results []*v3.Result, queryRangeParams *v3.QueryRangeParam } } - ithSum, jthSum := 0.0, 0.0 + ithSum, jthSum, ithCount, jthCount := 0.0, 0.0, 0.0, 0.0 for _, point := range result.Series[i].Points { if math.IsNaN(point.Value) || math.IsInf(point.Value, 0) { continue } ithSum += point.Value + ithCount++ } for _, point := range result.Series[j].Points { @@ -53,12 +54,17 @@ func applyMetricLimit(results []*v3.Result, queryRangeParams *v3.QueryRangeParam continue } jthSum += point.Value + jthCount++ } + // avoid division by zero + ithCount = math.Max(ithCount, 1) + jthCount = math.Max(jthCount, 1) + if orderBy.Order == "asc" { - return ithSum < jthSum + return ithSum/ithCount < jthSum/jthCount } else if orderBy.Order == "desc" { - return ithSum > jthSum + return ithSum/ithCount > jthSum/jthCount } } else { // Sort based on Labels map diff --git a/pkg/query-service/app/metrics/v3/cumulative_table.go b/pkg/query-service/app/metrics/v3/cumulative_table.go index 157216bb23..d57743cb58 100644 --- a/pkg/query-service/app/metrics/v3/cumulative_table.go +++ b/pkg/query-service/app/metrics/v3/cumulative_table.go @@ -5,6 +5,7 @@ import ( "math" "strings" + 
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" "go.signoz.io/signoz/pkg/query-service/constants" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/utils" @@ -28,7 +29,7 @@ func stepForTableCumulative(start, end int64) int64 { return int64(step) } -func buildMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableName string) (string, error) { +func buildMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery) (string, error) { step := stepForTableCumulative(start, end) @@ -36,46 +37,19 @@ func buildMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableNam metricQueryGroupBy := mq.GroupBy - // if the aggregate operator is a histogram quantile, and user has not forgotten - // the le tag in the group by then add the le tag to the group by - if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant75 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant90 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant95 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant99 { - found := false - for _, tag := range mq.GroupBy { - if tag.Key == "le" { - found = true - break - } - } - if !found { - metricQueryGroupBy = append( - metricQueryGroupBy, - v3.AttributeKey{ - Key: "le", - DataType: v3.AttributeKeyDataTypeString, - Type: v3.AttributeKeyTypeTag, - IsColumn: false, - }, - ) - } - } - - filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq) + filterSubQuery, err := helpers.PrepareTimeseriesFilterQueryV3(start, end, mq) if err != nil { return "", err } - samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) + samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) // Select the aggregate value for interval queryTmplCounterInner := "SELECT %s" + - " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," + + " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," + " %s as value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME + + " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + @@ -88,7 +62,7 @@ func buildMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableNam "SELECT %s" + " toStartOfHour(now()) as ts," + // now() has no menaing & used as a placeholder for ts " %s as value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME + + " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." 
+ constants.SIGNOZ_SAMPLES_V4_TABLENAME + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + diff --git a/pkg/query-service/app/metrics/v3/cumulative_table_test.go b/pkg/query-service/app/metrics/v3/cumulative_table_test.go index 2da3421da2..26748b9f09 100644 --- a/pkg/query-service/app/metrics/v3/cumulative_table_test.go +++ b/pkg/query-service/app/metrics/v3/cumulative_table_test.go @@ -38,7 +38,7 @@ func TestPanelTableForCumulative(t *testing.T) { }, Expression: "A", }, - expected: "SELECT toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts", + expected: "SELECT toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Cumulative' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts", }, { name: "latency p50", @@ -60,8 +60,13 @@ func TestPanelTableForCumulative(t *testing.T) { }, }, Expression: "A", + GroupBy: []v3.AttributeKey{ + { + Key: "le", + }, + }, }, - expected: "SELECT toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT le, ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER 
rate_window))) as rate_value FROM(SELECT fingerprint, le, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, le,ts ORDER BY fingerprint, le ASC, ts) WINDOW rate_window as (PARTITION BY fingerprint, le ORDER BY fingerprint, le ASC, ts)) WHERE isNaN(rate_value) = 0 GROUP BY le,ts ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts", + expected: "SELECT toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT le, ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Cumulative' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000 AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY fingerprint, le,ts ORDER BY fingerprint, le ASC, ts) WINDOW rate_window as (PARTITION BY fingerprint, le ORDER BY fingerprint, le ASC, ts)) WHERE isNaN(rate_value) = 0 GROUP BY le,ts ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts", }, { name: "latency p99 with group by", @@ -77,16 +82,19 @@ func TestPanelTableForCumulative(t *testing.T) { { Key: "service_name", }, + { + Key: "le", + }, }, Expression: "A", }, - expected: "SELECT service_name, toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT service_name,le, ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, service_name,le, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality IN ['Cumulative', 'Unspecified']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 
1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, service_name,le,ts ORDER BY fingerprint, service_name ASC,le ASC, ts) WINDOW rate_window as (PARTITION BY fingerprint, service_name,le ORDER BY fingerprint, service_name ASC,le ASC, ts)) WHERE isNaN(rate_value) = 0 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts", + expected: "SELECT service_name, toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT service_name,le, ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, service_name,le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Cumulative' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY fingerprint, service_name,le,ts ORDER BY fingerprint, service_name ASC,le ASC, ts) WINDOW rate_window as (PARTITION BY fingerprint, service_name,le ORDER BY fingerprint, service_name ASC,le ASC, ts)) WHERE isNaN(rate_value) = 0 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts", }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - query, err := buildMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query, "distributed_time_series_v2") + query, err := buildMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query) if err != nil { t.Fatalf("unexpected error: %v\n", err) } diff --git a/pkg/query-service/app/metrics/v3/delta.go b/pkg/query-service/app/metrics/v3/delta.go index c2fa38507a..c2a1893af0 100644 --- a/pkg/query-service/app/metrics/v3/delta.go +++ b/pkg/query-service/app/metrics/v3/delta.go @@ -3,63 +3,47 @@ package v3 import ( "fmt" + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" "go.signoz.io/signoz/pkg/query-service/constants" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/utils" ) -func buildDeltaMetricQuery(start, end, step int64, mq *v3.BuilderQuery, tableName string) (string, error) { +func buildDeltaMetricQuery(start, end, step int64, mq *v3.BuilderQuery) (string, error) { metricQueryGroupBy := mq.GroupBy - // if the aggregate operator is a histogram quantile, and user has not forgotten - // the le tag in the group by then add the le tag to the group by - if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant75 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant90 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant95 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant99 { - found := false - for _, tag := range mq.GroupBy { - if tag.Key == 
"le" { - found = true + if mq.Filters != nil { + temporalityFound := false + for _, filter := range mq.Filters.Items { + if filter.Key.Key == "__temporality__" { + temporalityFound = true break } } - if !found { - metricQueryGroupBy = append( - metricQueryGroupBy, - v3.AttributeKey{ - Key: "le", - DataType: v3.AttributeKeyDataTypeString, - Type: v3.AttributeKeyTypeTag, - IsColumn: false, - }, - ) + + if !temporalityFound { + mq.Filters.Items = append(mq.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: "__temporality__"}, + Operator: v3.FilterOperatorEqual, + Value: "Delta", + }) } } - if mq.Filters != nil { - mq.Filters.Items = append(mq.Filters.Items, v3.FilterItem{ - Key: v3.AttributeKey{Key: "__temporality__"}, - Operator: v3.FilterOperatorEqual, - Value: "Delta", - }) - } - - filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq) + filterSubQuery, err := helpers.PrepareTimeseriesFilterQueryV3(start, end, mq) if err != nil { return "", err } - samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) + samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) // Select the aggregate value for interval queryTmpl := "SELECT %s" + - " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," + + " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," + " %s as value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME + + " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + @@ -157,9 +141,9 @@ func buildDeltaMetricQuery(start, end, step int64, mq *v3.BuilderQuery, tableNam case v3.AggregateOperatorNoOp: queryTmpl := "SELECT fingerprint, labels as fullLabels," + - " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," + + " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," + " any(value) as value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME + + " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." 
+ constants.SIGNOZ_SAMPLES_V4_TABLENAME + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + diff --git a/pkg/query-service/app/metrics/v3/delta_table.go b/pkg/query-service/app/metrics/v3/delta_table.go index 7d98d27b1a..4fdf152d95 100644 --- a/pkg/query-service/app/metrics/v3/delta_table.go +++ b/pkg/query-service/app/metrics/v3/delta_table.go @@ -4,12 +4,13 @@ import ( "fmt" "math" + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" "go.signoz.io/signoz/pkg/query-service/constants" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/utils" ) -func buildDeltaMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableName string) (string, error) { +func buildDeltaMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery) (string, error) { // round up to the nearest multiple of 60 step := int64(math.Ceil(float64(end-start+1)/1000/60) * 60) @@ -43,17 +44,17 @@ func buildDeltaMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tab } } - filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq) + filterSubQuery, err := helpers.PrepareTimeseriesFilterQueryV3(start, end, mq) if err != nil { return "", err } - samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) + samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) queryTmpl := "SELECT %s toStartOfHour(now()) as ts," + // now() has no meaning & is used as a placeholder for ts " %s as value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME + + " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." 
+ constants.SIGNOZ_SAMPLES_V4_TABLENAME + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + diff --git a/pkg/query-service/app/metrics/v3/delta_table_test.go b/pkg/query-service/app/metrics/v3/delta_table_test.go index d22807f2c1..3cb0598cfa 100644 --- a/pkg/query-service/app/metrics/v3/delta_table_test.go +++ b/pkg/query-service/app/metrics/v3/delta_table_test.go @@ -38,7 +38,7 @@ func TestPanelTableForDelta(t *testing.T) { }, Expression: "A", }, - expected: "SELECT toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY ts ORDER BY ts", + expected: "SELECT toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY ts ORDER BY ts", }, { name: "latency p50", @@ -61,7 +61,7 @@ func TestPanelTableForDelta(t *testing.T) { }, Expression: "A", }, - expected: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY le,ts ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts", + expected: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000 AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY le,ts ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts", }, { name: "latency p99 with group by", @@ -80,13 +80,13 @@ func TestPanelTableForDelta(t *testing.T) { }, Expression: "A", }, - expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM 
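A worked check of the /1800 divisor in these delta table expectations: the test window runs from 1689255866000 to 1689257640000 ms, i.e. 1,774,000 ms, and buildDeltaMetricQueryForTable rounds that up to a whole minute via step := int64(math.Ceil(float64(end-start+1)/1000/60) * 60), giving ceil(29.57) * 60 = 1800 s; sum(value)/1800 therefore converts the summed delta counts into an average per-second rate over the panel window.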
signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' ) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts", + expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts", }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - query, err := buildDeltaMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query, "distributed_time_series_v2") + query, err := buildDeltaMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/pkg/query-service/app/metrics/v3/query_builder.go b/pkg/query-service/app/metrics/v3/query_builder.go index 01b860c968..b5453e97b4 100644 --- a/pkg/query-service/app/metrics/v3/query_builder.go +++ b/pkg/query-service/app/metrics/v3/query_builder.go @@ -6,6 +6,7 @@ import ( "strings" "time" + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" "go.signoz.io/signoz/pkg/query-service/constants" "go.signoz.io/signoz/pkg/query-service/model" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" @@ -51,136 +52,23 @@ var aggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{ // See https://github.com/SigNoz/signoz/issues/2151#issuecomment-1467249056 var rateWithoutNegative = `If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) ` -// buildMetricsTimeSeriesFilterQuery builds the sub-query to be used for filtering -// timeseries based on search criteria -func buildMetricsTimeSeriesFilterQuery(fs *v3.FilterSet, groupTags []v3.AttributeKey, mq *v3.BuilderQuery) (string, error) { - metricName := mq.AggregateAttribute.Key - aggregateOperator := mq.AggregateOperator - var conditions []string - if mq.Temporality == v3.Delta { - conditions = append(conditions, fmt.Sprintf("metric_name = %s AND temporality = '%s' ", utils.ClickHouseFormattedValue(metricName), v3.Delta)) - } else { - conditions = append(conditions, fmt.Sprintf("metric_name = %s AND temporality IN ['%s', '%s']", utils.ClickHouseFormattedValue(metricName), v3.Cumulative, v3.Unspecified)) - } - - if fs != nil && len(fs.Items) != 0 { - for _, item := range fs.Items { - toFormat := item.Value - op := 
v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator)))) - // if the received value is an array for like/match op, just take the first value - // or should we throw an error? - if op == v3.FilterOperatorLike || op == v3.FilterOperatorRegex || op == v3.FilterOperatorNotLike || op == v3.FilterOperatorNotRegex { - x, ok := item.Value.([]interface{}) - if ok { - if len(x) == 0 { - continue - } - toFormat = x[0] - } - } - - if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains { - toFormat = fmt.Sprintf("%%%s%%", toFormat) - } - fmtVal := utils.ClickHouseFormattedValue(toFormat) - switch op { - case v3.FilterOperatorEqual: - conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') = %s", item.Key.Key, fmtVal)) - case v3.FilterOperatorNotEqual: - conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') != %s", item.Key.Key, fmtVal)) - case v3.FilterOperatorIn: - conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') IN %s", item.Key.Key, fmtVal)) - case v3.FilterOperatorNotIn: - conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') NOT IN %s", item.Key.Key, fmtVal)) - case v3.FilterOperatorLike: - conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) - case v3.FilterOperatorNotLike: - conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) - case v3.FilterOperatorRegex: - conditions = append(conditions, fmt.Sprintf("match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) - case v3.FilterOperatorNotRegex: - conditions = append(conditions, fmt.Sprintf("not match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) - case v3.FilterOperatorGreaterThan: - conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') > %s", item.Key.Key, fmtVal)) - case v3.FilterOperatorGreaterThanOrEq: - conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') >= %s", item.Key.Key, fmtVal)) - case v3.FilterOperatorLessThan: - conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') < %s", item.Key.Key, fmtVal)) - case v3.FilterOperatorLessThanOrEq: - conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') <= %s", item.Key.Key, fmtVal)) - case v3.FilterOperatorContains: - conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) - case v3.FilterOperatorNotContains: - conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) - case v3.FilterOperatorExists: - conditions = append(conditions, fmt.Sprintf("has(JSONExtractKeys(labels), '%s')", item.Key.Key)) - case v3.FilterOperatorNotExists: - conditions = append(conditions, fmt.Sprintf("not has(JSONExtractKeys(labels), '%s')", item.Key.Key)) - default: - return "", fmt.Errorf("unsupported operation") - } - } - } - queryString := strings.Join(conditions, " AND ") - - var selectLabels string - if aggregateOperator == v3.AggregateOperatorNoOp || aggregateOperator == v3.AggregateOperatorRate { - selectLabels = "labels," - } else { - for _, tag := range groupTags { - selectLabels += fmt.Sprintf(" JSONExtractString(labels, '%s') as %s,", tag.Key, tag.Key) - } - } - - filterSubQuery := fmt.Sprintf("SELECT %s fingerprint FROM %s.%s WHERE %s", selectLabels, constants.SIGNOZ_METRIC_DBNAME, constants.SIGNOZ_TIMESERIES_LOCAL_TABLENAME, 
queryString) - - return filterSubQuery, nil -} - -func buildMetricQuery(start, end, step int64, mq *v3.BuilderQuery, tableName string) (string, error) { +func buildMetricQuery(start, end, step int64, mq *v3.BuilderQuery) (string, error) { metricQueryGroupBy := mq.GroupBy - // if the aggregate operator is a histogram quantile, and user has not forgotten - // the le tag in the group by then add the le tag to the group by - if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant75 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant90 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant95 || - mq.AggregateOperator == v3.AggregateOperatorHistQuant99 { - found := false - for _, tag := range mq.GroupBy { - if tag.Key == "le" { - found = true - break - } - } - if !found { - metricQueryGroupBy = append( - metricQueryGroupBy, - v3.AttributeKey{ - Key: "le", - DataType: v3.AttributeKeyDataTypeString, - Type: v3.AttributeKeyTypeTag, - IsColumn: false, - }, - ) - } - } - - filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq) + filterSubQuery, err := helpers.PrepareTimeseriesFilterQueryV3(start, end, mq) if err != nil { return "", err } - samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms < %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) + samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli < %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end) // Select the aggregate value for interval queryTmpl := "SELECT %s" + - " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," + + " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," + " %s as value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME + + " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + @@ -309,9 +197,9 @@ func buildMetricQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str case v3.AggregateOperatorNoOp: queryTmpl := "SELECT fingerprint, labels as fullLabels," + - " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," + + " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," + " any(value) as value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME + + " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." 
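For reference when reading these builders and their expected queries: the rateWithoutNegative window expression defined near the top of query_builder.go computes a per-second rate between consecutive points of one fingerprint, rate = (value - prev_value) / (ts - prev_ts), and yields nan in two cases that the surrounding WHERE isNaN(rate_value) = 0 then filters out: a negative delta (a counter reset), and a gap of 86400 s or more to the previous point (lagInFrame's defaults of 0 and toDate('1970-01-01') make the first point of every partition look like such a gap, so it is dropped too). As a worked example, samples of 100 at t = 60 s and 160 at t = 120 s yield (160 - 100) / (120 - 60) = 1 per second, while a reset from 160 down to 10 produces a negative delta and is discarded rather than surfacing as a huge spurious rate.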
+ constants.SIGNOZ_SAMPLES_V4_TABLENAME + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + @@ -461,15 +349,15 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P var err error if mq.Temporality == v3.Delta { if panelType == v3.PanelTypeTable { - query, err = buildDeltaMetricQueryForTable(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME) + query, err = buildDeltaMetricQueryForTable(start, end, mq.StepInterval, mq) } else { - query, err = buildDeltaMetricQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME) + query, err = buildDeltaMetricQuery(start, end, mq.StepInterval, mq) } } else { if panelType == v3.PanelTypeTable { - query, err = buildMetricQueryForTable(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME) + query, err = buildMetricQueryForTable(start, end, mq.StepInterval, mq) } else { - query, err = buildMetricQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME) + query, err = buildMetricQuery(start, end, mq.StepInterval, mq) } } diff --git a/pkg/query-service/app/metrics/v3/query_builder_test.go b/pkg/query-service/app/metrics/v3/query_builder_test.go index 2ad6013de6..5b85036007 100644 --- a/pkg/query-service/app/metrics/v3/query_builder_test.go +++ b/pkg/query-service/app/metrics/v3/query_builder_test.go @@ -50,6 +50,7 @@ func TestBuildQueryWithFilters(t *testing.T) { }}, AggregateOperator: v3.AggregateOperatorRateMax, Expression: "A", + Temporality: v3.Cumulative, }, }, }, @@ -57,7 +58,7 @@ func TestBuildQueryWithFilters(t *testing.T) { query, err := PrepareMetricQuery(q.Start, q.End, q.CompositeQuery.QueryType, q.CompositeQuery.PanelType, q.CompositeQuery.BuilderQueries["A"], Options{PreferRPM: false}) require.NoError(t, err) - require.Contains(t, query, "WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'a') != 'b'") + require.Contains(t, query, "WHERE metric_name = 'name' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'a') != 'b'") require.Contains(t, query, rateWithoutNegative) require.Contains(t, query, "not match(JSONExtractString(labels, 'code'), 'ERROR_*')") }) @@ -78,6 +79,7 @@ func TestBuildQueryWithMultipleQueries(t *testing.T) { {Key: v3.AttributeKey{Key: "in"}, Value: []interface{}{"a", "b", "c"}, Operator: v3.FilterOperatorIn}, }}, AggregateOperator: v3.AggregateOperatorRateAvg, + Temporality: v3.Cumulative, Expression: "A", }, "B": { @@ -85,6 +87,7 @@ func TestBuildQueryWithMultipleQueries(t *testing.T) { StepInterval: 60, AggregateAttribute: v3.AttributeKey{Key: "name2"}, AggregateOperator: v3.AggregateOperatorRateMax, + Temporality: v3.Cumulative, Expression: "B", }, }, @@ -94,158 +97,15 @@ func TestBuildQueryWithMultipleQueries(t *testing.T) { query, err := PrepareMetricQuery(q.Start, q.End, q.CompositeQuery.QueryType, q.CompositeQuery.PanelType, q.CompositeQuery.BuilderQueries["A"], Options{PreferRPM: false}) require.NoError(t, err) - require.Contains(t, query, "WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'in') IN ['a','b','c']") + require.Contains(t, query, "WHERE metric_name = 'name' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'in') IN ['a','b','c']") require.Contains(t, query, rateWithoutNegative) }) } -func TestBuildQueryOperators(t 
*testing.T) { - testCases := []struct { - operator v3.FilterOperator - filterSet v3.FilterSet - expectedWhereClause string - }{ - { - operator: v3.FilterOperatorEqual, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "service_name"}, Value: "route", Operator: v3.FilterOperatorEqual}, - }, - }, - expectedWhereClause: "JSONExtractString(labels, 'service_name') = 'route'", - }, - { - operator: v3.FilterOperatorNotEqual, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "service_name"}, Value: "route", Operator: v3.FilterOperatorNotEqual}, - }, - }, - expectedWhereClause: "JSONExtractString(labels, 'service_name') != 'route'", - }, - { - operator: v3.FilterOperatorRegex, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "service_name"}, Value: "out", Operator: v3.FilterOperatorRegex}, - }, - }, - expectedWhereClause: "match(JSONExtractString(labels, 'service_name'), 'out')", - }, - { - operator: v3.FilterOperatorNotRegex, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "service_name"}, Value: "out", Operator: v3.FilterOperatorNotRegex}, - }, - }, - expectedWhereClause: "not match(JSONExtractString(labels, 'service_name'), 'out')", - }, - { - operator: v3.FilterOperatorIn, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "service_name"}, Value: []interface{}{"route", "driver"}, Operator: v3.FilterOperatorIn}, - }, - }, - expectedWhereClause: "JSONExtractString(labels, 'service_name') IN ['route','driver']", - }, - { - operator: v3.FilterOperatorNotIn, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "service_name"}, Value: []interface{}{"route", "driver"}, Operator: v3.FilterOperatorNotIn}, - }, - }, - expectedWhereClause: "JSONExtractString(labels, 'service_name') NOT IN ['route','driver']", - }, - { - operator: v3.FilterOperatorExists, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "horn"}, Operator: v3.FilterOperatorExists}, - }, - }, - expectedWhereClause: "has(JSONExtractKeys(labels), 'horn')", - }, - { - operator: v3.FilterOperatorNotExists, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "horn"}, Operator: v3.FilterOperatorNotExists}, - }, - }, - expectedWhereClause: "not has(JSONExtractKeys(labels), 'horn')", - }, - { - operator: v3.FilterOperatorContains, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "service_name"}, Value: "out", Operator: v3.FilterOperatorContains}, - }, - }, - expectedWhereClause: "like(JSONExtractString(labels, 'service_name'), '%out%')", - }, - { - operator: v3.FilterOperatorNotContains, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "serice_name"}, Value: "out", Operator: v3.FilterOperatorNotContains}, - }, - }, - expectedWhereClause: "notLike(JSONExtractString(labels, 'serice_name'), '%out%')", - }, - { - operator: v3.FilterOperatorLike, - filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "service_name"}, Value: "dri", Operator: v3.FilterOperatorLike}, - }, - }, - expectedWhereClause: "like(JSONExtractString(labels, 'service_name'), 'dri')", - }, - { - operator: v3.FilterOperatorNotLike, - 
filterSet: v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{ - {Key: v3.AttributeKey{Key: "serice_name"}, Value: "dri", Operator: v3.FilterOperatorNotLike}, - }, - }, - expectedWhereClause: "notLike(JSONExtractString(labels, 'serice_name'), 'dri')", - }, - } - - for i, tc := range testCases { - t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { - mq := v3.BuilderQuery{ - QueryName: "A", - StepInterval: 60, - AggregateAttribute: v3.AttributeKey{Key: "signoz_calls_total"}, - AggregateOperator: v3.AggregateOperatorSum, - } - whereClause, err := buildMetricsTimeSeriesFilterQuery(&tc.filterSet, []v3.AttributeKey{}, &mq) - require.NoError(t, err) - require.Contains(t, whereClause, tc.expectedWhereClause) - }) - } -} - func TestBuildQueryXRate(t *testing.T) { t.Run("TestBuildQueryXRate", func(t *testing.T) { - tmpl := `SELECT ts, %s(rate_value) as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified']) as filtered_time_series USING fingerprint WHERE metric_name = 'name' AND timestamp_ms >= 1650991920000 AND timestamp_ms < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts) ) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts` + tmpl := `SELECT ts, %s(rate_value) as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'name' AND temporality = '' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'name' AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts) ) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts` cases := []struct { aggregateOperator v3.AggregateOperator @@ -298,7 +158,7 @@ func TestBuildQueryXRate(t *testing.T) { func TestBuildQueryRPM(t *testing.T) { t.Run("TestBuildQueryXRate", func(t *testing.T) { - tmpl := `SELECT ts, ceil(value * 60) as value FROM (SELECT ts, %s(rate_value) as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM 
signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified']) as filtered_time_series USING fingerprint WHERE metric_name = 'name' AND timestamp_ms >= 1650991920000 AND timestamp_ms < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts) ) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts)` + tmpl := `SELECT ts, ceil(value * 60) as value FROM (SELECT ts, %s(rate_value) as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'name' AND temporality = '' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'name' AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts) ) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts)` cases := []struct { aggregateOperator v3.AggregateOperator @@ -377,7 +237,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) { }, }, // 20:10:00 - 20:41:00 - expected: "timestamp_ms >= 1686082200000 AND timestamp_ms < 1686084060000", + expected: "unix_milli >= 1686082200000 AND unix_milli < 1686084060000", }, { name: "TestBuildQueryAdjustedTimes start close to 50 seconds", @@ -402,7 +262,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) { }, }, // 20:10:00 - 20:41:00 - expected: "timestamp_ms >= 1686082200000 AND timestamp_ms < 1686084060000", + expected: "unix_milli >= 1686082200000 AND unix_milli < 1686084060000", }, { name: "TestBuildQueryAdjustedTimes start close to 42 seconds with step 30 seconds", @@ -427,7 +287,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) { }, }, // 20:11:00 - 20:41:00 - expected: "timestamp_ms >= 1686082260000 AND timestamp_ms < 1686084060000", + expected: "unix_milli >= 1686082260000 AND unix_milli < 1686084060000", }, { name: "TestBuildQueryAdjustedTimes start close to 42 seconds with step 30 seconds and end close to 30 seconds", @@ -452,7 +312,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) { }, }, // 20:11:00 - 20:41:00 - expected: "timestamp_ms >= 1686082260000 AND timestamp_ms < 1686084060000", + expected: "unix_milli >= 1686082260000 AND unix_milli < 1686084060000", }, { name: "TestBuildQueryAdjustedTimes start close to 42 seconds with step 300 seconds and end close to 30 seconds", @@ -479,7 +339,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) { // 20:05:00 - 20:41:00 // 20:10:00 is the nearest 5 minute interval, but we round down to 20:05:00 // as this is a rate query and we want to include the previous value for the first interval - expected: "timestamp_ms >= 1686081900000 AND timestamp_ms < 1686084060000", + expected: "unix_milli >= 1686081900000 AND unix_milli < 1686084060000", }, { name: "TestBuildQueryAdjustedTimes start close to 42 seconds with step 180 seconds and end close to 30 seconds", @@ -506,7 
+366,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) { // 20:06:00 - 20:39:00 // 20:09:00 is the nearest 3 minute interval, but we round down to 20:06:00 // as this is a rate query and we want to include the previous value for the first interval - expected: "timestamp_ms >= 1686081960000 AND timestamp_ms < 1686084060000", + expected: "unix_milli >= 1686081960000 AND unix_milli < 1686084060000", }, } diff --git a/pkg/query-service/app/metrics/v4/helpers/sub_query.go b/pkg/query-service/app/metrics/v4/helpers/sub_query.go index d4cd103719..e1edc5a964 100644 --- a/pkg/query-service/app/metrics/v4/helpers/sub_query.go +++ b/pkg/query-service/app/metrics/v4/helpers/sub_query.go @@ -117,3 +117,88 @@ func PrepareTimeseriesFilterQuery(start, end int64, mq *v3.BuilderQuery) (string return filterSubQuery, nil } + +// PrepareTimeseriesFilterQueryV3 builds the sub-query to be used for filtering timeseries based on the search criteria +func PrepareTimeseriesFilterQueryV3(start, end int64, mq *v3.BuilderQuery) (string, error) { + var conditions []string + var fs *v3.FilterSet = mq.Filters + var groupTags []v3.AttributeKey = mq.GroupBy + + conditions = append(conditions, fmt.Sprintf("metric_name = %s", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key))) + conditions = append(conditions, fmt.Sprintf("temporality = '%s'", mq.Temporality)) + + start, end, tableName := which(start, end) + + conditions = append(conditions, fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", start, end)) + + if fs != nil && len(fs.Items) != 0 { + for _, item := range fs.Items { + toFormat := item.Value + op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator)))) + if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains { + toFormat = fmt.Sprintf("%%%s%%", toFormat) + } + fmtVal := utils.ClickHouseFormattedValue(toFormat) + switch op { + case v3.FilterOperatorEqual: + conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') = %s", item.Key.Key, fmtVal)) + case v3.FilterOperatorNotEqual: + conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') != %s", item.Key.Key, fmtVal)) + case v3.FilterOperatorIn: + conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') IN %s", item.Key.Key, fmtVal)) + case v3.FilterOperatorNotIn: + conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') NOT IN %s", item.Key.Key, fmtVal)) + case v3.FilterOperatorLike: + conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) + case v3.FilterOperatorNotLike: + conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) + case v3.FilterOperatorRegex: + conditions = append(conditions, fmt.Sprintf("match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) + case v3.FilterOperatorNotRegex: + conditions = append(conditions, fmt.Sprintf("not match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) + case v3.FilterOperatorGreaterThan: + conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') > %s", item.Key.Key, fmtVal)) + case v3.FilterOperatorGreaterThanOrEq: + conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') >= %s", item.Key.Key, fmtVal)) + case v3.FilterOperatorLessThan: + conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') < %s", item.Key.Key, fmtVal)) + case v3.FilterOperatorLessThanOrEq: + conditions = append(conditions, 
fmt.Sprintf("JSONExtractString(labels, '%s') <= %s", item.Key.Key, fmtVal)) + case v3.FilterOperatorContains: + conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) + case v3.FilterOperatorNotContains: + conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal)) + case v3.FilterOperatorExists: + conditions = append(conditions, fmt.Sprintf("has(JSONExtractKeys(labels), '%s')", item.Key.Key)) + case v3.FilterOperatorNotExists: + conditions = append(conditions, fmt.Sprintf("not has(JSONExtractKeys(labels), '%s')", item.Key.Key)) + default: + return "", fmt.Errorf("unsupported filter operator") + } + } + } + whereClause := strings.Join(conditions, " AND ") + + var selectLabels string + + if mq.AggregateOperator == v3.AggregateOperatorNoOp || mq.AggregateOperator == v3.AggregateOperatorRate { + selectLabels += "labels, " + } else { + for _, tag := range groupTags { + selectLabels += fmt.Sprintf("JSONExtractString(labels, '%s') as %s, ", tag.Key, tag.Key) + } + } + + // The table JOIN key always exists + selectLabels += "fingerprint" + + filterSubQuery := fmt.Sprintf( + "SELECT DISTINCT %s FROM %s.%s WHERE %s", + selectLabels, + constants.SIGNOZ_METRIC_DBNAME, + tableName, + whereClause, + ) + + return filterSubQuery, nil +} diff --git a/pkg/query-service/app/metrics/v4/query_builder.go b/pkg/query-service/app/metrics/v4/query_builder.go index c58c98c93f..bd8813dd3a 100644 --- a/pkg/query-service/app/metrics/v4/query_builder.go +++ b/pkg/query-service/app/metrics/v4/query_builder.go @@ -23,6 +23,8 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P var quantile float64 + percentileOperator := mq.SpaceAggregation + if v3.IsPercentileOperator(mq.SpaceAggregation) && mq.AggregateAttribute.Type != v3.AttributeKeyType(v3.MetricTypeExponentialHistogram) { quantile = v3.GetPercentileFromOperator(mq.SpaceAggregation) @@ -80,6 +82,7 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P // fixed-bucket histogram quantiles are calculated with UDF if quantile != 0 && mq.AggregateAttribute.Type != v3.AttributeKeyType(v3.MetricTypeExponentialHistogram) { query = fmt.Sprintf(`SELECT %s, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) as value FROM (%s) GROUP BY %s ORDER BY %s`, groupBy, quantile, query, groupBy, orderBy) + mq.SpaceAggregation = percentileOperator } return query, nil diff --git a/pkg/query-service/app/parser.go b/pkg/query-service/app/parser.go index f7f622e250..773156cc0d 100644 --- a/pkg/query-service/app/parser.go +++ b/pkg/query-service/app/parser.go @@ -1005,6 +1005,7 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder { for _, query := range queryRangeParams.CompositeQuery.BuilderQueries { // Formula query + // Check if the queries used in the expression can be joined if query.QueryName != query.Expression { expression, err := govaluate.NewEvaluableExpressionWithFunctions(query.Expression, evalFuncs()) if err != nil { @@ -1039,10 +1040,12 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE } } + // If the step interval is less than the minimum allowed step interval, set it to the minimum allowed step interval if minStep := common.MinAllowedStepInterval(queryRangeParams.Start, queryRangeParams.End); query.StepInterval < minStep { 
query.StepInterval = minStep } + // Remove the time shift function from the list of functions and set the shift by value var timeShiftBy int64 if len(query.Functions) > 0 { for idx := range query.Functions { @@ -1062,16 +1065,45 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE } query.ShiftBy = timeShiftBy + // for metrics v3 + // if the aggregate operator is a histogram quantile, and user has not forgotten + // the le tag in the group by then add the le tag to the group by + if query.AggregateOperator == v3.AggregateOperatorHistQuant50 || + query.AggregateOperator == v3.AggregateOperatorHistQuant75 || + query.AggregateOperator == v3.AggregateOperatorHistQuant90 || + query.AggregateOperator == v3.AggregateOperatorHistQuant95 || + query.AggregateOperator == v3.AggregateOperatorHistQuant99 { + found := false + for _, tag := range query.GroupBy { + if tag.Key == "le" { + found = true + break + } + } + if !found { + query.GroupBy = append( + query.GroupBy, + v3.AttributeKey{ + Key: "le", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + ) + } + } + if query.Filters == nil || len(query.Filters.Items) == 0 { continue } + for idx := range query.Filters.Items { item := &query.Filters.Items[idx] value := item.Value if value != nil { switch x := value.(type) { case string: - variableName := strings.Trim(x, "{{ . }}") + variableName := strings.Trim(x, "{[.$]}") if _, ok := queryRangeParams.Variables[variableName]; ok { item.Value = queryRangeParams.Variables[variableName] } @@ -1079,7 +1111,7 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE if len(x) > 0 { switch x[0].(type) { case string: - variableName := strings.Trim(x[0].(string), "{{ . 
}}") + variableName := strings.Trim(x[0].(string), "{[.$]}") if _, ok := queryRangeParams.Variables[variableName]; ok { item.Value = queryRangeParams.Variables[variableName] } @@ -1087,6 +1119,13 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE } } } + + if v3.FilterOperator(strings.ToLower((string(item.Operator)))) != v3.FilterOperatorIn && v3.FilterOperator(strings.ToLower((string(item.Operator)))) != v3.FilterOperatorNotIn { + // the value type should not be multiple values + if _, ok := item.Value.([]interface{}); ok { + return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("multiple values %s are not allowed for operator `%s` for key `%s`", item.Value, item.Operator, item.Key.Key)} + } + } } } } @@ -1104,6 +1143,13 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE if chQuery.Disabled { continue } + + for name, value := range queryRangeParams.Variables { + chQuery.Query = strings.Replace(chQuery.Query, fmt.Sprintf("{{%s}}", name), fmt.Sprint(value), -1) + chQuery.Query = strings.Replace(chQuery.Query, fmt.Sprintf("[[%s]]", name), fmt.Sprint(value), -1) + chQuery.Query = strings.Replace(chQuery.Query, fmt.Sprintf("$%s", name), fmt.Sprint(value), -1) + } + tmpl := template.New("clickhouse-query") tmpl, err := tmpl.Parse(chQuery.Query) if err != nil { @@ -1128,6 +1174,13 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE if promQuery.Disabled { continue } + + for name, value := range queryRangeParams.Variables { + promQuery.Query = strings.Replace(promQuery.Query, fmt.Sprintf("{{%s}}", name), fmt.Sprint(value), -1) + promQuery.Query = strings.Replace(promQuery.Query, fmt.Sprintf("[[%s]]", name), fmt.Sprint(value), -1) + promQuery.Query = strings.Replace(promQuery.Query, fmt.Sprintf("$%s", name), fmt.Sprint(value), -1) + } + tmpl := template.New("prometheus-query") tmpl, err := tmpl.Parse(promQuery.Query) if err != nil { diff --git a/pkg/query-service/app/parser_test.go b/pkg/query-service/app/parser_test.go index be113d08b3..9d58a190f7 100644 --- a/pkg/query-service/app/parser_test.go +++ b/pkg/query-service/app/parser_test.go @@ -652,12 +652,12 @@ func TestParseQueryRangeParamsDashboardVarsSubstitution(t *testing.T) { Items: []v3.FilterItem{ { Key: v3.AttributeKey{Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, - Operator: "EQ", + Operator: v3.FilterOperatorEqual, Value: "{{.service_name}}", }, { Key: v3.AttributeKey{Key: "operation_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, - Operator: "IN", + Operator: v3.FilterOperatorIn, Value: "{{.operation_name}}", }, }, @@ -675,6 +675,161 @@ func TestParseQueryRangeParamsDashboardVarsSubstitution(t *testing.T) { expectErr: false, expectedValue: []interface{}{"route", []interface{}{"GET /route", "POST /route"}}, }, + { + desc: "valid builder query with dashboard variables {{service_name}} and {{operation_name}}", + compositeQuery: v3.CompositeQuery{ + PanelType: v3.PanelTypeGraph, + QueryType: v3.QueryTypeBuilder, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorSum, + AggregateAttribute: v3.AttributeKey{Key: "attribute_metrics"}, + Expression: "A", + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + Operator: 
v3.FilterOperatorEqual, + Value: "{{service_name}}", + }, + { + Key: v3.AttributeKey{Key: "operation_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + Operator: v3.FilterOperatorIn, + Value: "{{operation_name}}", + }, + }, + }, + }, + }, + }, + variables: map[string]interface{}{ + "service_name": "route", + "operation_name": []interface{}{ + "GET /route", + "POST /route", + }, + }, + expectErr: false, + expectedValue: []interface{}{"route", []interface{}{"GET /route", "POST /route"}}, + }, + { + desc: "valid builder query with dashboard variables [[service_name]] and [[operation_name]]", + compositeQuery: v3.CompositeQuery{ + PanelType: v3.PanelTypeGraph, + QueryType: v3.QueryTypeBuilder, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorSum, + AggregateAttribute: v3.AttributeKey{Key: "attribute_metrics"}, + Expression: "A", + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + Operator: v3.FilterOperatorEqual, + Value: "[[service_name]]", + }, + { + Key: v3.AttributeKey{Key: "operation_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + Operator: v3.FilterOperatorIn, + Value: "[[operation_name]]", + }, + }, + }, + }, + }, + }, + variables: map[string]interface{}{ + "service_name": "route", + "operation_name": []interface{}{ + "GET /route", + "POST /route", + }, + }, + expectErr: false, + expectedValue: []interface{}{"route", []interface{}{"GET /route", "POST /route"}}, + }, + { + desc: "valid builder query with dashboard variables $service_name and $operation_name", + compositeQuery: v3.CompositeQuery{ + PanelType: v3.PanelTypeGraph, + QueryType: v3.QueryTypeBuilder, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorSum, + AggregateAttribute: v3.AttributeKey{Key: "attribute_metrics"}, + Expression: "A", + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + Operator: v3.FilterOperatorEqual, + Value: "$service_name", + }, + { + Key: v3.AttributeKey{Key: "operation_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + Operator: v3.FilterOperatorIn, + Value: "$operation_name", + }, + }, + }, + }, + }, + }, + variables: map[string]interface{}{ + "service_name": "route", + "operation_name": []interface{}{ + "GET /route", + "POST /route", + }, + }, + expectErr: false, + expectedValue: []interface{}{"route", []interface{}{"GET /route", "POST /route"}}, + }, + { + desc: "multiple values for single select operator", + compositeQuery: v3.CompositeQuery{ + PanelType: v3.PanelTypeGraph, + QueryType: v3.QueryTypeBuilder, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorSum, + AggregateAttribute: v3.AttributeKey{Key: "attribute_metrics"}, + Expression: "A", + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{Key: "operation_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + Operator: v3.FilterOperatorEqual, + Value: "{{.operation_name}}", + }, + }, + }, + }, + }, + }, + 
variables: map[string]interface{}{ + "service_name": "route", + "operation_name": []interface{}{ + "GET /route", + "POST /route", + }, + }, + expectErr: true, + errMsg: "multiple values [GET /route POST /route] are not allowed for operator `=` for key `operation_name`", + }, } for _, tc := range reqCases { @@ -759,6 +914,72 @@ func TestParseQueryRangeParamsPromQLVars(t *testing.T) { expectErr: false, expectedQuery: "http_calls_total{service_name=\"route\", status_code=~\"200|505\"}", }, + { + desc: "valid prom query with dashboard variables {{service_name}} and {{status_code}}", + compositeQuery: v3.CompositeQuery{ + PanelType: v3.PanelTypeGraph, + QueryType: v3.QueryTypePromQL, + PromQueries: map[string]*v3.PromQuery{ + "A": { + Query: "http_calls_total{service_name=\"{{service_name}}\", status_code=~\"{{status_code}}\"}", + Disabled: false, + }, + }, + }, + variables: map[string]interface{}{ + "service_name": "route", + "status_code": []interface{}{ + 200, + 505, + }, + }, + expectErr: false, + expectedQuery: "http_calls_total{service_name=\"route\", status_code=~\"200|505\"}", + }, + { + desc: "valid prom query with dashboard variables [[service_name]] and [[status_code]]", + compositeQuery: v3.CompositeQuery{ + PanelType: v3.PanelTypeGraph, + QueryType: v3.QueryTypePromQL, + PromQueries: map[string]*v3.PromQuery{ + "A": { + Query: "http_calls_total{service_name=\"[[service_name]]\", status_code=~\"[[status_code]]\"}", + Disabled: false, + }, + }, + }, + variables: map[string]interface{}{ + "service_name": "route", + "status_code": []interface{}{ + 200, + 505, + }, + }, + expectErr: false, + expectedQuery: "http_calls_total{service_name=\"route\", status_code=~\"200|505\"}", + }, + { + desc: "valid prom query with dashboard variables $service_name and $status_code", + compositeQuery: v3.CompositeQuery{ + PanelType: v3.PanelTypeGraph, + QueryType: v3.QueryTypePromQL, + PromQueries: map[string]*v3.PromQuery{ + "A": { + Query: "http_calls_total{service_name=\"$service_name\", status_code=~\"$status_code\"}", + Disabled: false, + }, + }, + }, + variables: map[string]interface{}{ + "service_name": "route", + "status_code": []interface{}{ + 200, + 505, + }, + }, + expectErr: false, + expectedQuery: "http_calls_total{service_name=\"route\", status_code=~\"200|505\"}", + }, { desc: "valid prom query with dashboard variables", compositeQuery: v3.CompositeQuery{ diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go index 84677480b2..1f68879d0c 100644 --- a/pkg/query-service/app/querier/querier.go +++ b/pkg/query-service/app/querier/querier.go @@ -14,6 +14,7 @@ import ( metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" + chErrors "go.signoz.io/signoz/pkg/query-service/errors" "go.signoz.io/signoz/pkg/query-service/cache" "go.signoz.io/signoz/pkg/query-service/interfaces" @@ -283,7 +284,7 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series { return mergedSeries } -func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) { +func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) { cacheKeys := q.keyGenerator.GenerateKeys(params) @@ -306,13 +307,13 @@ func (q *querier) runBuilderQueries(ctx 
context.Context, params *v3.QueryRangePa close(ch) results := make([]*v3.Result, 0) - errQueriesByName := make(map[string]string) + errQueriesByName := make(map[string]error) var errs []error for result := range ch { if result.Err != nil { errs = append(errs, result.Err) - errQueriesByName[result.Name] = result.Err.Error() + errQueriesByName[result.Name] = result.Err continue } results = append(results, &v3.Result{ @@ -329,7 +330,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa return results, err, errQueriesByName } -func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]string) { +func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]error) { channelResults := make(chan channelResult, len(params.CompositeQuery.PromQueries)) var wg sync.WaitGroup cacheKeys := q.keyGenerator.GenerateKeys(params) @@ -390,13 +391,13 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam close(channelResults) results := make([]*v3.Result, 0) - errQueriesByName := make(map[string]string) + errQueriesByName := make(map[string]error) var errs []error for result := range channelResults { if result.Err != nil { errs = append(errs, result.Err) - errQueriesByName[result.Name] = result.Err.Error() + errQueriesByName[result.Name] = result.Err continue } results = append(results, &v3.Result{ @@ -413,7 +414,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam return results, err, errQueriesByName } -func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]string) { +func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]error) { channelResults := make(chan channelResult, len(params.CompositeQuery.ClickHouseQueries)) var wg sync.WaitGroup for queryName, clickHouseQuery := range params.CompositeQuery.ClickHouseQueries { @@ -431,13 +432,13 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang close(channelResults) results := make([]*v3.Result, 0) - errQueriesByName := make(map[string]string) + errQueriesByName := make(map[string]error) var errs []error for result := range channelResults { if result.Err != nil { errs = append(errs, result.Err) - errQueriesByName[result.Name] = result.Err.Error() + errQueriesByName[result.Name] = result.Err continue } results = append(results, &v3.Result{ @@ -453,7 +454,7 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang return results, err, errQueriesByName } -func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) { +func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) { queries, err := q.builder.PrepareQueries(params, keys) @@ -482,13 +483,13 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan close(ch) var errs []error - errQuriesByName := make(map[string]string) + errQuriesByName := make(map[string]error) res := make([]*v3.Result, 0) // read values from the channel for r := range ch { if r.Err != nil { errs = append(errs, r.Err) - errQuriesByName[r.Name] = r.Query + errQuriesByName[r.Name] = r.Err continue } res = 
append(res, &v3.Result{ @@ -502,10 +503,10 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan return res, nil, nil } -func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) { +func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) { var results []*v3.Result var err error - var errQueriesByName map[string]string + var errQueriesByName map[string]error if params.CompositeQuery != nil { switch params.CompositeQuery.QueryType { case v3.QueryTypeBuilder: @@ -514,6 +515,13 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, } else { results, err, errQueriesByName = q.runBuilderQueries(ctx, params, keys) } + // in builder queries, the only errors we expose are the ones that exceed the resource limits + // everything else is an internal error, since it is not actionable by the user + for name, queryErr := range errQueriesByName { + if !chErrors.IsResourceLimitError(queryErr) { + delete(errQueriesByName, name) + } + } case v3.QueryTypePromQL: results, err, errQueriesByName = q.runPromQueries(ctx, params) case v3.QueryTypeClickHouseSQL: diff --git a/pkg/query-service/app/querier/querier_test.go b/pkg/query-service/app/querier/querier_test.go index 37514b6f23..80a3a07422 100644 --- a/pkg/query-service/app/querier/querier_test.go +++ b/pkg/query-service/app/querier/querier_test.go @@ -572,8 +572,8 @@ func TestQueryRange(t *testing.T) { } q := NewQuerier(opts) expectedTimeRangeInQueryString := []string{ - fmt.Sprintf("timestamp_ms >= %d AND timestamp_ms < %d", 1675115520000, 1675115580000+120*60*1000), - fmt.Sprintf("timestamp_ms >= %d AND timestamp_ms < %d", 1675115520000+120*60*1000, 1675115580000+180*60*1000), + fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000), + fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000+120*60*1000, 1675115580000+180*60*1000), fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", 1675115580000*1000000, (1675115580000+120*60*1000)*int64(1000000)), fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)), } @@ -683,7 +683,7 @@ func TestQueryRangeValueType(t *testing.T) { q := NewQuerier(opts) // No caching expectedTimeRangeInQueryString := []string{ - fmt.Sprintf("timestamp_ms >= %d AND timestamp_ms < %d", 1675115520000, 1675115580000+120*60*1000), + fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000), fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)), } diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index 359546ea03..b3bf2c66f8 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ b/pkg/query-service/app/querier/v2/querier.go @@ -14,6 +14,7 @@ import ( metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4" "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" + chErrors "go.signoz.io/signoz/pkg/query-service/errors" "go.signoz.io/signoz/pkg/query-service/cache" "go.signoz.io/signoz/pkg/query-service/interfaces" @@ -281,7 +282,7 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series { return 
mergedSeries } -func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) { +func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) { cacheKeys := q.keyGenerator.GenerateKeys(params) @@ -299,13 +300,13 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa close(ch) results := make([]*v3.Result, 0) - errQueriesByName := make(map[string]string) + errQueriesByName := make(map[string]error) var errs []error for result := range ch { if result.Err != nil { errs = append(errs, result.Err) - errQueriesByName[result.Name] = result.Err.Error() + errQueriesByName[result.Name] = result.Err continue } results = append(results, &v3.Result{ @@ -322,7 +323,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa return results, err, errQueriesByName } -func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]string) { +func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]error) { channelResults := make(chan channelResult, len(params.CompositeQuery.PromQueries)) var wg sync.WaitGroup cacheKeys := q.keyGenerator.GenerateKeys(params) @@ -383,13 +384,13 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam close(channelResults) results := make([]*v3.Result, 0) - errQueriesByName := make(map[string]string) + errQueriesByName := make(map[string]error) var errs []error for result := range channelResults { if result.Err != nil { errs = append(errs, result.Err) - errQueriesByName[result.Name] = result.Err.Error() + errQueriesByName[result.Name] = result.Err continue } results = append(results, &v3.Result{ @@ -406,7 +407,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam return results, err, errQueriesByName } -func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]string) { +func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]error) { channelResults := make(chan channelResult, len(params.CompositeQuery.ClickHouseQueries)) var wg sync.WaitGroup for queryName, clickHouseQuery := range params.CompositeQuery.ClickHouseQueries { @@ -424,13 +425,13 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang close(channelResults) results := make([]*v3.Result, 0) - errQueriesByName := make(map[string]string) + errQueriesByName := make(map[string]error) var errs []error for result := range channelResults { if result.Err != nil { errs = append(errs, result.Err) - errQueriesByName[result.Name] = result.Err.Error() + errQueriesByName[result.Name] = result.Err continue } results = append(results, &v3.Result{ @@ -446,7 +447,7 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang return results, err, errQueriesByName } -func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) { +func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) { queries, err := 
q.builder.PrepareQueries(params, keys) @@ -475,13 +476,13 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan close(ch) var errs []error - errQuriesByName := make(map[string]string) + errQuriesByName := make(map[string]error) res := make([]*v3.Result, 0) // read values from the channel for r := range ch { if r.Err != nil { errs = append(errs, r.Err) - errQuriesByName[r.Name] = r.Query + errQuriesByName[r.Name] = r.Err continue } res = append(res, &v3.Result{ @@ -495,10 +496,10 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan return res, nil, nil } -func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) { +func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) { var results []*v3.Result var err error - var errQueriesByName map[string]string + var errQueriesByName map[string]error if params.CompositeQuery != nil { switch params.CompositeQuery.QueryType { case v3.QueryTypeBuilder: @@ -507,6 +508,13 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, } else { results, err, errQueriesByName = q.runBuilderQueries(ctx, params, keys) } + // in builder queries, the only errors we expose are the ones that exceed the resource limits + // everything else is an internal error, since it is not actionable by the user + for name, queryErr := range errQueriesByName { + if !chErrors.IsResourceLimitError(queryErr) { + delete(errQueriesByName, name) + } + } case v3.QueryTypePromQL: results, err, errQueriesByName = q.runPromQueries(ctx, params) case v3.QueryTypeClickHouseSQL: diff --git a/pkg/query-service/app/queryBuilder/query_builder_test.go b/pkg/query-service/app/queryBuilder/query_builder_test.go index 65fe21e1d7..3d170a7255 100644 --- a/pkg/query-service/app/queryBuilder/query_builder_test.go +++ b/pkg/query-service/app/queryBuilder/query_builder_test.go @@ -27,6 +27,7 @@ func TestBuildQueryWithMultipleQueriesAndFormula(t *testing.T) { {Key: v3.AttributeKey{Key: "in"}, Value: []interface{}{"a", "b", "c"}, Operator: v3.FilterOperatorIn}, }}, AggregateOperator: v3.AggregateOperatorRateMax, + Temporality: v3.Cumulative, Expression: "A", }, "B": { @@ -35,6 +36,7 @@ func TestBuildQueryWithMultipleQueriesAndFormula(t *testing.T) { AggregateAttribute: v3.AttributeKey{Key: "name2"}, DataSource: v3.DataSourceMetrics, AggregateOperator: v3.AggregateOperatorRateAvg, + Temporality: v3.Cumulative, Expression: "B", }, "C": { @@ -55,7 +57,7 @@ func TestBuildQueryWithMultipleQueriesAndFormula(t *testing.T) { require.NoError(t, err) require.Contains(t, queries["C"], "SELECT A.`ts` as `ts`, A.value / B.value") - require.Contains(t, queries["C"], "WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'in') IN ['a','b','c']") + require.Contains(t, queries["C"], "WHERE metric_name = 'name' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'in') IN ['a','b','c']") require.Contains(t, queries["C"], "(value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window)))") }) } @@ -257,7 +259,7 @@ func TestDeltaQueryBuilder(t *testing.T) { }, }, queryToTest: "A", - expected: "SELECT toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 
as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY ts ORDER BY ts", + expected: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts", }, { name: "TestQueryWithExpression - Error rate", @@ -327,7 +329,7 @@ func TestDeltaQueryBuilder(t *testing.T) { }, }, queryToTest: "C", - expected: "SELECT A.`ts` as `ts`, A.value * 100 / B.value as value FROM (SELECT toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, 'status_code') IN ['STATUS_CODE_ERROR'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY ts ORDER BY ts) as A INNER JOIN (SELECT toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY ts ORDER BY ts) as B ON A.`ts` = B.`ts`", + expected: "SELECT A.`ts` as `ts`, A.value * 100 / B.value as value FROM (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND 
JSONExtractString(labels, 'status_code') IN ['STATUS_CODE_ERROR'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts) as A INNER JOIN (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts) as B ON A.`ts` = B.`ts`", }, { name: "TestQuery - Quantile", @@ -348,13 +350,14 @@ func TestDeltaQueryBuilder(t *testing.T) { Temporality: v3.Delta, GroupBy: []v3.AttributeKey{ {Key: "service_name"}, + {Key: "le"}, }, }, }, }, }, queryToTest: "A", - expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) as value FROM (SELECT service_name,le, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' ) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts", + expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) as value FROM (SELECT service_name,le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts", }, } diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index fe2ad8c86c..6e398a42e0 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -206,12 +206,9 @@ var GroupByColMap = map[string]struct{}{ const ( SIGNOZ_METRIC_DBNAME = "signoz_metrics" - SIGNOZ_SAMPLES_TABLENAME = "distributed_samples_v2" SIGNOZ_SAMPLES_V4_TABLENAME = "distributed_samples_v4" - SIGNOZ_TIMESERIES_TABLENAME = "distributed_time_series_v2" 
SIGNOZ_TRACE_DBNAME = "signoz_traces" SIGNOZ_SPAN_INDEX_TABLENAME = "distributed_signoz_index_v2" - SIGNOZ_TIMESERIES_LOCAL_TABLENAME = "time_series_v2" SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME = "time_series_v4" SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME = "time_series_v4_6hrs" SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME = "time_series_v4_1day" diff --git a/pkg/query-service/errors/clickhouse.go b/pkg/query-service/errors/clickhouse.go new file mode 100644 index 0000000000..573f1fae8c --- /dev/null +++ b/pkg/query-service/errors/clickhouse.go @@ -0,0 +1,42 @@ +package errors + +import "errors" + +var ( + // ErrResourceBytesLimitExceeded is returned when the resource bytes limit is exceeded + ErrResourceBytesLimitExceeded = NewResourceLimitError(errors.New("resource bytes limit exceeded, try applying filters such as service.name, etc. to reduce the data size")) + // ErrResourceTimeLimitExceeded is returned when the resource time limit is exceeded + ErrResourceTimeLimitExceeded = NewResourceLimitError(errors.New("resource time limit exceeded, try applying filters such as service.name, etc. to reduce the data size")) +) + +type ResourceLimitError struct { + err error +} + +func NewResourceLimitError(err error) error { + return &ResourceLimitError{err: err} +} + +func (e *ResourceLimitError) Error() string { + return e.err.Error() +} + +func (e *ResourceLimitError) Unwrap() error { + return e.err +} + +func IsResourceLimitError(err error) bool { + if err == nil { + return false + } + var target *ResourceLimitError + return errors.As(err, &target) +} + +func (e *ResourceLimitError) MarshalJSON() ([]byte, error) { + return []byte(`"` + e.Error() + `"`), nil +} + +func (e *ResourceLimitError) UnmarshalJSON([]byte) error { + return nil +} diff --git a/pkg/query-service/interfaces/interface.go b/pkg/query-service/interfaces/interface.go index 814f7b9071..4c89f6f793 100644 --- a/pkg/query-service/interfaces/interface.go +++ b/pkg/query-service/interfaces/interface.go @@ -107,7 +107,7 @@ type Reader interface { } type Querier interface { - QueryRange(context.Context, *v3.QueryRangeParamsV3, map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) + QueryRange(context.Context, *v3.QueryRangeParamsV3, map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) // test helpers QueriesExecuted() []string diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go index b6f5e0281d..793ce25bf2 100644 --- a/pkg/query-service/main.go +++ b/pkg/query-service/main.go @@ -11,6 +11,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/app" "go.signoz.io/signoz/pkg/query-service/auth" "go.signoz.io/signoz/pkg/query-service/constants" + "go.signoz.io/signoz/pkg/query-service/migrate" "go.signoz.io/signoz/pkg/query-service/version" "go.uber.org/zap" @@ -52,6 +53,8 @@ func main() { flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)") flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)") flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')") + // Allow a flag name consistent with the signoz collector's naming + flag.StringVar(&cluster, "cluster-name", "cluster", "(cluster name - defaults to 'cluster')") flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool, only used with clickhouse if not set in ClickHouseUrl env var DSN.)") flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time, only used with clickhouse if not set in 
ClickHouseUrl env var DSN.)") flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection, only used with clickhouse if not set in ClickHouseUrl env var DSN.)") @@ -90,6 +93,12 @@ func main() { zap.L().Info("JWT secret key set successfully.") } + if err := migrate.Migrate(constants.RELATIONAL_DATASOURCE_PATH); err != nil { + zap.L().Error("Failed to migrate", zap.Error(err)) + } else { + zap.L().Info("Migration successful") + } + server, err := app.NewServer(serverOptions) if err != nil { logger.Fatal("Failed to create server", zap.Error(err)) diff --git a/pkg/query-service/migrate/0_45_alerts_to_v4/run.go b/pkg/query-service/migrate/0_45_alerts_to_v4/run.go new file mode 100644 index 0000000000..f68f4ca43b --- /dev/null +++ b/pkg/query-service/migrate/0_45_alerts_to_v4/run.go @@ -0,0 +1,153 @@ +package alertstov4 + +import ( + "context" + "encoding/json" + + "github.com/jmoiron/sqlx" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/rules" + "go.uber.org/multierr" + "go.uber.org/zap" +) + +var Version = "0.45-alerts-to-v4" + +var mapTimeAggregation = map[v3.AggregateOperator]v3.TimeAggregation{ + v3.AggregateOperatorSum: v3.TimeAggregationSum, + v3.AggregateOperatorMin: v3.TimeAggregationMin, + v3.AggregateOperatorMax: v3.TimeAggregationMax, + v3.AggregateOperatorSumRate: v3.TimeAggregationRate, + v3.AggregateOperatorAvgRate: v3.TimeAggregationRate, + v3.AggregateOperatorMinRate: v3.TimeAggregationRate, + v3.AggregateOperatorMaxRate: v3.TimeAggregationRate, + v3.AggregateOperatorHistQuant50: v3.TimeAggregationUnspecified, + v3.AggregateOperatorHistQuant75: v3.TimeAggregationUnspecified, + v3.AggregateOperatorHistQuant90: v3.TimeAggregationUnspecified, + v3.AggregateOperatorHistQuant95: v3.TimeAggregationUnspecified, + v3.AggregateOperatorHistQuant99: v3.TimeAggregationUnspecified, +} + +var mapSpaceAggregation = map[v3.AggregateOperator]v3.SpaceAggregation{ + v3.AggregateOperatorSum: v3.SpaceAggregationSum, + v3.AggregateOperatorMin: v3.SpaceAggregationMin, + v3.AggregateOperatorMax: v3.SpaceAggregationMax, + v3.AggregateOperatorSumRate: v3.SpaceAggregationSum, + v3.AggregateOperatorAvgRate: v3.SpaceAggregationAvg, + v3.AggregateOperatorMinRate: v3.SpaceAggregationMin, + v3.AggregateOperatorMaxRate: v3.SpaceAggregationMax, + v3.AggregateOperatorHistQuant50: v3.SpaceAggregationPercentile50, + v3.AggregateOperatorHistQuant75: v3.SpaceAggregationPercentile75, + v3.AggregateOperatorHistQuant90: v3.SpaceAggregationPercentile90, + v3.AggregateOperatorHistQuant95: v3.SpaceAggregationPercentile95, + v3.AggregateOperatorHistQuant99: v3.SpaceAggregationPercentile99, +} + +func canMigrateOperator(operator v3.AggregateOperator) bool { + switch operator { + case v3.AggregateOperatorSum, + v3.AggregateOperatorMin, + v3.AggregateOperatorMax, + v3.AggregateOperatorSumRate, + v3.AggregateOperatorAvgRate, + v3.AggregateOperatorMinRate, + v3.AggregateOperatorMaxRate, + v3.AggregateOperatorHistQuant50, + v3.AggregateOperatorHistQuant75, + v3.AggregateOperatorHistQuant90, + v3.AggregateOperatorHistQuant95, + v3.AggregateOperatorHistQuant99: + return true + } + return false +} + +func Migrate(conn *sqlx.DB) error { + ruleDB := rules.NewRuleDB(conn) + storedRules, err := ruleDB.GetStoredRules(context.Background()) + if err != nil { + return err + } + + for _, storedRule := range storedRules { + parsedRule, errs := rules.ParsePostableRule([]byte(storedRule.Data)) + if len(errs) > 0 { + // this should not happen but if it 
does, we should not stop the migration + zap.L().Error("Error parsing rule", zap.Error(multierr.Combine(errs...)), zap.Int("rule", storedRule.Id)) + continue + } + zap.L().Info("Rule parsed", zap.Int("rule", storedRule.Id)) + updated := false + if parsedRule.RuleCondition != nil && parsedRule.Version == "" { + if parsedRule.RuleCondition.QueryType() == v3.QueryTypeBuilder { + // check if all the queries can be converted to v4 + canMigrate := true + for _, query := range parsedRule.RuleCondition.CompositeQuery.BuilderQueries { + if query.DataSource == v3.DataSourceMetrics && query.Expression == query.QueryName { + if !canMigrateOperator(query.AggregateOperator) { + canMigrate = false + break + } + } + } + + if canMigrate { + parsedRule.Version = "v4" + for _, query := range parsedRule.RuleCondition.CompositeQuery.BuilderQueries { + if query.DataSource == v3.DataSourceMetrics && query.Expression == query.QueryName { + // update aggregate attribute + if query.AggregateOperator == v3.AggregateOperatorSum || + query.AggregateOperator == v3.AggregateOperatorMin || + query.AggregateOperator == v3.AggregateOperatorMax { + query.AggregateAttribute.Type = "Gauge" + } + if query.AggregateOperator == v3.AggregateOperatorSumRate || + query.AggregateOperator == v3.AggregateOperatorAvgRate || + query.AggregateOperator == v3.AggregateOperatorMinRate || + query.AggregateOperator == v3.AggregateOperatorMaxRate { + query.AggregateAttribute.Type = "Sum" + } + + if query.AggregateOperator == v3.AggregateOperatorHistQuant50 || + query.AggregateOperator == v3.AggregateOperatorHistQuant75 || + query.AggregateOperator == v3.AggregateOperatorHistQuant90 || + query.AggregateOperator == v3.AggregateOperatorHistQuant95 || + query.AggregateOperator == v3.AggregateOperatorHistQuant99 { + query.AggregateAttribute.Type = "Histogram" + } + query.AggregateAttribute.DataType = v3.AttributeKeyDataTypeFloat64 + query.AggregateAttribute.IsColumn = true + query.TimeAggregation = mapTimeAggregation[query.AggregateOperator] + query.SpaceAggregation = mapSpaceAggregation[query.AggregateOperator] + query.AggregateOperator = v3.AggregateOperator(query.TimeAggregation) + updated = true + } + } + } + } + } + + if !updated { + zap.L().Info("Rule not updated", zap.Int("rule", storedRule.Id)) + continue + } + + ruleJSON, jsonErr := json.Marshal(parsedRule) + if jsonErr != nil { + zap.L().Error("Error marshalling rule; skipping rule migration", zap.Error(jsonErr), zap.Int("rule", storedRule.Id)) + continue + } + + stmt, prepareError := conn.PrepareContext(context.Background(), `UPDATE rules SET data=$1 WHERE id=$2;`) + if prepareError != nil { + zap.L().Error("Error in preparing statement for UPDATE to rules", zap.Error(prepareError)) + continue + } + + if _, err := stmt.Exec(ruleJSON, storedRule.Id); err != nil { + zap.L().Error("Error in Executing prepared statement for UPDATE to rules", zap.Error(err)) + } + stmt.Close() + } + return nil +} diff --git a/pkg/query-service/migrate/migate.go b/pkg/query-service/migrate/migate.go new file mode 100644 index 0000000000..f9d15a1567 --- /dev/null +++ b/pkg/query-service/migrate/migate.go @@ -0,0 +1,68 @@ +package migrate + +import ( + "database/sql" + + "github.com/jmoiron/sqlx" + _ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver used by sqlx.Connect + alertstov4 "go.signoz.io/signoz/pkg/query-service/migrate/0_45_alerts_to_v4" + "go.uber.org/zap" +) + +type DataMigration struct { + ID int `db:"id"` + Version string `db:"version"` + CreatedAt string `db:"created_at"` + Succeeded bool `db:"succeeded"` +} + +func initSchema(conn *sqlx.DB) error { + tableSchema 
:= ` + CREATE TABLE IF NOT EXISTS data_migrations ( + id SERIAL PRIMARY KEY, + version VARCHAR(255) NOT NULL UNIQUE, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + succeeded BOOLEAN NOT NULL DEFAULT FALSE + ); + ` + _, err := conn.Exec(tableSchema) + if err != nil { + return err + } + return nil +} + +func getMigrationVersion(conn *sqlx.DB, version string) (*DataMigration, error) { + var migration DataMigration + err := conn.Get(&migration, "SELECT * FROM data_migrations WHERE version = $1", version) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, err + } + return &migration, nil +} + +func Migrate(dsn string) error { + conn, err := sqlx.Connect("sqlite3", dsn) + if err != nil { + return err + } + if err := initSchema(conn); err != nil { + return err + } + + if m, err := getMigrationVersion(conn, alertstov4.Version); err == nil && m == nil { + if err := alertstov4.Migrate(conn); err != nil { + zap.L().Error("failed to migrate 0.45_alerts_to_v4", zap.Error(err)) + } else { + _, err := conn.Exec("INSERT INTO data_migrations (version, succeeded) VALUES ($1, true)", alertstov4.Version) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/pkg/query-service/model/featureSet.go b/pkg/query-service/model/featureSet.go index 2d0f4a55be..0e7a1c0278 100644 --- a/pkg/query-service/model/featureSet.go +++ b/pkg/query-service/model/featureSet.go @@ -56,14 +56,14 @@ var BasicPlan = FeatureSet{ Name: QueryBuilderPanels, Active: true, Usage: 0, - UsageLimit: 20, + UsageLimit: -1, Route: "", }, Feature{ Name: QueryBuilderAlerts, Active: true, Usage: 0, - UsageLimit: 10, + UsageLimit: -1, Route: "", }, Feature{ diff --git a/pkg/query-service/model/v3/v3.go b/pkg/query-service/model/v3/v3.go index c979a94786..d1f55d7bb0 100644 --- a/pkg/query-service/model/v3/v3.go +++ b/pkg/query-service/model/v3/v3.go @@ -433,24 +433,30 @@ func (c *CompositeQuery) Validate() error { } if c.BuilderQueries == nil && c.ClickHouseQueries == nil && c.PromQueries == nil { - return fmt.Errorf("composite query must contain at least one query") + return fmt.Errorf("composite query must contain at least one query type") } - for name, query := range c.BuilderQueries { - if err := query.Validate(); err != nil { - return fmt.Errorf("builder query %s is invalid: %w", name, err) + if c.QueryType == QueryTypeBuilder { + for name, query := range c.BuilderQueries { + if err := query.Validate(c.PanelType); err != nil { + return fmt.Errorf("builder query %s is invalid: %w", name, err) + } } } - for name, query := range c.ClickHouseQueries { - if err := query.Validate(); err != nil { - return fmt.Errorf("clickhouse query %s is invalid: %w", name, err) + if c.QueryType == QueryTypeClickHouseSQL { + for name, query := range c.ClickHouseQueries { + if err := query.Validate(); err != nil { + return fmt.Errorf("clickhouse query %s is invalid: %w", name, err) + } } } - for name, query := range c.PromQueries { - if err := query.Validate(); err != nil { - return fmt.Errorf("prom query %s is invalid: %w", name, err) + if c.QueryType == QueryTypePromQL { + for name, query := range c.PromQueries { + if err := query.Validate(); err != nil { + return fmt.Errorf("prom query %s is invalid: %w", name, err) + } } } @@ -663,10 +669,11 @@ type BuilderQuery struct { ShiftBy int64 } -func (b *BuilderQuery) Validate() error { +func (b *BuilderQuery) Validate(panelType PanelType) error { if b == nil { return nil } + if b.QueryName == "" { return fmt.Errorf("query name is required") } @@ -711,6 +718,10 
@@ func (b *BuilderQuery) Validate() error { } } if b.GroupBy != nil { + if len(b.GroupBy) > 0 && panelType == PanelTypeList { + return fmt.Errorf("group by is not supported for list panel type") + } + for _, groupBy := range b.GroupBy { if err := groupBy.Validate(); err != nil { return fmt.Errorf("group by is invalid %w", err) diff --git a/pkg/query-service/rules/db.go b/pkg/query-service/rules/db.go index cf903884fd..23372ce911 100644 --- a/pkg/query-service/rules/db.go +++ b/pkg/query-service/rules/db.go @@ -49,7 +49,7 @@ type ruleDB struct { // todo: move init methods for creating tables -func newRuleDB(db *sqlx.DB) RuleDB { +func NewRuleDB(db *sqlx.DB) RuleDB { return &ruleDB{ db, } diff --git a/pkg/query-service/rules/manager.go b/pkg/query-service/rules/manager.go index cad02523d7..d649b565fd 100644 --- a/pkg/query-service/rules/manager.go +++ b/pkg/query-service/rules/manager.go @@ -108,7 +108,7 @@ func NewManager(o *ManagerOptions) (*Manager, error) { return nil, err } - db := newRuleDB(o.DBConn) + db := NewRuleDB(o.DBConn) m := &Manager{ tasks: map[string]Task{}, diff --git a/pkg/query-service/tests/integration/signoz_integrations_test.go b/pkg/query-service/tests/integration/signoz_integrations_test.go index d58ccaf51a..eae9603888 100644 --- a/pkg/query-service/tests/integration/signoz_integrations_test.go +++ b/pkg/query-service/tests/integration/signoz_integrations_test.go @@ -327,6 +327,7 @@ func TestDashboardsForInstalledIntegrationDashboards(t *testing.T) { // Installing an integration should make its dashboards appear in the dashboard list require.False(testAvailableIntegration.IsInstalled) + tsBeforeInstallation := time.Now().Unix() integrationsTB.RequestQSToInstallIntegration( testAvailableIntegration.Id, map[string]interface{}{}, ) @@ -344,9 +345,13 @@ func TestDashboardsForInstalledIntegrationDashboards(t *testing.T) { len(testIntegrationDashboards), len(dashboards), "dashboards for installed integrations should appear in dashboards list", ) + require.GreaterOrEqual(dashboards[0].CreatedAt.Unix(), tsBeforeInstallation) + require.GreaterOrEqual(dashboards[0].UpdatedAt.Unix(), tsBeforeInstallation) // Should be able to get installed integrations dashboard by id dd := integrationsTB.GetDashboardByIdFromQS(dashboards[0].Uuid) + require.GreaterOrEqual(dd.CreatedAt.Unix(), tsBeforeInstallation) + require.GreaterOrEqual(dd.UpdatedAt.Unix(), tsBeforeInstallation) require.Equal(*dd, dashboards[0]) // Integration dashboards should not longer appear in dashboard list after uninstallation diff --git a/pkg/query-service/tests/test-deploy/docker-compose.yaml b/pkg/query-service/tests/test-deploy/docker-compose.yaml index c6af0bc058..396b059157 100644 --- a/pkg/query-service/tests/test-deploy/docker-compose.yaml +++ b/pkg/query-service/tests/test-deploy/docker-compose.yaml @@ -192,7 +192,7 @@ services: <<: *db-depend otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.22} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24} container_name: otel-migrator command: - "--dsn=tcp://clickhouse:9000" @@ -205,7 +205,7 @@ services: # condition: service_healthy otel-collector: - image: signoz/signoz-otel-collector:0.88.22 + image: signoz/signoz-otel-collector:0.88.24 container_name: signoz-otel-collector command: [ diff --git a/pkg/query-service/utils/format.go b/pkg/query-service/utils/format.go index aa9fc59720..0adaebff4a 100644 --- a/pkg/query-service/utils/format.go +++ b/pkg/query-service/utils/format.go @@ -167,7 +167,7 @@ func 
ClickHouseFormattedValue(v interface{}) string { case []interface{}: if len(x) == 0 { - return "" + return "[]" } switch x[0].(type) { case string: @@ -184,7 +184,7 @@ func ClickHouseFormattedValue(v interface{}) string { return strings.Join(strings.Fields(fmt.Sprint(x)), ",") default: zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0]))) - return "" + return "[]" } default: zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x))) diff --git a/pkg/query-service/utils/queryTemplate/vars.go b/pkg/query-service/utils/queryTemplate/vars.go index 7297af6a24..677d3aa773 100644 --- a/pkg/query-service/utils/queryTemplate/vars.go +++ b/pkg/query-service/utils/queryTemplate/vars.go @@ -8,17 +8,17 @@ import ( // AssignReservedVars assigns values for go template vars. assumes that // model.QueryRangeParamsV3.Start and End are Unix Nano timestamps -func AssignReservedVarsV3(metricsQueryRangeParams *v3.QueryRangeParamsV3) { - metricsQueryRangeParams.Variables["start_timestamp"] = metricsQueryRangeParams.Start / 1000 - metricsQueryRangeParams.Variables["end_timestamp"] = metricsQueryRangeParams.End / 1000 +func AssignReservedVarsV3(queryRangeParams *v3.QueryRangeParamsV3) { + queryRangeParams.Variables["start_timestamp"] = queryRangeParams.Start / 1000 + queryRangeParams.Variables["end_timestamp"] = queryRangeParams.End / 1000 - metricsQueryRangeParams.Variables["start_timestamp_ms"] = metricsQueryRangeParams.Start - metricsQueryRangeParams.Variables["end_timestamp_ms"] = metricsQueryRangeParams.End + queryRangeParams.Variables["start_timestamp_ms"] = queryRangeParams.Start + queryRangeParams.Variables["end_timestamp_ms"] = queryRangeParams.End - metricsQueryRangeParams.Variables["start_timestamp_nano"] = metricsQueryRangeParams.Start * 1e6 - metricsQueryRangeParams.Variables["end_timestamp_nano"] = metricsQueryRangeParams.End * 1e6 + queryRangeParams.Variables["start_timestamp_nano"] = queryRangeParams.Start * 1e6 + queryRangeParams.Variables["end_timestamp_nano"] = queryRangeParams.End * 1e6 - metricsQueryRangeParams.Variables["start_datetime"] = fmt.Sprintf("toDateTime(%d)", metricsQueryRangeParams.Start/1000) - metricsQueryRangeParams.Variables["end_datetime"] = fmt.Sprintf("toDateTime(%d)", metricsQueryRangeParams.End/1000) + queryRangeParams.Variables["start_datetime"] = fmt.Sprintf("toDateTime(%d)", queryRangeParams.Start/1000) + queryRangeParams.Variables["end_datetime"] = fmt.Sprintf("toDateTime(%d)", queryRangeParams.End/1000) }
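A note on the dashboard-variable handling added to ParseQueryRangeParams: ClickHouse and PromQL queries get the `{{name}}`, `[[name]]`, and `$name` forms rewritten by plain string replacement before the Go text/template pass, which still resolves `{{.name}}` itself. A minimal, self-contained sketch of that substitution step (the `replaceVariables` helper is an illustrative name, not one from this patch):

```go
package main

import (
	"fmt"
	"strings"
)

// replaceVariables rewrites the three alternate dashboard-variable syntaxes
// ({{name}}, [[name]], $name) the parser now supports; {{.name}} is left for
// the text/template pass that runs afterwards.
func replaceVariables(query string, variables map[string]interface{}) string {
	for name, value := range variables {
		query = strings.ReplaceAll(query, fmt.Sprintf("{{%s}}", name), fmt.Sprint(value))
		query = strings.ReplaceAll(query, fmt.Sprintf("[[%s]]", name), fmt.Sprint(value))
		query = strings.ReplaceAll(query, fmt.Sprintf("$%s", name), fmt.Sprint(value))
	}
	return query
}

func main() {
	vars := map[string]interface{}{"service_name": "route"}
	fmt.Println(replaceVariables(`http_calls_total{service_name="$service_name"}`, vars))
	// http_calls_total{service_name="route"}
}
```

Since this is plain string replacement, a variable whose name is a prefix of another (say `$service` alongside `$service_name`) could substitute incorrectly; the test cases added here all use non-overlapping names.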
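The switch from map[string]string to map[string]error for errQueriesByName is what lets QueryRange filter builder-query failures with chErrors.IsResourceLimitError, which relies on errors.As rather than string matching. A self-contained sketch of that detection, mirroring the wrapper in pkg/query-service/errors/clickhouse.go (the main harness is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// ResourceLimitError mirrors the wrapper in pkg/query-service/errors/clickhouse.go.
type ResourceLimitError struct{ err error }

func (e *ResourceLimitError) Error() string { return e.err.Error() }
func (e *ResourceLimitError) Unwrap() error { return e.err }

// IsResourceLimitError reports whether err is, or wraps, a *ResourceLimitError.
func IsResourceLimitError(err error) bool {
	if err == nil {
		return false
	}
	var target *ResourceLimitError
	return errors.As(err, &target)
}

func main() {
	limitErr := &ResourceLimitError{err: errors.New("resource bytes limit exceeded")}
	wrapped := fmt.Errorf("query A failed: %w", limitErr)

	fmt.Println(IsResourceLimitError(wrapped))                 // true: errors.As walks the %w chain
	fmt.Println(IsResourceLimitError(errors.New("timed out"))) // false: dropped from errQueriesByName
}
```

Because Unwrap is implemented, detection keeps working even after a query error is wrapped further up the stack, which string comparison against Error() text would not survive.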
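The new migrate package keeps a data_migrations table so each data migration runs at most once per install. A hedged, self-contained sketch of that run-once pattern (the DDL is copied from migate.go; the runOnce helper and the in-memory DSN are illustrative, assuming the mattn/go-sqlite3 driver):

```go
package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

const tableSchema = `
CREATE TABLE IF NOT EXISTS data_migrations (
	id SERIAL PRIMARY KEY,
	version VARCHAR(255) NOT NULL UNIQUE,
	created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
	succeeded BOOLEAN NOT NULL DEFAULT FALSE
);`

// runOnce executes fn only if version has not already been recorded as succeeded.
func runOnce(conn *sqlx.DB, version string, fn func(*sqlx.DB) error) error {
	var count int
	if err := conn.Get(&count, "SELECT COUNT(*) FROM data_migrations WHERE version = $1 AND succeeded = true", version); err != nil {
		return err
	}
	if count > 0 {
		return nil // already applied
	}
	if err := fn(conn); err != nil {
		return err
	}
	_, err := conn.Exec("INSERT INTO data_migrations (version, succeeded) VALUES ($1, true)", version)
	return err
}

func main() {
	conn := sqlx.MustConnect("sqlite3", ":memory:")
	defer conn.Close()
	conn.MustExec(tableSchema)

	migration := func(*sqlx.DB) error { fmt.Println("migrating"); return nil }
	_ = runOnce(conn, "0.45-alerts-to-v4", migration) // prints "migrating"
	_ = runOnce(conn, "0.45-alerts-to-v4", migration) // no-op on the second call
}
```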
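Small but load-bearing: ClickHouseFormattedValue now renders an empty list as `[]` rather than an empty string, so a generated IN clause stays syntactically valid ClickHouse SQL. A simplified stand-in for the list branch (the `format` helper below is not the real function, just an illustration of the behavior change):

```go
package main

import (
	"fmt"
	"strings"
)

// format mimics the []interface{} branch of ClickHouseFormattedValue for strings.
func format(values []interface{}) string {
	if len(values) == 0 {
		return "[]" // previously "", which produced invalid SQL such as `x IN `
	}
	parts := make([]string, 0, len(values))
	for _, v := range values {
		parts = append(parts, fmt.Sprintf("'%v'", v))
	}
	return "[" + strings.Join(parts, ",") + "]"
}

func main() {
	fmt.Println("service_name IN " + format([]interface{}{"route", "driver"})) // service_name IN ['route','driver']
	fmt.Println("service_name IN " + format(nil))                              // service_name IN []
}
```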