Mirror of https://git.mirrors.martin98.com/https://github.com/SigNoz/signoz
Synced 2025-08-16 17:35:57 +08:00
Commit a0d866c2ff
.github/workflows/staging-deployment.yaml (vendored): 34 lines changed
@@ -9,19 +9,29 @@ jobs:
     name: Deploy latest develop branch to staging
     runs-on: ubuntu-latest
     environment: staging
+    permissions:
+      contents: 'read'
+      id-token: 'write'
     steps:
-      - name: Executing remote ssh commands using ssh key
-        uses: appleboy/ssh-action@v1.0.3
-        env:
-          GITHUB_BRANCH: develop
-          GITHUB_SHA: ${{ github.sha }}
+      - id: 'auth'
+        uses: 'google-github-actions/auth@v2'
         with:
-          host: ${{ secrets.HOST_DNS }}
-          username: ${{ secrets.USERNAME }}
-          key: ${{ secrets.SSH_KEY }}
-          envs: GITHUB_BRANCH,GITHUB_SHA
-          command_timeout: 60m
-          script: |
+          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
+          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
+
+      - name: 'sdk'
+        uses: 'google-github-actions/setup-gcloud@v2'
+
+      - name: 'ssh'
+        shell: bash
+        env:
+          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
+          GITHUB_SHA: ${{ github.sha }}
+          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
+          GCP_ZONE: ${{ secrets.GCP_ZONE }}
+          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
+        run: |
+          read -r -d '' COMMAND <<EOF || true
           echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
           echo "GITHUB_SHA: ${GITHUB_SHA}"
          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
@@ -40,3 +50,5 @@ jobs:
           make build-ee-query-service-amd64
           make build-frontend-amd64
           make run-signoz
+          EOF
+          gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
.github/workflows/testing-deployment.yaml (vendored): 30 lines changed
@@ -9,19 +9,29 @@ jobs:
     runs-on: ubuntu-latest
     environment: testing
     if: ${{ github.event.label.name == 'testing-deploy' }}
+    permissions:
+      contents: 'read'
+      id-token: 'write'
     steps:
-      - name: Executing remote ssh commands using ssh key
-        uses: appleboy/ssh-action@v1.0.3
-        with:
-          host: ${{ secrets.HOST_DNS }}
-          username: ${{ secrets.USERNAME }}
-          key: ${{ secrets.SSH_KEY }}
-          envs: GITHUB_BRANCH,GITHUB_SHA
-          command_timeout: 60m
-          script: |
+      - id: 'auth'
+        uses: 'google-github-actions/auth@v2'
+        with:
+          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
+          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
+
+      - name: 'sdk'
+        uses: 'google-github-actions/setup-gcloud@v2'
+
+      - name: 'ssh'
+        shell: bash
+        env:
+          GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
+          GITHUB_SHA: ${{ github.sha }}
+          GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
+          GCP_ZONE: ${{ secrets.GCP_ZONE }}
+          GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
+        run: |
+          read -r -d '' COMMAND <<EOF || true
          echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
          echo "GITHUB_SHA: ${GITHUB_SHA}"
          export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
@@ -41,3 +51,5 @@ jobs:
           make build-ee-query-service-amd64
           make build-frontend-amd64
           make run-signoz
+          EOF
+          gcloud compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"
.versions-golang (new file): 8 lines
@@ -0,0 +1,8 @@
+#### Auto generated by make versions/golang. DO NOT EDIT! ####
+amd64=128d7baad667abc0e41a85673026a2cf9449ef40f384baf424aee45bc13f9235
+arm=a5f77dc34ccae0d43269675508aab8fa9078ded6fa3e2dcee54f7c230018100d
+arm64=1cdad16d01542a57caca4b0a6893a5b69d711d69dd6bb4483c77c1d092baec41
+386=0c82e5195d14caa5daa01ea06a70139e7ea1edbd366c83259227c7d9965d4c5a
+mips64le=25967f27f76031f31cd3ae2173958e151d8d961ca186ab4328af7a1895139a66
+ppc64le=6fa49b4730622b79560a1fc2677b02a1ee7aac5b28490a2bda6134050108fb3a
+s390x=4e2c0198c3db1c769e8e2e8a1e504dbb5e3eff0dad62f8f5c543b4823a89d81b
@@ -146,7 +146,7 @@ services:
       condition: on-failure

   query-service:
-    image: signoz/query-service:0.45.0
+    image: signoz/query-service:0.46.0
     command:
       [
         "-config=/root/config/prometheus.yml",
@@ -186,7 +186,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:0.45.0
+    image: signoz/frontend:0.46.0
     deploy:
       restart_policy:
         condition: on-failure
@@ -199,7 +199,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector:
-    image: signoz/signoz-otel-collector:0.88.22
+    image: signoz/signoz-otel-collector:0.88.24
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -237,7 +237,7 @@ services:
       - query-service

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.88.22
+    image: signoz/signoz-schema-migrator:0.88.24
     deploy:
       restart_policy:
         condition: on-failure
@@ -66,7 +66,7 @@ services:
       - --storage.path=/data

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.22}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -81,7 +81,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.88.22
+    image: signoz/signoz-otel-collector:0.88.24
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -164,7 +164,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.45.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.46.0}
     container_name: signoz-query-service
     command:
       [
@@ -203,7 +203,7 @@ services:
     <<: *db-depend

   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.45.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.46.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -215,7 +215,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.22}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -229,7 +229,7 @@ services:

   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.22}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.24}
     container_name: signoz-otel-collector
     command:
       [
@@ -14,7 +14,9 @@ import (
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
	"go.signoz.io/signoz/ee/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/auth"
+	"go.signoz.io/signoz/pkg/query-service/constants"
	baseconst "go.signoz.io/signoz/pkg/query-service/constants"
+	"go.signoz.io/signoz/pkg/query-service/migrate"
	"go.signoz.io/signoz/pkg/query-service/version"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
@@ -143,6 +145,12 @@ func main() {
		zap.L().Info("JWT secret key set successfully.")
	}

+	if err := migrate.Migrate(constants.RELATIONAL_DATASOURCE_PATH); err != nil {
+		zap.L().Error("Failed to migrate", zap.Error(err))
+	} else {
+		zap.L().Info("Migration successful")
+	}
+
	server, err := app.NewServer(serverOptions)
	if err != nil {
		zap.L().Fatal("Failed to create server", zap.Error(err))
@@ -52,14 +52,14 @@ var BasicPlan = basemodel.FeatureSet{
		Name:       basemodel.QueryBuilderPanels,
		Active:     true,
		Usage:      0,
-		UsageLimit: 20,
+		UsageLimit: -1,
		Route:      "",
	},
	basemodel.Feature{
		Name:       basemodel.QueryBuilderAlerts,
		Active:     true,
		Usage:      0,
-		UsageLimit: 10,
+		UsageLimit: -1,
		Route:      "",
	},
	basemodel.Feature{
@@ -21,8 +21,6 @@ const config: Config.InitialOptions = {
	transform: {
		'^.+\\.(ts|tsx)?$': 'ts-jest',
		'^.+\\.(js|jsx)$': 'babel-jest',
-		'^.+\\.(css|scss|sass|less)$': 'jest-preview/transforms/css',
-		'^(?!.*\\.(js|jsx|mjs|cjs|ts|tsx|css|json)$)': 'jest-preview/transforms/file',
	},
	transformIgnorePatterns: [
		'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color)/)',
@@ -13,8 +13,6 @@
		"jest": "jest",
		"jest:coverage": "jest --coverage",
		"jest:watch": "jest --watch",
-		"jest-preview": "jest-preview",
-		"test:debug": "npm-run-all -p test jest-preview",
		"postinstall": "is-ci || yarn husky:configure",
		"playwright": "npm run i18n:generate-hash && NODE_ENV=testing playwright test --config=./playwright.config.ts",
		"playwright:local:debug": "PWDEBUG=console yarn playwright --headed --browser=chromium",
@@ -183,7 +181,7 @@
		"@types/webpack-dev-server": "^4.7.2",
		"@typescript-eslint/eslint-plugin": "^4.33.0",
		"@typescript-eslint/parser": "^4.33.0",
-		"autoprefixer": "^9.0.0",
+		"autoprefixer": "10.4.19",
		"babel-plugin-styled-components": "^1.12.0",
		"compression-webpack-plugin": "9.0.0",
		"copy-webpack-plugin": "^8.1.0",
@@ -206,12 +204,12 @@
		"husky": "^7.0.4",
		"is-ci": "^3.0.1",
		"jest-playwright-preset": "^1.7.2",
-		"jest-preview": "0.3.1",
		"jest-styled-components": "^7.0.8",
		"lint-staged": "^12.5.0",
		"msw": "1.3.2",
		"npm-run-all": "latest",
		"portfinder-sync": "^0.0.2",
+		"postcss": "8.4.38",
		"prettier": "2.2.1",
		"raw-loader": "4.0.2",
		"react-hooks-testing-library": "0.6.0",
@@ -1 +0,0 @@
-<svg width="24" height="24" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M12 2c1 2.538 2.5 2.962 3.5 3.808.942.78 1.481 1.845 1.5 2.961 0 1.122-.527 2.198-1.464 2.992C14.598 12.554 13.326 13 12 13s-2.598-.446-3.536-1.24C7.527 10.968 7 9.892 7 8.77c0-.255 0-.508.1-.762.085.25.236.48.443.673.207.193.463.342.75.437a2.334 2.334 0 001.767-.128c.263-.135.485-.32.65-.539.166-.22.269-.468.301-.727a1.452 1.452 0 00-.11-.765 1.699 1.699 0 00-.501-.644C8 4.115 11 2 12 2zM17 16l-5 6-5-6h10z" stroke="#fff" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
(Deleted SVG icon; before: 581 B)
frontend/src/assets/Dashboard/PromQl.tsx (new file): 27 lines
@@ -0,0 +1,27 @@
+import { CSSProperties } from 'react';
+
+function PromQLIcon({
+	fillColor,
+}: {
+	fillColor: CSSProperties['color'];
+}): JSX.Element {
+	return (
+		<svg
+			width="14"
+			height="14"
+			fill="none"
+			xmlns="http://www.w3.org/2000/svg"
+			viewBox="0 0 24 24"
+		>
+			<path
+				d="M12 2c1 2.538 2.5 2.962 3.5 3.808.942.78 1.481 1.845 1.5 2.961 0 1.122-.527 2.198-1.464 2.992C14.598 12.554 13.326 13 12 13s-2.598-.446-3.536-1.24C7.527 10.968 7 9.892 7 8.77c0-.255 0-.508.1-.762.085.25.236.48.443.673.207.193.463.342.75.437a2.334 2.334 0 001.767-.128c.263-.135.485-.32.65-.539.166-.22.269-.468.301-.727a1.452 1.452 0 00-.11-.765 1.699 1.699 0 00-.501-.644C8 4.115 11 2 12 2zM17 16l-5 6-5-6h10z"
+				stroke={fillColor}
+				strokeWidth="2"
+				strokeLinecap="round"
+				strokeLinejoin="round"
+			/>
+		</svg>
+	);
+}
+
+export default PromQLIcon;
@@ -16,6 +16,7 @@ import { useCallback, useMemo, useState } from 'react';
 // interfaces
 import { IField } from 'types/api/logs/fields';
 import { ILog } from 'types/api/logs/log';
+import { FORBID_DOM_PURIFY_TAGS } from 'utils/app';

 // components
 import AddToQueryHOC, { AddToQueryHOCProps } from '../AddToQueryHOC';
@@ -50,7 +51,11 @@ function LogGeneralField({
 }: LogFieldProps): JSX.Element {
	const html = useMemo(
		() => ({
-			__html: convert.toHtml(dompurify.sanitize(fieldValue)),
+			__html: convert.toHtml(
+				dompurify.sanitize(fieldValue, {
+					FORBID_TAGS: [...FORBID_DOM_PURIFY_TAGS],
+				}),
+			),
		}),
		[fieldValue],
	);
@@ -21,6 +21,7 @@ import {
	useMemo,
	useState,
 } from 'react';
+import { FORBID_DOM_PURIFY_TAGS } from 'utils/app';

 import LogLinesActionButtons from '../LogLinesActionButtons/LogLinesActionButtons';
 import LogStateIndicator from '../LogStateIndicator/LogStateIndicator';
@@ -144,7 +145,9 @@ function RawLogView({

	const html = useMemo(
		() => ({
-			__html: convert.toHtml(dompurify.sanitize(text)),
+			__html: convert.toHtml(
+				dompurify.sanitize(text, { FORBID_TAGS: [...FORBID_DOM_PURIFY_TAGS] }),
+			),
		}),
		[text],
	);
@@ -8,6 +8,7 @@ import dompurify from 'dompurify';
 import { useIsDarkMode } from 'hooks/useDarkMode';
 import { FlatLogData } from 'lib/logs/flatLogData';
 import { useMemo } from 'react';
+import { FORBID_DOM_PURIFY_TAGS } from 'utils/app';

 import LogStateIndicator from '../LogStateIndicator/LogStateIndicator';
 import { getLogIndicatorTypeForTable } from '../LogStateIndicator/utils';
@@ -107,7 +108,11 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => {
			children: (
				<TableBodyContent
					dangerouslySetInnerHTML={{
-						__html: convert.toHtml(dompurify.sanitize(field)),
+						__html: convert.toHtml(
+							dompurify.sanitize(field, {
+								FORBID_TAGS: [...FORBID_DOM_PURIFY_TAGS],
+							}),
+						),
					}}
					linesPerRow={linesPerRow}
					isDarkMode={isDarkMode}
@@ -59,8 +59,8 @@ function CreateAlertChannels({

 *Summary:* {{ .Annotations.summary }}
 *Description:* {{ .Annotations.description }}
-*RelatedLogs:* {{ .Annotations.related_logs }}
-*RelatedTraces:* {{ .Annotations.related_traces }}
+*RelatedLogs:* {{ if gt (len .Annotations.related_logs) 0 -}} View in <{{ .Annotations.related_logs }}|logs explorer> {{- end}}
+*RelatedTraces:* {{ if gt (len .Annotations.related_traces) 0 -}} View in <{{ .Annotations.related_traces }}|traces explorer> {{- end}}

 *Details:*
 {{ range .Labels.SortedPairs }} • *{{ .Name }}:* {{ .Value }}
@@ -64,6 +64,10 @@

	.view-options,
	.actions {
+		.info-icon {
+			padding: 8px;
+		}
+
		.hidden {
			display: none;
		}
@@ -252,6 +256,10 @@
			color: var(--bg-ink-200);
			background-color: var(--bg-vanilla-300);
		}
+
+		.info-icon {
+			color: var(--bg-ink-200);
+		}
	}
 }
@@ -1,6 +1,7 @@
 /* eslint-disable react/jsx-props-no-spreading */
 import './ExplorerOptions.styles.scss';

+import { InfoCircleOutlined } from '@ant-design/icons';
 import { Color } from '@signozhq/design-tokens';
 import {
	Button,
@@ -402,6 +403,28 @@ function ExplorerOptions({
			</Button>
		</div>
		<div className="actions">
+			<Tooltip
+				title={
+					<div>
+						{sourcepage === DataSource.LOGS
+							? 'Learn more about Logs explorer '
+							: 'Learn more about Traces explorer '}
+						<Typography.Link
+							href={
+								sourcepage === DataSource.LOGS
+									? 'https://signoz.io/docs/product-features/logs-explorer/?utm_source=product&utm_medium=logs-explorer-toolbar'
+									: 'https://signoz.io/docs/product-features/trace-explorer/?utm_source=product&utm_medium=trace-explorer-toolbar'
+							}
+							target="_blank"
+						>
+							{' '}
+							here
+						</Typography.Link>{' '}
+					</div>
+				}
+			>
+				<InfoCircleOutlined className="info-icon" />
+			</Tooltip>
			<Tooltip title="Hide">
				<Button
					disabled={disabled}
@@ -1,12 +1,15 @@
 import './QuerySection.styles.scss';

+import { Color } from '@signozhq/design-tokens';
 import { Button, Tabs, Tooltip } from 'antd';
+import PromQLIcon from 'assets/Dashboard/PromQl';
 import { ALERTS_DATA_SOURCE_MAP } from 'constants/alerts';
 import { ENTITY_VERSION_V4 } from 'constants/app';
 import { PANEL_TYPES } from 'constants/queryBuilder';
 import { QBShortcuts } from 'constants/shortcuts/QBShortcuts';
 import { QueryBuilder } from 'container/QueryBuilder';
 import { useKeyboardHotkeys } from 'hooks/hotkeys/useKeyboardHotkeys';
+import { useIsDarkMode } from 'hooks/useDarkMode';
 import { Atom, Play, Terminal } from 'lucide-react';
 import { useEffect, useMemo, useState } from 'react';
 import { useTranslation } from 'react-i18next';
@@ -48,6 +51,8 @@ function QuerySection({

	const renderChQueryUI = (): JSX.Element => <ChQuerySection />;

+	const isDarkMode = useIsDarkMode();
+
	const renderMetricUI = (): JSX.Element => (
		<QueryBuilder
			panelType={panelType}
@@ -113,14 +118,16 @@ function QuerySection({
			label: (
				<Tooltip title="PromQL">
					<Button className="nav-btns">
-						<img src="/Icons/promQL.svg" alt="Prom Ql" className="prom-ql-icon" />
+						<PromQLIcon
+							fillColor={isDarkMode ? Color.BG_VANILLA_200 : Color.BG_INK_300}
+						/>
					</Button>
				</Tooltip>
			),
			key: EQueryType.PROM,
		},
	],
-	[],
+	[isDarkMode],
 );

 const { registerShortcut, deregisterShortcut } = useKeyboardHotkeys();
@@ -147,7 +147,7 @@ function UserGuide({ queryType }: UserGuideProps): JSX.Element {
	<Col flex="none">
		<TextToolTip
			text={t('user_tooltip_more_help')}
-			url="https://signoz.io/docs/userguide/alerts-management/#create-alert-rules"
+			url="https://signoz.io/docs/userguide/alerts-management/?utm_source=product&utm_medium=create-alert#creating-a-new-alert-in-signoz"
		/>
	</Col>
 </Row>
@@ -338,7 +338,8 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
	<TextToolTip
		{...{
			text: `More details on how to create alerts`,
-			url: 'https://signoz.io/docs/userguide/alerts-management/',
+			url:
+				'https://signoz.io/docs/alerts/?utm_source=product&utm_medium=list-alerts',
		}}
	/>
@@ -322,7 +322,8 @@ function DashboardsList(): JSX.Element {
	<TextToolTip
		{...{
			text: `More details on how to create dashboards`,
-			url: 'https://signoz.io/docs/userguide/dashboards',
+			url:
+				'https://signoz.io/docs/userguide/dashboards?utm_source=product&utm_medium=list-dashboard',
		}}
	/>
 </ButtonContainer>
@@ -12,6 +12,7 @@
	.prom-ql-icon {
		height: 14px;
		width: 14px;
+		color: var(--bg-vanilla-200);
	}
 }
 .ant-btn-default {
@@ -54,6 +55,10 @@
	.ant-tabs-tab-active {
		.nav-btns {
			background: var(--bg-vanilla-300) !important;
+
+			.prom-ql-icon {
+				color: var(--bg-ink-400);
+			}
		}
	}
 }
@@ -1,6 +1,8 @@
 import './QuerySection.styles.scss';

+import { Color } from '@signozhq/design-tokens';
 import { Button, Tabs, Tooltip, Typography } from 'antd';
+import PromQLIcon from 'assets/Dashboard/PromQl';
 import TextToolTip from 'components/TextToolTip';
 import { PANEL_TYPES } from 'constants/queryBuilder';
 import { QBShortcuts } from 'constants/shortcuts/QBShortcuts';
@@ -11,6 +13,7 @@ import { useKeyboardHotkeys } from 'hooks/hotkeys/useKeyboardHotkeys';
 import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
 import { useShareBuilderUrl } from 'hooks/queryBuilder/useShareBuilderUrl';
 import { updateStepInterval } from 'hooks/queryBuilder/useStepInterval';
+import { useIsDarkMode } from 'hooks/useDarkMode';
 import useUrlQuery from 'hooks/useUrlQuery';
 import { defaultTo } from 'lodash-es';
 import { Atom, Play, Terminal } from 'lucide-react';
@@ -53,6 +56,8 @@ function QuerySection({

	const { selectedDashboard, setSelectedDashboard } = useDashboard();

+	const isDarkMode = useIsDarkMode();
+
	const { widgets } = selectedDashboard?.data || {};

	const getWidget = useCallback(() => {
@@ -196,7 +201,9 @@ function QuerySection({
			label: (
				<Tooltip title="PromQL">
					<Button className="nav-btns">
-						<img src="/Icons/promQL.svg" alt="Prom Ql" className="prom-ql-icon" />
+						<PromQLIcon
+							fillColor={isDarkMode ? Color.BG_VANILLA_200 : Color.BG_INK_300}
+						/>
					</Button>
				</Tooltip>
			),
@@ -228,7 +235,10 @@ function QuerySection({
	onChange={handleQueryCategoryChange}
	tabBarExtraContent={
		<span style={{ display: 'flex', gap: '1rem', alignItems: 'center' }}>
-			<TextToolTip text="This will temporarily save the current query and graph state. This will persist across tab change" />
+			<TextToolTip
+				text="This will temporarily save the current query and graph state. This will persist across tab change"
+				url="https://signoz.io/docs/userguide/query-builder?utm_source=product&utm_medium=query-builder"
+			/>
			<Button
				loading={queryResponse.isFetching}
				type="primary"
@@ -271,7 +271,29 @@ function NewWidget({ selectedGraph }: NewWidgetProps): JSX.Element {
	uuid: selectedDashboard.uuid,
	data: {
		...selectedDashboard.data,
-		widgets: [
+		widgets: isNewDashboard
+			? [
+					...afterWidgets,
+					{
+						...(selectedWidget || ({} as Widgets)),
+						description: selectedWidget?.description || '',
+						timePreferance: selectedTime.enum,
+						isStacked: selectedWidget?.isStacked || false,
+						opacity: selectedWidget?.opacity || '1',
+						nullZeroValues: selectedWidget?.nullZeroValues || 'zero',
+						title: selectedWidget?.title,
+						yAxisUnit: selectedWidget?.yAxisUnit,
+						panelTypes: graphType,
+						query: currentQuery,
+						thresholds: selectedWidget?.thresholds,
+						softMin: selectedWidget?.softMin || 0,
+						softMax: selectedWidget?.softMax || 0,
+						fillSpans: selectedWidget?.fillSpans,
+						selectedLogFields: selectedWidget?.selectedLogFields || [],
+						selectedTracesFields: selectedWidget?.selectedTracesFields || [],
+					},
+			  ]
+			: [
					...preWidgets,
					{
						...(selectedWidget || ({} as Widgets)),
@@ -50,6 +50,9 @@ export const panelTypeDataSourceFormValuesMap: Record<
				'having',
				'orderBy',
				'functions',
+				'queryName',
+				'expression',
+				'disabled',
			],
		},
	},

The remaining fourteen hunks of this file (@@ -65,6 +68,9, @@ -78,6 +84,9, @@ -94,6 +103,9, @@ -109,6 +121,9, @@ -122,6 +137,9, @@ -138,6 +156,9, @@ -153,6 +174,9, @@ -166,6 +190,9, @@ -182,6 +209,9, @@ -197,6 +227,9, @@ -210,6 +243,9, @@ -241,6 +277,9, @@ -254,6 +293,9, @@ -267,6 +309,9) make the identical change, appending 'queryName', 'expression', and 'disabled' to each panel type's field list; the surrounding context lines differ only in whether the list also carries 'functions', 'spaceAggregation', 'limit'/'orderBy', or 'reduceTo'.
@@ -43,7 +43,7 @@ function CreatePipelineButton({
	<ButtonContainer>
		<TextToolTip
			text={t('learn_more')}
-			url="https://signoz.io/docs/logs-pipelines/introduction/"
+			url="https://signoz.io/docs/logs-pipelines/introduction/?utm_source=product&utm_medium=pipelines-tab"
		/>
		{isAddNewPipelineVisible && (
			<CustomButton
@@ -66,7 +66,7 @@ function PipelinesListEmptyState(): JSX.Element {
	<Typography>
		{t('learn_more')}
		<a
-			href="https://signoz.io/docs/logs-pipelines/introduction/"
+			href="https://signoz.io/docs/logs-pipelines/introduction/?utm_source=product&utm_medium=pipelines-tab"
			target="_blank"
			rel="noreferrer"
		>
@@ -119,7 +119,7 @@ exports[`PipelinePage container test should render PipelinePageLayout section 1`
	>
		learn_more
		<a
-			href="https://signoz.io/docs/logs-pipelines/introduction/"
+			href="https://signoz.io/docs/logs-pipelines/introduction/?utm_source=product&utm_medium=pipelines-tab"
			rel="noreferrer"
			target="_blank"
		>
@@ -1,6 +1,6 @@
 import './QueryBuilder.styles.scss';

-import { Button, Col, Divider, Row, Tooltip } from 'antd';
+import { Button, Col, Divider, Row, Tooltip, Typography } from 'antd';
 import {
	MAX_FORMULAS,
	MAX_QUERIES,
@@ -133,13 +133,43 @@ export const QueryBuilder = memo(function QueryBuilder({
	{!isListViewPanel && (
		<div className="new-query-formula-buttons-container">
			<Button.Group>
-				<Tooltip title="Add Query">
+				<Tooltip
+					title={
+						<div style={{ textAlign: 'center' }}>
+							Add New Query
+							<Typography.Link
+								href="https://signoz.io/docs/userguide/query-builder/?utm_source=product&utm_medium=query-builder#multiple-queries-and-functions"
+								target="_blank"
+								style={{ textDecoration: 'underline' }}
+							>
+								{' '}
+								<br />
+								Learn more
+							</Typography.Link>
+						</div>
+					}
+				>
					<Button disabled={isDisabledQueryButton} onClick={addNewBuilderQuery}>
						<DatabaseZap size={12} />
					</Button>
				</Tooltip>

-				<Tooltip title="Add Formula">
+				<Tooltip
+					title={
+						<div style={{ textAlign: 'center' }}>
+							Add New Formula
+							<Typography.Link
+								href="https://signoz.io/docs/userguide/query-builder/?utm_source=product&utm_medium=query-builder#multiple-queries-and-functions"
+								target="_blank"
+								style={{ textDecoration: 'underline' }}
+							>
+								{' '}
+								<br />
+								Learn more
+							</Typography.Link>
+						</div>
+					}
+				>
					<Button disabled={isDisabledFormulaButton} onClick={addNewFormula}>
						<Sigma size={12} />
					</Button>
@@ -66,6 +66,7 @@ export default function QBEntityOptions({
	<div className="left-col-items">
		<div className="options periscope-btn-group">
			<Button.Group>
+				<Tooltip title={isCollapsed ? 'Uncollapse' : 'Collapse'}>
				<Button
					value="search"
					className="periscope-btn collapse"
@@ -73,6 +74,8 @@ export default function QBEntityOptions({
				>
					{isCollapsed ? <ChevronRight size={16} /> : <ChevronDown size={16} />}
				</Button>
+				</Tooltip>
+				<Tooltip title={entityData.disabled ? 'Show' : 'Hide'}>
				<Button
					value="query-builder"
					className="periscope-btn visibility-toggle"
@@ -81,6 +84,7 @@ export default function QBEntityOptions({
				>
					{entityData.disabled ? <EyeOff size={16} /> : <Eye size={16} />}
				</Button>
+				</Tooltip>

				{entityType === 'query' && (
					<Tooltip title={`Clone Query ${entityData.queryName}`}>
@@ -1,7 +1,7 @@
 /* eslint-disable sonarjs/cognitive-complexity */
 import './Query.styles.scss';

-import { Col, Input, Row } from 'antd';
+import { Col, Input, Row, Tooltip, Typography } from 'antd';
 import { ENTITY_VERSION_V4 } from 'constants/app';
 // ** Constants
 import { ATTRIBUTE_TYPES, PANEL_TYPES } from 'constants/queryBuilder';
@@ -367,11 +367,29 @@ export const Query = memo(function Query({
	<Row gutter={[11, 5]}>
		{version && version === 'v3' && (
			<Col flex="5.93rem">
+				<Tooltip
+					title={
+						<div style={{ textAlign: 'center' }}>
+							Select Aggregate Operator
+							<Typography.Link
+								className="learn-more"
+								href="https://signoz.io/docs/userguide/query-builder/?utm_source=product&utm_medium=query-builder#aggregation"
+								target="_blank"
+								style={{ textDecoration: 'underline' }}
+							>
+								{' '}
+								<br />
+								Learn more
+							</Typography.Link>
+						</div>
+					}
+				>
					<OperatorsSelect
						value={query.aggregateOperator}
						onChange={handleChangeOperator}
						operators={operators}
					/>
+				</Tooltip>
			</Col>
		)}

@@ -388,12 +406,30 @@ export const Query = memo(function Query({
	Array.isArray(operators) &&
	operators.length > 0 && (
		<Col flex="5.93rem">
+			<Tooltip
+				title={
+					<div style={{ textAlign: 'center' }}>
+						Select Aggregate Operator
+						<Typography.Link
+							className="learn-more"
+							href="https://signoz.io/docs/userguide/query-builder/?utm_source=product&utm_medium=query-builder#aggregation"
+							target="_blank"
+							style={{ textDecoration: 'underline' }}
+						>
+							{' '}
+							<br />
+							Learn more
+						</Typography.Link>
+					</div>
+				}
+			>
				<OperatorsSelect
					value={query.aggregateOperator}
					onChange={handleChangeOperator}
					operators={operators}
					disabled={disableOperatorSelector}
				/>
+			</Tooltip>
		</Col>
	)}
 </Row>
@@ -422,11 +458,28 @@ export const Query = memo(function Query({
 <Col span={11}>
	<Row gutter={[11, 5]}>
		<Col flex="5.93rem">
+			<Tooltip
+				title={
+					<div style={{ textAlign: 'center' }}>
+						Select Aggregate Operator
+						<Typography.Link
+							href="https://signoz.io/docs/userguide/query-builder/?utm_source=product&utm_medium=query-builder#aggregation"
+							target="_blank"
+							style={{ textDecoration: 'underline' }}
+						>
+							{' '}
+							<br />
+							Learn more
+						</Typography.Link>
+					</div>
+				}
+			>
				<OperatorsSelect
					value={query.aggregateOperator}
					onChange={handleChangeOperator}
					operators={operators}
				/>
+			</Tooltip>
		</Col>
		<Col flex="1 1 12.5rem">
			<AggregatorFilter
@@ -520,12 +573,30 @@ export const Query = memo(function Query({
	)}
	{panelType !== PANEL_TYPES.LIST && panelType !== PANEL_TYPES.TRACE && (
		<Row style={{ width: '100%' }}>
+			<Tooltip
+				placement="right"
+				title={
+					<div style={{ textAlign: 'center' }}>
+						Name of legend
+						<Typography.Link
+							style={{ textDecoration: 'underline' }}
+							href="https://signoz.io/docs/userguide/query-builder/?utm_source=product&utm_medium=query-builder#legend-format"
+							target="_blank"
+						>
+							{' '}
+							<br />
+							Learn more
+						</Typography.Link>
+					</div>
+				}
+			>
				<Input
					onChange={handleChangeQueryLegend}
					size="middle"
					value={query.legend}
					addonBefore="Legend Format"
				/>
+			</Tooltip>
		</Row>
	)}
 </Row>
@@ -1,6 +1,6 @@
 import './QueryFunctions.styles.scss';

-import { Button, Tooltip } from 'antd';
+import { Button, Tooltip, Typography } from 'antd';
 import cx from 'classnames';
 import { useIsDarkMode } from 'hooks/useDarkMode';
 import { cloneDeep, pullAt } from 'lodash-es';
@@ -180,9 +180,22 @@ export default function QueryFunctions({

 <Tooltip
	title={
-		functions && functions.length >= 3
-			? 'Functions are in early access. You can add a maximum of 3 function as of now.'
-			: ''
+		functions && functions.length >= 3 ? (
+			'Functions are in early access. You can add a maximum of 3 function as of now.'
+		) : (
+			<div style={{ textAlign: 'center' }}>
+				Add new function
+				<Typography.Link
+					style={{ textDecoration: 'underline' }}
+					href="https://signoz.io/docs/userguide/query-builder/?utm_source=product&utm_medium=query-builder#functions-for-extended-data-analysis"
+					target="_blank"
+				>
+					{' '}
+					<br />
+					Learn more
+				</Typography.Link>
+			</div>
+		)
	}
	placement="right"
 >
@@ -25,6 +25,9 @@
		line-height: 20px; /* 142.857% */
		letter-spacing: -0.07px;
	}
+	.learn-more {
+		font-size: 14px;
+	}

	.ant-input-affix-wrapper {
		margin-top: 16px;
@@ -282,7 +282,14 @@ function SaveView(): JSX.Element {
	<div className="save-view-content">
		<Typography.Title className="title">Views</Typography.Title>
		<Typography.Text className="subtitle">
-			Manage your saved views for {ROUTES_VS_SOURCEPAGE[pathname]}.
+			Manage your saved views for {ROUTES_VS_SOURCEPAGE[pathname]}.{' '}
+			<Typography.Link
+				className="learn-more"
+				href="https://signoz.io/docs/product-features/saved-view/?utm_source=product&utm_medium=views-tab"
+				target="_blank"
+			>
+				Learn more
+			</Typography.Link>
		</Typography.Text>
		<Input
			placeholder="Search for views..."
@@ -229,6 +229,9 @@ export function QueryBuilderProvider({
	setCurrentQuery(
		timeUpdated ? merge(currentQuery, newQueryState) : newQueryState,
	);
+	setSupersetQuery(
+		timeUpdated ? merge(currentQuery, newQueryState) : newQueryState,
+	);
	setQueryType(type);
 },
 [prepareQueryBuilderData, currentQuery],
@@ -802,6 +805,7 @@ export function QueryBuilderProvider({

	if (newCurrentQuery) {
		setCurrentQuery(newCurrentQuery);
+		setSupersetQuery(newCurrentQuery);
	}
 };
@@ -242,3 +242,7 @@ body {
		}
	}
 }
+
+.ant-notification-notice-message {
+	padding-right: 20px;
+}
@@ -31,3 +31,6 @@ export const checkVersionState = (
	const versionCore = currentVersion?.split('-')[0];
	return versionCore === latestVersion;
 };
+
+// list of forbidden tags to remove in dompurify
+export const FORBID_DOM_PURIFY_TAGS = ['img', 'form'];
File diff suppressed because it is too large.
go.mod: 2 lines changed
@@ -6,7 +6,7 @@ require (
	github.com/ClickHouse/clickhouse-go/v2 v2.20.0
	github.com/DATA-DOG/go-sqlmock v1.5.2
	github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
-	github.com/SigNoz/signoz-otel-collector v0.88.22
+	github.com/SigNoz/signoz-otel-collector v0.88.24
	github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974
	github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974
	github.com/antonmedv/expr v1.15.3
go.sum: 4 lines changed
@@ -98,8 +98,8 @@ github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkb
 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc=
 github.com/SigNoz/prometheus v1.11.0 h1:toX7fU2wqY1TnzvPzDglIYx6OxpqrZ0NNlM/H5S5+u8=
 github.com/SigNoz/prometheus v1.11.0/go.mod h1:MffmFu2qFILQrOHehx3D0XjYtaZMVfI+Ppeiv98x4Ww=
-github.com/SigNoz/signoz-otel-collector v0.88.22 h1:PW9TpdQ8b8vWnUKWVe/w1bX8/Rq2MUUHGDIsx+KA+o0=
-github.com/SigNoz/signoz-otel-collector v0.88.22/go.mod h1:sT1EM9PFDaOJLbAz5npWpgXK6OhpWJ9PpSwyhHWs9rU=
+github.com/SigNoz/signoz-otel-collector v0.88.24 h1:6ESLmQtYPHmik9ZZFSJSbfuj4VQ1/0IC3v1qV9hm5Nk=
+github.com/SigNoz/signoz-otel-collector v0.88.24/go.mod h1:sT1EM9PFDaOJLbAz5npWpgXK6OhpWJ9PpSwyhHWs9rU=
 github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc=
 github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo=
 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY=
@@ -51,6 +51,7 @@ import (
	"go.signoz.io/signoz/pkg/query-service/common"
	"go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/dao"
+	chErrors "go.signoz.io/signoz/pkg/query-service/errors"
	am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
	"go.signoz.io/signoz/pkg/query-service/model"
@@ -71,10 +72,16 @@ const (
	signozTraceTableName      = "distributed_signoz_index_v2"
	signozTraceLocalTableName = "signoz_index_v2"
	signozMetricDBName        = "signoz_metrics"
-	signozSampleLocalTableName = "samples_v2"
-	signozSampleTableName      = "distributed_samples_v2"
-	signozTSTableName          = "distributed_time_series_v2"
+	signozSampleLocalTableName = "samples_v4"
+	signozSampleTableName      = "distributed_samples_v4"
+
+	signozTSLocalTableNameV4 = "time_series_v4"
+	signozTSTableNameV4      = "distributed_time_series_v4"
+
+	signozTSLocalTableNameV46Hrs = "time_series_v4_6hrs"
+	signozTSTableNameV46Hrs      = "distributed_time_series_v4_6hrs"
+
+	signozTSLocalTableNameV41Day = "time_series_v4_1day"
+	signozTSTableNameV41Day      = "distributed_time_series_v4_1day"

	minTimespanForProgressiveSearch = time.Hour
@@ -2381,15 +2388,17 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
	}

	case constants.MetricsTTL:
-		tableName := signozMetricDBName + "." + signozSampleLocalTableName
+		tableNames := []string{signozMetricDBName + "." + signozSampleLocalTableName, signozMetricDBName + "." + signozTSLocalTableNameV4, signozMetricDBName + "." + signozTSLocalTableNameV46Hrs, signozMetricDBName + "." + signozTSLocalTableNameV41Day}
+		for _, tableName := range tableNames {
			statusItem, err := r.checkTTLStatusItem(ctx, tableName)
			if err != nil {
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing ttl_status check sql query")}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
			}
			if statusItem.Status == constants.StatusPending {
				return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
			}
-		go func(tableName string) {
+		}
+		metricTTL := func(tableName string) {
			_, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration)
			if dbErr != nil {
				zap.L().Error("Error in inserting to ttl_status table", zap.Error(dbErr))
@@ -2433,7 +2442,10 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
				zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
				return
			}
-		}(tableName)
+		}
+		for _, tableName := range tableNames {
+			go metricTTL(tableName)
+		}
	case constants.LogsTTL:
		tableName := r.logsDB + "." + r.logsLocalTable
		statusItem, err := r.checkTTLStatusItem(ctx, tableName)
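The refactor above splits validation from execution: every metrics table is checked for a pending TTL change first, and only then is the work fanned out, one goroutine per table. Here is a minimal, self-contained sketch of that shape; the helper names and the WaitGroup are illustrative (the server code fires and forgets):

```go
package main

import (
	"fmt"
	"sync"
)

// pending is a stand-in for checkTTLStatusItem in the real reader.
func pending(table string) bool { return false }

func main() {
	tableNames := []string{
		"signoz_metrics.samples_v4",
		"signoz_metrics.time_series_v4",
		"signoz_metrics.time_series_v4_6hrs",
		"signoz_metrics.time_series_v4_1day",
	}

	// Phase 1: fail fast if any table already has a TTL change in flight.
	for _, tableName := range tableNames {
		if pending(tableName) {
			fmt.Println("TTL is already running for", tableName)
			return
		}
	}

	// Phase 2: apply the TTL change to every table concurrently.
	var wg sync.WaitGroup
	metricTTL := func(tableName string) {
		defer wg.Done()
		// The real closure inserts a ttl_status row and runs
		// ALTER TABLE ... MODIFY TTL against ClickHouse.
		fmt.Println("applying TTL on", tableName)
	}
	for _, tableName := range tableNames {
		wg.Add(1)
		go metricTTL(tableName)
	}
	wg.Wait()
}
```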
@@ -3258,7 +3270,7 @@ func (r *ClickHouseReader) FetchTemporality(ctx context.Context, metricNames []s

 func (r *ClickHouseReader) GetTimeSeriesInfo(ctx context.Context) (map[string]interface{}, error) {

-	queryStr := fmt.Sprintf("SELECT count() as count from %s.%s where metric_name not like 'signoz_%%' group by metric_name order by count desc;", signozMetricDBName, signozTSTableName)
+	queryStr := fmt.Sprintf("SELECT countDistinct(fingerprint) as count from %s.%s where metric_name not like 'signoz_%%' group by metric_name order by count desc;", signozMetricDBName, signozTSTableNameV41Day)

	rows, _ := r.db.Query(ctx, queryStr)

@@ -4570,6 +4582,11 @@ func readRowsForTimeSeriesResult(rows driver.Rows, vars []interface{}, columnNam
		return nil, err
	}
	groupBy, groupAttributes, groupAttributesArray, metricPoint := readRow(vars, columnNames)
+	// skip the point if the value is NaN or Inf
+	// are they ever useful enough to be returned?
+	if math.IsNaN(metricPoint.Value) || math.IsInf(metricPoint.Value, 0) {
+		continue
+	}
	sort.Strings(groupBy)
	key := strings.Join(groupBy, "")
	if _, exists := seriesToAttrs[key]; !exists {
@@ -4700,11 +4717,11 @@ func getPersonalisedError(err error) error {
	}
	zap.L().Error("error while reading result", zap.Error(err))
	if strings.Contains(err.Error(), "code: 307") {
-		return errors.New("query is consuming too much resources, please reach out to the team")
+		return chErrors.ErrResourceBytesLimitExceeded
	}

	if strings.Contains(err.Error(), "code: 159") {
-		return errors.New("Query is taking too long to run, please reach out to the team")
+		return chErrors.ErrResourceTimeLimitExceeded
	}
	return err
 }
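Returning named sentinels instead of ad-hoc error strings lets callers branch with errors.Is. A sketch of that caller-side pattern; the sentinel values below are assumed stand-ins for the real definitions in pkg/query-service/errors (imported as chErrors in the hunk above):

```go
package main

import (
	"errors"
	"fmt"
)

// Assumed shapes; the real sentinels live in pkg/query-service/errors.
var (
	ErrResourceBytesLimitExceeded = errors.New("resource bytes limit exceeded")
	ErrResourceTimeLimitExceeded  = errors.New("resource time limit exceeded")
)

func main() {
	// A wrapped failure still matches its sentinel via errors.Is.
	err := fmt.Errorf("running query: %w", ErrResourceTimeLimitExceeded)
	if errors.Is(err, ErrResourceTimeLimitExceeded) {
		fmt.Println("query exceeded the time limit")
	}
}
```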
@@ -4,7 +4,6 @@ import (
	"context"
	"encoding/json"
	"regexp"
-	"strings"

	"github.com/ClickHouse/clickhouse-go/v2"
	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
@@ -43,12 +42,6 @@ func (c clickhouseConnWrapper) addClickHouseSettings(ctx context.Context, query
		settings["log_comment"] = logComment
	}

-	// don't add resource restrictions traces
-	if strings.Contains(query, "signoz_traces") {
-		ctx = clickhouse.Context(ctx, clickhouse.WithSettings(settings))
-		return ctx
-	}
-
	if c.settings.MaxBytesToRead != "" {
		settings["max_bytes_to_read"] = c.settings.MaxBytesToRead
	}
@@ -326,9 +326,17 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
	if existingTotal > newTotal && existingTotal-newTotal > 1 {
		// if the total count of panels has reduced by more than 1,
		// return error
+		existingIds := getWidgetIds(dashboard.Data)
+		newIds := getWidgetIds(data)
+
+		differenceIds := getIdDifference(existingIds, newIds)
+
+		if len(differenceIds) > 1 {
+			return nil, model.BadRequest(fmt.Errorf("deleting more than one panel is not supported"))
+		}
	}

	dashboard.UpdatedAt = time.Now()
	dashboard.UpdateBy = &userEmail
	dashboard.Data = data
@@ -714,3 +722,52 @@ func countTraceAndLogsPanel(data map[string]interface{}) (int64, int64) {
	}
	return count, totalPanels
 }
+
+func getWidgetIds(data map[string]interface{}) []string {
+	widgetIds := []string{}
+	if data != nil && data["widgets"] != nil {
+		widgets, ok := data["widgets"].(interface{})
+		if ok {
+			data, ok := widgets.([]interface{})
+			if ok {
+				for _, widget := range data {
+					sData, ok := widget.(map[string]interface{})
+					if ok && sData["query"] != nil && sData["id"] != nil {
+						id, ok := sData["id"].(string)
+
+						if ok {
+							widgetIds = append(widgetIds, id)
+						}
+
+					}
+				}
+			}
+		}
+	}
+	return widgetIds
+}
+
+func getIdDifference(existingIds []string, newIds []string) []string {
+	// Convert newIds array to a map for faster lookups
+	newIdsMap := make(map[string]bool)
+	for _, id := range newIds {
+		newIdsMap[id] = true
+	}
+
+	// Initialize a map to keep track of elements in the difference array
+	differenceMap := make(map[string]bool)
+
+	// Initialize the difference array
+	difference := []string{}
+
+	// Iterate through existingIds
+	for _, id := range existingIds {
+		// If the id is not found in newIds, and it's not already in the difference array
+		if _, found := newIdsMap[id]; !found && !differenceMap[id] {
+			difference = append(difference, id)
+			differenceMap[id] = true // Mark the id as seen in the difference array
+		}
+	}
+
+	return difference
+}
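To see the guard in action, here is a small standalone example of the same set-difference logic; the function mirrors getIdDifference above in spirit, and the widget IDs are made up:

```go
package main

import "fmt"

// idDifference returns ids present in existing but missing from updated,
// i.e. the panels an update would delete.
func idDifference(existing, updated []string) []string {
	seen := make(map[string]bool, len(updated))
	for _, id := range updated {
		seen[id] = true
	}
	emitted := make(map[string]bool)
	difference := []string{}
	for _, id := range existing {
		if !seen[id] && !emitted[id] {
			difference = append(difference, id)
			emitted[id] = true
		}
	}
	return difference
}

func main() {
	existing := []string{"widget-a", "widget-b", "widget-c"}
	updated := []string{"widget-a"}
	// Two panels would disappear, so UpdateDashboard would reject this
	// update with "deleting more than one panel is not supported".
	fmt.Println(idDifference(existing, updated)) // [widget-b widget-c]
}
```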
@@ -4,6 +4,7 @@ import (
	"fmt"
	"math"
	"sort"
+	"time"

	"github.com/SigNoz/govaluate"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
@@ -89,6 +90,7 @@ func joinAndCalculate(results []*v3.Result, uniqueLabelSet map[string]string, ex

	resultSeries := &v3.Series{
		Labels: uniqueLabelSet,
+		Points: make([]v3.Point, 0),
	}
	timestamps := make([]int64, 0)
	for timestamp := range uniqueTimestamps {
@@ -158,7 +160,7 @@ func processResults(results []*v3.Result, expression *govaluate.EvaluableExpress
	}, nil
 }

-var SupportedFunctions = []string{"exp", "log", "ln", "exp2", "log2", "exp10", "log10", "sqrt", "cbrt", "erf", "erfc", "lgamma", "tgamma", "sin", "cos", "tan", "asin", "acos", "atan", "degrees", "radians"}
+var SupportedFunctions = []string{"exp", "log", "ln", "exp2", "log2", "exp10", "log10", "sqrt", "cbrt", "erf", "erfc", "lgamma", "tgamma", "sin", "cos", "tan", "asin", "acos", "atan", "degrees", "radians", "now", "toUnixTimestamp"}

 func evalFuncs() map[string]govaluate.ExpressionFunction {
	GoValuateFuncs := make(map[string]govaluate.ExpressionFunction)
@@ -247,5 +249,21 @@ func evalFuncs() map[string]govaluate.ExpressionFunction {
	GoValuateFuncs["radians"] = func(args ...interface{}) (interface{}, error) {
		return args[0].(float64) * math.Pi / 180, nil
	}
+
+	GoValuateFuncs["now"] = func(args ...interface{}) (interface{}, error) {
+		return time.Now().Unix(), nil
+	}
+
+	GoValuateFuncs["toUnixTimestamp"] = func(args ...interface{}) (interface{}, error) {
+		if len(args) != 1 {
+			return nil, fmt.Errorf("toUnixTimestamp requires exactly one argument")
+		}
+		t, err := time.Parse(time.RFC3339, args[0].(string))
+		if err != nil {
+			return nil, err
+		}
+		return t.Unix(), nil
+	}
+
	return GoValuateFuncs
 }
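The two new functions enable time arithmetic in formula expressions, e.g. now() - toUnixTimestamp("2024-05-01T00:00:00Z"). A dependency-free sketch of their semantics; the govaluate wiring is as in the diff, and the date is illustrative:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// "now": current unix time in seconds.
	fmt.Println("now:", time.Now().Unix())

	// "toUnixTimestamp": parse an RFC3339 string into unix seconds.
	t, err := time.Parse(time.RFC3339, "2024-05-01T00:00:00Z")
	if err != nil {
		panic(err)
	}
	fmt.Println("toUnixTimestamp:", t.Unix()) // 1714521600
}
```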
@ -483,49 +483,7 @@ func (aH *APIHandler) getRule(w http.ResponseWriter, r *http.Request) {
|
||||
aH.Respond(w, ruleResponse)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) addTemporality(ctx context.Context, qp *v3.QueryRangeParamsV3) error {
|
||||
|
||||
metricNames := make([]string, 0)
|
||||
metricNameToTemporality := make(map[string]map[v3.Temporality]bool)
|
||||
if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 {
|
||||
for _, query := range qp.CompositeQuery.BuilderQueries {
|
||||
if query.DataSource == v3.DataSourceMetrics && query.Temporality == "" {
|
||||
metricNames = append(metricNames, query.AggregateAttribute.Key)
|
||||
if _, ok := metricNameToTemporality[query.AggregateAttribute.Key]; !ok {
|
||||
metricNameToTemporality[query.AggregateAttribute.Key] = make(map[v3.Temporality]bool)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
if aH.preferDelta {
|
||||
zap.L().Debug("fetching metric temporality")
|
||||
metricNameToTemporality, err = aH.reader.FetchTemporality(ctx, metricNames)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 {
|
||||
for name := range qp.CompositeQuery.BuilderQueries {
|
||||
query := qp.CompositeQuery.BuilderQueries[name]
|
||||
if query.DataSource == v3.DataSourceMetrics && query.Temporality == "" {
|
||||
if aH.preferDelta && metricNameToTemporality[query.AggregateAttribute.Key][v3.Delta] {
|
||||
query.Temporality = v3.Delta
|
||||
} else if metricNameToTemporality[query.AggregateAttribute.Key][v3.Cumulative] {
|
||||
query.Temporality = v3.Cumulative
|
||||
} else {
|
||||
query.Temporality = v3.Unspecified
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// populateTemporality same as addTemporality but for v4 and better
|
||||
// populateTemporality adds the temporality to the query if it is not present
|
||||
func (aH *APIHandler) populateTemporality(ctx context.Context, qp *v3.QueryRangeParamsV3) error {
|
||||
|
||||
missingTemporality := make([]string, 0)
|
||||
@ -2347,13 +2305,28 @@ func (ah *APIHandler) calculateConnectionStatus(
|
||||
|
||||
func (ah *APIHandler) calculateLogsConnectionStatus(
|
||||
ctx context.Context,
|
||||
logsConnectionTest *v3.FilterSet,
|
||||
logsConnectionTest *integrations.LogsConnectionTest,
|
||||
lookbackSeconds int64,
|
||||
) (*integrations.SignalConnectionStatus, *model.ApiError) {
|
||||
if logsConnectionTest == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
logsConnTestFilter := &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: logsConnectionTest.AttributeKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeTag,
|
||||
},
|
||||
Operator: "=",
|
||||
Value: logsConnectionTest.AttributeValue,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
qrParams := &v3.QueryRangeParamsV3{
|
||||
Start: time.Now().UnixMilli() - (lookbackSeconds * 1000),
|
||||
End: time.Now().UnixMilli(),
|
||||
@ -2363,7 +2336,7 @@ func (ah *APIHandler) calculateLogsConnectionStatus(
|
||||
BuilderQueries: map[string]*v3.BuilderQuery{
|
||||
"A": {
|
||||
PageSize: 1,
|
||||
Filters: logsConnectionTest,
|
||||
Filters: logsConnTestFilter,
|
||||
QueryName: "A",
|
||||
DataSource: v3.DataSourceLogs,
|
||||
Expression: "A",
|
||||
@ -2892,7 +2865,7 @@ func (aH *APIHandler) autoCompleteAttributeValues(w http.ResponseWriter, r *http
    aH.Respond(w, response)
}

func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries map[string]string) ([]*v3.Result, error, map[string]string) {
func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries map[string]string) ([]*v3.Result, error, map[string]error) {
    type channelResult struct {
        Series []*v3.Series
        Err    error
@ -2922,13 +2895,13 @@ func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries ma
    close(ch)

    var errs []error
    errQuriesByName := make(map[string]string)
    errQuriesByName := make(map[string]error)
    res := make([]*v3.Result, 0)
    // read values from the channel
    for r := range ch {
        if r.Err != nil {
            errs = append(errs, r.Err)
            errQuriesByName[r.Name] = r.Query
            errQuriesByName[r.Name] = r.Err
            continue
        }
        res = append(res, &v3.Result{
@ -2942,7 +2915,7 @@ func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries ma
    return res, nil, nil
}

func (aH *APIHandler) execClickHouseListQueries(ctx context.Context, queries map[string]string) ([]*v3.Result, error, map[string]string) {
func (aH *APIHandler) execClickHouseListQueries(ctx context.Context, queries map[string]string) ([]*v3.Result, error, map[string]error) {
    type channelResult struct {
        List []*v3.Row
        Err  error
@ -2971,13 +2944,13 @@ func (aH *APIHandler) execClickHouseListQueries(ctx context.Context, queries map
    close(ch)

    var errs []error
    errQuriesByName := make(map[string]string)
    errQuriesByName := make(map[string]error)
    res := make([]*v3.Result, 0)
    // read values from the channel
    for r := range ch {
        if r.Err != nil {
            errs = append(errs, r.Err)
            errQuriesByName[r.Name] = r.Query
            errQuriesByName[r.Name] = r.Err
            continue
        }
        res = append(res, &v3.Result{
@ -2991,7 +2964,7 @@ func (aH *APIHandler) execClickHouseListQueries(ctx context.Context, queries map
    return res, nil, nil
}

func (aH *APIHandler) execPromQueries(ctx context.Context, metricsQueryRangeParams *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]string) {
func (aH *APIHandler) execPromQueries(ctx context.Context, metricsQueryRangeParams *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]error) {
    type channelResult struct {
        Series []*v3.Series
        Err    error
@ -3051,13 +3024,13 @@ func (aH *APIHandler) execPromQueries(ctx context.Context, metricsQueryRangePara
    close(ch)

    var errs []error
    errQuriesByName := make(map[string]string)
    errQuriesByName := make(map[string]error)
    res := make([]*v3.Result, 0)
    // read values from the channel
    for r := range ch {
        if r.Err != nil {
            errs = append(errs, r.Err)
            errQuriesByName[r.Name] = r.Query
            errQuriesByName[r.Name] = r.Err
            continue
        }
        res = append(res, &v3.Result{
@ -3155,7 +3128,7 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que

    var result []*v3.Result
    var err error
    var errQuriesByName map[string]string
    var errQuriesByName map[string]error
    var spanKeys map[string]v3.AttributeKey
    if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
        // check if any enrichment is required for logs if yes then enrich them
@ -3305,8 +3278,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
    }

    // add temporality for each metric

    temporalityErr := aH.addTemporality(r.Context(), queryRangeParams)
    temporalityErr := aH.populateTemporality(r.Context(), queryRangeParams)
    if temporalityErr != nil {
        zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
@ -3412,7 +3384,7 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que

    var result []*v3.Result
    var err error
    var errQuriesByName map[string]string
    var errQuriesByName map[string]error
    var spanKeys map[string]v3.AttributeKey
    if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
        // check if any enrichment is required for logs if yes then enrich them
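The refactor above changes the fan-in maps from `map[string]string` (query name to SQL text) to `map[string]error` (query name to error), so callers keep the full error value for each failed query. A minimal sketch of the pattern with simplified types; `run` stands in for the real ClickHouse/Prometheus executors and is not part of the original code:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// channelResult mirrors the pattern in the handlers above: each worker
// reports its query name plus either data or an error.
type channelResult struct {
	Name string
	Err  error
}

// execQueries fans one goroutine out per query and collects failures into a
// map keyed by query name. The map carries the error itself (map[string]error)
// instead of the query text (map[string]string), so callers can inspect or
// wrap each failure. A sketch, not the handler's actual code.
func execQueries(ctx context.Context, queries map[string]string) (map[string]error, error) {
	ch := make(chan channelResult, len(queries))
	for name, query := range queries {
		go func(name, query string) {
			ch <- channelResult{Name: name, Err: run(ctx, query)}
		}(name, query)
	}

	var errs []error
	errQueriesByName := make(map[string]error)
	for range queries {
		r := <-ch
		if r.Err != nil {
			errs = append(errs, r.Err)
			errQueriesByName[r.Name] = r.Err
		}
	}
	if len(errs) > 0 {
		return errQueriesByName, fmt.Errorf("encountered multiple errors: %w", errors.Join(errs...))
	}
	return nil, nil
}

// run is a hypothetical stand-in for the real query executor.
func run(_ context.Context, query string) error {
	if query == "SELECT bad" {
		return errors.New("query failed")
	}
	return nil
}

func main() {
	byName, err := execQueries(context.Background(),
		map[string]string{"A": "SELECT 1", "B": "SELECT bad"})
	fmt.Println(byName, err)
}
```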
@ -1,6 +1,7 @@
package integrations

import (
    "bytes"
    "context"
    "embed"
    "strings"
@ -120,7 +121,9 @@ func readBuiltInIntegration(dirpath string) (
    }

    var integration IntegrationDetails
    err = json.Unmarshal(hydratedSpecJson, &integration)
    decoder := json.NewDecoder(bytes.NewReader(hydratedSpecJson))
    decoder.DisallowUnknownFields()
    err = decoder.Decode(&integration)
    if err != nil {
        return nil, fmt.Errorf(
            "couldn't parse hydrated JSON spec read from %s: %w",
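The spec-parsing change above swaps `json.Unmarshal`, which silently drops unknown keys, for a `json.Decoder` with `DisallowUnknownFields`, which rejects them. A runnable comparison using only the standard library (the `Spec` type here is illustrative, not the repository's):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type Spec struct {
	Id string `json:"id"`
}

func main() {
	data := []byte(`{"id": "nginx", "unexpected": true}`)

	// json.Unmarshal ignores the unknown "unexpected" key.
	var s1 Spec
	fmt.Println(json.Unmarshal(data, &s1)) // <nil>

	// A decoder with DisallowUnknownFields rejects it, which is the
	// behaviour the diff switches to for integration specs.
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()
	var s2 Spec
	fmt.Println(dec.Decode(&s2)) // json: unknown field "unexpected"
}
```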
@ -78,3 +78,5 @@ Make the collector config file available to your otel collector and use it by ad
```
Note: the collector can load multiple config files, specified by repeating the --config flag (for example `otelcol --config base.yaml --config clickhouse-logs.yaml`; the binary and file names here are illustrative).

Also note that only one collector instance should be configured to collect query_logs.
Running multiple collector instances or replicas with this config will lead to duplicate logs.
@ -30,7 +30,7 @@ To configure metrics and logs collection for a Clickhouse server, you need the f

- **Ensure that an OTEL collector is running in your deployment environment**
  If needed, please [install SigNoz OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/)
  If already installed, ensure that the collector version is v0.88.0 or newer.
  If collecting logs from the system.query_log table, ensure that the collector version is v0.88.22 or newer.
  If collecting logs from the system.query_log table, ensure that the collector version is v0.88.23 or newer.

  Also ensure that you can provide config files to the collector and that you can set environment variables and command line flags used for running it.
@ -41,18 +41,8 @@
  },
  "connection_tests": {
    "logs": {
      "op": "AND",
      "items": [
        {
          "key": {
            "type": "tag",
            "key": "source",
            "dataType": "string"
          },
          "op": "=",
          "value": "clickhouse"
        }
      ]
      "attribute_key": "source",
      "attribute_value": "clickhouse"
    }
  },
  "data_collected": "file://data-collected.json"

@ -37,18 +37,8 @@
  },
  "connection_tests": {
    "logs": {
      "op": "AND",
      "items": [
        {
          "key": {
            "type": "tag",
            "key": "source",
            "dataType": "string"
          },
          "op": "=",
          "value": "mongo"
        }
      ]
      "attribute_key": "source",
      "attribute_value": "mongodb"
    }
  },
  "data_collected": {

@ -32,18 +32,8 @@
  },
  "connection_tests": {
    "logs": {
      "op": "AND",
      "items": [
        {
          "key": {
            "type": "tag",
            "key": "source",
            "dataType": "string"
          },
          "op": "=",
          "value": "nginx"
        }
      ]
      "attribute_key": "source",
      "attribute_value": "nginx"
    }
  },
  "data_collected": {

@ -37,18 +37,8 @@
  },
  "connection_tests": {
    "logs": {
      "op": "AND",
      "items": [
        {
          "key": {
            "type": "tag",
            "key": "source",
            "dataType": "string"
          },
          "op": "=",
          "value": "postgres"
        }
      ]
      "attribute_key": "source",
      "attribute_value": "postgres"
    }
  },
  "data_collected": {

@ -37,18 +37,8 @@
  },
  "connection_tests": {
    "logs": {
      "op": "AND",
      "items": [
        {
          "key": {
            "type": "tag",
            "key": "source",
            "dataType": "string"
          },
          "op": "=",
          "value": "redis"
        }
      ]
      "attribute_key": "source",
      "attribute_value": "redis"
    }
  },
  "data_collected": {
@ -12,7 +12,6 @@ import (
    "go.signoz.io/signoz/pkg/query-service/app/dashboards"
    "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
    "go.signoz.io/signoz/pkg/query-service/model"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    "go.signoz.io/signoz/pkg/query-service/rules"
    "go.signoz.io/signoz/pkg/query-service/utils"
)
@ -63,6 +62,7 @@ type CollectedMetric struct {
    Name        string `json:"name"`
    Type        string `json:"type"`
    Unit        string `json:"unit"`
    Description string `json:"description"`
}

type SignalConnectionStatus struct {
@ -75,9 +75,14 @@ type IntegrationConnectionStatus struct {
    Metrics *SignalConnectionStatus `json:"metrics"`
}

// log attribute value to use for finding logs for the integration.
type LogsConnectionTest struct {
    AttributeKey   string `json:"attribute_key"`
    AttributeValue string `json:"attribute_value"`
}

type IntegrationConnectionTests struct {
    // Filter to use for finding logs for the integration.
    Logs *v3.FilterSet `json:"logs"`
    Logs *LogsConnectionTest `json:"logs"`

    // Metric names expected to have been received for the integration.
    Metrics []string `json:"metrics"`
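The new `LogsConnectionTest` type narrows the integration spec from an arbitrary `v3.FilterSet` to a single attribute equality check. If a caller still needs the old filter shape, it can be derived mechanically; a sketch with simplified stand-in types, not the repository's actual conversion code:

```go
package main

import "fmt"

// Simplified stand-ins for the v3 query types referenced in the diff.
type AttributeKey struct {
	Key      string
	DataType string
	Type     string
}

type FilterItem struct {
	Key      AttributeKey
	Operator string
	Value    any
}

type FilterSet struct {
	Operator string
	Items    []FilterItem
}

// LogsConnectionTest mirrors the new, narrower spec format: a single
// attribute equality check instead of an arbitrary filter expression.
type LogsConnectionTest struct {
	AttributeKey   string `json:"attribute_key"`
	AttributeValue string `json:"attribute_value"`
}

// toFilterSet expands the simplified form back into the old FilterSet
// shape when a query needs it.
func (t LogsConnectionTest) toFilterSet() FilterSet {
	return FilterSet{
		Operator: "AND",
		Items: []FilterItem{{
			Key:      AttributeKey{Key: t.AttributeKey, DataType: "string", Type: "tag"},
			Operator: "=",
			Value:    t.AttributeValue,
		}},
	}
}

func main() {
	fmt.Printf("%+v\n", LogsConnectionTest{"source", "nginx"}.toFilterSet())
}
```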
@ -253,7 +258,7 @@ func (m *Manager) UninstallIntegration(
func (m *Manager) GetPipelinesForInstalledIntegrations(
    ctx context.Context,
) ([]logparsingpipeline.Pipeline, *model.ApiError) {
    installedIntegrations, apiErr := m.getDetailsForInstalledIntegrations(ctx)
    installedIntegrations, apiErr := m.getInstalledIntegrations(ctx)
    if apiErr != nil {
        return nil, apiErr
    }
@ -322,10 +327,15 @@ func (m *Manager) GetInstalledIntegrationDashboardById(
    if dId, exists := dd["id"]; exists {
        if id, ok := dId.(string); ok && id == dashboardId {
            isLocked := 1
            author := "integration"
            return &dashboards.Dashboard{
                Uuid:      m.dashboardUuid(integrationId, string(dashboardId)),
                Locked:    &isLocked,
                Data:      dd,
                CreatedAt: integration.Installation.InstalledAt,
                CreateBy:  &author,
                UpdatedAt: integration.Installation.InstalledAt,
                UpdateBy:  &author,
            }, nil
        }
    }
@ -339,7 +349,7 @@ func (m *Manager) GetInstalledIntegrationDashboardById(
func (m *Manager) GetDashboardsForInstalledIntegrations(
    ctx context.Context,
) ([]dashboards.Dashboard, *model.ApiError) {
    installedIntegrations, apiErr := m.getDetailsForInstalledIntegrations(ctx)
    installedIntegrations, apiErr := m.getInstalledIntegrations(ctx)
    if apiErr != nil {
        return nil, apiErr
    }
@ -351,10 +361,15 @@ func (m *Manager) GetDashboardsForInstalledIntegrations(
    if dId, exists := dd["id"]; exists {
        if dashboardId, ok := dId.(string); ok {
            isLocked := 1
            author := "integration"
            result = append(result, dashboards.Dashboard{
                Uuid:      m.dashboardUuid(ii.IntegrationSummary.Id, dashboardId),
                Locked:    &isLocked,
                Data:      dd,
                CreatedAt: ii.Installation.InstalledAt,
                CreateBy:  &author,
                UpdatedAt: ii.Installation.InstalledAt,
                UpdateBy:  &author,
            })
        }
    }
@ -413,10 +428,10 @@ func (m *Manager) getInstalledIntegration(
    return &installation, nil
}

func (m *Manager) getDetailsForInstalledIntegrations(
func (m *Manager) getInstalledIntegrations(
    ctx context.Context,
) (
    map[string]IntegrationDetails, *model.ApiError,
    map[string]Integration, *model.ApiError,
) {
    installations, apiErr := m.installedIntegrationsRepo.list(ctx)
    if apiErr != nil {
@ -426,5 +441,24 @@ func (m *Manager) getDetailsForInstalledIntegrations(
    installedIds := utils.MapSlice(installations, func(i InstalledIntegration) string {
        return i.IntegrationId
    })
    return m.availableIntegrationsRepo.get(ctx, installedIds)
    integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedIds)
    if apiErr != nil {
        return nil, apiErr
    }

    result := map[string]Integration{}
    for _, ii := range installations {
        iDetails, exists := integrationDetails[ii.IntegrationId]
        if !exists {
            return nil, model.InternalError(fmt.Errorf(
                "couldn't find integration details for %s", ii.IntegrationId,
            ))
        }

        result[ii.IntegrationId] = Integration{
            Installation:       &ii,
            IntegrationDetails: iDetails,
        }
    }
    return result, nil
}
@ -96,19 +96,9 @@ func (t *TestAvailableIntegrationsRepo) list(
        Alerts: []rules.PostableRule{},
    },
    ConnectionTests: &IntegrationConnectionTests{
        Logs: &v3.FilterSet{
            Operator: "AND",
            Items: []v3.FilterItem{
                {
                    Key: v3.AttributeKey{
                        Key:      "source",
                        DataType: v3.AttributeKeyDataTypeString,
                        Type:     v3.AttributeKeyTypeTag,
                    },
                    Operator: "=",
                    Value:    "nginx",
                },
            },
        Logs: &LogsConnectionTest{
            AttributeKey:   "source",
            AttributeValue: "nginx",
        },
    },
}, {
@ -174,19 +164,9 @@ func (t *TestAvailableIntegrationsRepo) list(
        Alerts: []rules.PostableRule{},
    },
    ConnectionTests: &IntegrationConnectionTests{
        Logs: &v3.FilterSet{
            Operator: "AND",
            Items: []v3.FilterItem{
                {
                    Key: v3.AttributeKey{
                        Key:      "source",
                        DataType: v3.AttributeKeyDataTypeString,
                        Type:     v3.AttributeKeyTypeTag,
                    },
                    Operator: "=",
                    Value:    "nginx",
                },
            },
        Logs: &LogsConnectionTest{
            AttributeKey:   "source",
            AttributeValue: "nginx",
        },
    },
},
@ -40,12 +40,13 @@ func applyMetricLimit(results []*v3.Result, queryRangeParams *v3.QueryRangeParam
        }
    }

    ithSum, jthSum := 0.0, 0.0
    ithSum, jthSum, ithCount, jthCount := 0.0, 0.0, 0.0, 0.0
    for _, point := range result.Series[i].Points {
        if math.IsNaN(point.Value) || math.IsInf(point.Value, 0) {
            continue
        }
        ithSum += point.Value
        ithCount++
    }

    for _, point := range result.Series[j].Points {
@ -53,12 +54,17 @@ func applyMetricLimit(results []*v3.Result, queryRangeParams *v3.QueryRangeParam
        continue
    }
    jthSum += point.Value
    jthCount++
    }

    // avoid division by zero
    ithCount = math.Max(ithCount, 1)
    jthCount = math.Max(jthCount, 1)

    if orderBy.Order == "asc" {
        return ithSum < jthSum
        return ithSum/ithCount < jthSum/jthCount
    } else if orderBy.Order == "desc" {
        return ithSum > jthSum
        return ithSum/ithCount > jthSum/jthCount
    }
} else {
    // Sort based on Labels map
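The ordering change above moves the metric-limit sort from comparing raw point sums to comparing NaN/Inf-safe averages, with `math.Max(count, 1)` guarding empty series. A small self-contained illustration of the same comparator:

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// seriesAvg computes the mean of the finite points in a series, the quantity
// the diff switches the ordering to. NaN and Inf values are skipped, and
// math.Max(count, 1) avoids dividing by zero when no usable points remain.
func seriesAvg(points []float64) float64 {
	sum, count := 0.0, 0.0
	for _, v := range points {
		if math.IsNaN(v) || math.IsInf(v, 0) {
			continue
		}
		sum += v
		count++
	}
	return sum / math.Max(count, 1)
}

func main() {
	series := [][]float64{
		{10, 10, 10},                   // avg 10 from 3 points
		{100, math.NaN(), math.Inf(1)}, // avg 100 from 1 finite point
	}
	// Sorting by average rather than by sum stops dense series from
	// outranking sparse ones purely because they have more points.
	sort.Slice(series, func(i, j int) bool {
		return seriesAvg(series[i]) > seriesAvg(series[j]) // "desc"
	})
	fmt.Println(seriesAvg(series[0]), seriesAvg(series[1])) // 100 10
}
```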
@ -5,6 +5,7 @@ import (
    "math"
    "strings"

    "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
    "go.signoz.io/signoz/pkg/query-service/constants"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    "go.signoz.io/signoz/pkg/query-service/utils"
@ -28,7 +29,7 @@ func stepForTableCumulative(start, end int64) int64 {
    return int64(step)
}

func buildMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableName string) (string, error) {
func buildMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery) (string, error) {

    step := stepForTableCumulative(start, end)

@ -36,46 +37,19 @@ func buildMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableNam

    metricQueryGroupBy := mq.GroupBy

    // if the aggregate operator is a histogram quantile, and user has not forgotten
    // the le tag in the group by then add the le tag to the group by
    if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant75 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant90 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant95 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant99 {
        found := false
        for _, tag := range mq.GroupBy {
            if tag.Key == "le" {
                found = true
                break
            }
        }
        if !found {
            metricQueryGroupBy = append(
                metricQueryGroupBy,
                v3.AttributeKey{
                    Key:      "le",
                    DataType: v3.AttributeKeyDataTypeString,
                    Type:     v3.AttributeKeyTypeTag,
                    IsColumn: false,
                },
            )
        }
    }

    filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq)
    filterSubQuery, err := helpers.PrepareTimeseriesFilterQueryV3(start, end, mq)
    if err != nil {
        return "", err
    }

    samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)
    samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)

    // Select the aggregate value for interval
    queryTmplCounterInner :=
        "SELECT %s" +
            " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," +
            " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," +
            " %s as value" +
            " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
            " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME +
            " INNER JOIN" +
            " (%s) as filtered_time_series" +
            " USING fingerprint" +
@ -88,7 +62,7 @@ func buildMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableNam
        "SELECT %s" +
            " toStartOfHour(now()) as ts," + // now() has no meaning & is used as a placeholder for ts
            " %s as value" +
            " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
            " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME +
            " INNER JOIN" +
            " (%s) as filtered_time_series" +
            " USING fingerprint" +
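Several of the builders in this change share the same guard that the hunk above deletes or relocates: histogram quantile operators need the `le` (bucket upper-bound) tag in the GROUP BY, so it is appended when the caller leaves it out. The pattern in isolation, as a sketch with a simplified type:

```go
package main

import "fmt"

type AttributeKey struct{ Key string }

// ensureLeTag appends the "le" tag to the group-by list unless it is
// already present, mirroring the guard used by the histogram quantile
// operators above. A sketch of the pattern, not the builder's exact code.
func ensureLeTag(groupBy []AttributeKey) []AttributeKey {
	for _, tag := range groupBy {
		if tag.Key == "le" {
			return groupBy
		}
	}
	return append(groupBy, AttributeKey{Key: "le"})
}

func main() {
	fmt.Println(ensureLeTag([]AttributeKey{{Key: "service_name"}}))
}
```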
@ -38,7 +38,7 @@ func TestPanelTableForCumulative(t *testing.T) {
        },
        Expression: "A",
    },
    expected: "SELECT toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts",
    expected: "SELECT toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Cumulative' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts",
},
{
    name: "latency p50",
@ -60,8 +60,13 @@ func TestPanelTableForCumulative(t *testing.T) {
        },
    },
    Expression: "A",
    GroupBy: []v3.AttributeKey{
        {
            Key: "le",
        },
    expected: "SELECT toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT le, ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, le, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, le,ts ORDER BY fingerprint, le ASC, ts) WINDOW rate_window as (PARTITION BY fingerprint, le ORDER BY fingerprint, le ASC, ts)) WHERE isNaN(rate_value) = 0 GROUP BY le,ts ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts",
    },
    },
    expected: "SELECT toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT le, ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Cumulative' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000 AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY fingerprint, le,ts ORDER BY fingerprint, le ASC, ts) WINDOW rate_window as (PARTITION BY fingerprint, le ORDER BY fingerprint, le ASC, ts)) WHERE isNaN(rate_value) = 0 GROUP BY le,ts ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts",
},
{
    name: "latency p99 with group by",
@ -77,16 +82,19 @@ func TestPanelTableForCumulative(t *testing.T) {
    {
        Key: "service_name",
    },
    {
        Key: "le",
    },
    },
    Expression: "A",
},
expected: "SELECT service_name, toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT service_name,le, ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, service_name,le, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality IN ['Cumulative', 'Unspecified']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, service_name,le,ts ORDER BY fingerprint, service_name ASC,le ASC, ts) WINDOW rate_window as (PARTITION BY fingerprint, service_name,le ORDER BY fingerprint, service_name ASC,le ASC, ts)) WHERE isNaN(rate_value) = 0 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts",
expected: "SELECT service_name, toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(rate_value)/29 as value FROM (SELECT service_name,le, ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, service_name,le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Cumulative' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY fingerprint, service_name,le,ts ORDER BY fingerprint, service_name ASC,le ASC, ts) WINDOW rate_window as (PARTITION BY fingerprint, service_name,le ORDER BY fingerprint, service_name ASC,le ASC, ts)) WHERE isNaN(rate_value) = 0 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts",
},
}

for _, c := range cases {
    t.Run(c.name, func(t *testing.T) {
        query, err := buildMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query, "distributed_time_series_v2")
        query, err := buildMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query)
        if err != nil {
            t.Fatalf("unexpected error: %v\n", err)
        }
@ -3,63 +3,47 @@ package v3
import (
    "fmt"

    "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
    "go.signoz.io/signoz/pkg/query-service/constants"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    "go.signoz.io/signoz/pkg/query-service/utils"
)

func buildDeltaMetricQuery(start, end, step int64, mq *v3.BuilderQuery, tableName string) (string, error) {
func buildDeltaMetricQuery(start, end, step int64, mq *v3.BuilderQuery) (string, error) {

    metricQueryGroupBy := mq.GroupBy

    // if the aggregate operator is a histogram quantile, and user has not forgotten
    // the le tag in the group by then add the le tag to the group by
    if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant75 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant90 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant95 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant99 {
        found := false
        for _, tag := range mq.GroupBy {
            if tag.Key == "le" {
                found = true
    if mq.Filters != nil {
        temporalityFound := false
        for _, filter := range mq.Filters.Items {
            if filter.Key.Key == "__temporality__" {
                temporalityFound = true
                break
            }
        }
        if !found {
            metricQueryGroupBy = append(
                metricQueryGroupBy,
                v3.AttributeKey{
                    Key:      "le",
                    DataType: v3.AttributeKeyDataTypeString,
                    Type:     v3.AttributeKeyTypeTag,
                    IsColumn: false,
                },
            )
        }
    }

    if mq.Filters != nil {
        if !temporalityFound {
            mq.Filters.Items = append(mq.Filters.Items, v3.FilterItem{
                Key:      v3.AttributeKey{Key: "__temporality__"},
                Operator: v3.FilterOperatorEqual,
                Value:    "Delta",
            })
        }
    }

    filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq)
    filterSubQuery, err := helpers.PrepareTimeseriesFilterQueryV3(start, end, mq)
    if err != nil {
        return "", err
    }

    samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)
    samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)

    // Select the aggregate value for interval
    queryTmpl :=
        "SELECT %s" +
            " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," +
            " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," +
            " %s as value" +
            " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
            " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME +
            " INNER JOIN" +
            " (%s) as filtered_time_series" +
            " USING fingerprint" +
@ -157,9 +141,9 @@ func buildDeltaMetricQuery(start, end, step int64, mq *v3.BuilderQuery, tableNam
    case v3.AggregateOperatorNoOp:
        queryTmpl :=
            "SELECT fingerprint, labels as fullLabels," +
                " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," +
                " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," +
                " any(value) as value" +
                " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
                " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME +
                " INNER JOIN" +
                " (%s) as filtered_time_series" +
                " USING fingerprint" +
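In the delta builder above, the old in-function temporality handling is replaced by pinning `__temporality__ = 'Delta'` into the filter set before delegating to `helpers.PrepareTimeseriesFilterQueryV3`. The guard in isolation, as a sketch with simplified types:

```go
package main

import "fmt"

type FilterItem struct {
	Key      string
	Operator string
	Value    any
}

type FilterSet struct{ Items []FilterItem }

// ensureDeltaTemporality mirrors the new guard in buildDeltaMetricQuery: if
// the caller has not already pinned __temporality__, an equality filter on
// 'Delta' is appended so the time-series lookup only matches delta series.
// A sketch of the pattern, not the builder's exact code.
func ensureDeltaTemporality(fs *FilterSet) {
	if fs == nil {
		return
	}
	for _, f := range fs.Items {
		if f.Key == "__temporality__" {
			return
		}
	}
	fs.Items = append(fs.Items, FilterItem{Key: "__temporality__", Operator: "=", Value: "Delta"})
}

func main() {
	fs := &FilterSet{Items: []FilterItem{{Key: "service_name", Operator: "=", Value: "frontend"}}}
	ensureDeltaTemporality(fs)
	fmt.Printf("%+v\n", fs)
}
```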
@ -4,12 +4,13 @@ import (
    "fmt"
    "math"

    "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
    "go.signoz.io/signoz/pkg/query-service/constants"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    "go.signoz.io/signoz/pkg/query-service/utils"
)

func buildDeltaMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableName string) (string, error) {
func buildDeltaMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery) (string, error) {

    // round up to the nearest multiple of 60
    step := int64(math.Ceil(float64(end-start+1)/1000/60) * 60)
@ -43,17 +44,17 @@ func buildDeltaMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tab
    }
    }

    filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq)
    filterSubQuery, err := helpers.PrepareTimeseriesFilterQueryV3(start, end, mq)
    if err != nil {
        return "", err
    }

    samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)
    samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)

    queryTmpl :=
        "SELECT %s toStartOfHour(now()) as ts," + // now() has no meaning & is used as a placeholder for ts
            " %s as value" +
            " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
            " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME +
            " INNER JOIN" +
            " (%s) as filtered_time_series" +
            " USING fingerprint" +
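The table variant ignores the step argument it receives and recomputes the step by rounding the query window up to whole minutes. With the window used in the tests below, that yields the 1800-second step seen in the expected queries. A runnable check of just that arithmetic:

```go
package main

import (
	"fmt"
	"math"
)

// stepForWindow reproduces the rounding used in buildDeltaMetricQueryForTable:
// the window length (in ms) is converted to seconds and rounded up to the
// nearest whole minute.
func stepForWindow(startMs, endMs int64) int64 {
	return int64(math.Ceil(float64(endMs-startMs+1)/1000/60) * 60)
}

func main() {
	// The ~29m34s test window rounds up to a 30-minute (1800 s) step.
	fmt.Println(stepForWindow(1689255866000, 1689257640000)) // 1800
}
```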
@ -38,7 +38,7 @@ func TestPanelTableForDelta(t *testing.T) {
        },
        Expression: "A",
    },
    expected: "SELECT toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY ts ORDER BY ts",
    expected: "SELECT toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY ts ORDER BY ts",
},
{
    name: "latency p50",
@ -61,7 +61,7 @@ func TestPanelTableForDelta(t *testing.T) {
        },
        Expression: "A",
    },
    expected: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY le,ts ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts",
    expected: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000 AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY le,ts ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts",
},
{
    name: "latency p99 with group by",
@ -80,13 +80,13 @@ func TestPanelTableForDelta(t *testing.T) {
        },
        Expression: "A",
    },
    expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' ) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts",
    expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1689253200000 AND unix_milli < 1689257640000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1689255866000 AND unix_milli <= 1689257640000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts",
},
}

for _, c := range cases {
    t.Run(c.name, func(t *testing.T) {
        query, err := buildDeltaMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query, "distributed_time_series_v2")
        query, err := buildDeltaMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query)
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
@ -6,6 +6,7 @@ import (
    "strings"
    "time"

    "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
    "go.signoz.io/signoz/pkg/query-service/constants"
    "go.signoz.io/signoz/pkg/query-service/model"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
@ -51,136 +52,23 @@ var aggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{
// See https://github.com/SigNoz/signoz/issues/2151#issuecomment-1467249056
var rateWithoutNegative = `If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) `

// buildMetricsTimeSeriesFilterQuery builds the sub-query to be used for filtering
// timeseries based on search criteria
func buildMetricsTimeSeriesFilterQuery(fs *v3.FilterSet, groupTags []v3.AttributeKey, mq *v3.BuilderQuery) (string, error) {
    metricName := mq.AggregateAttribute.Key
    aggregateOperator := mq.AggregateOperator
    var conditions []string
    if mq.Temporality == v3.Delta {
        conditions = append(conditions, fmt.Sprintf("metric_name = %s AND temporality = '%s' ", utils.ClickHouseFormattedValue(metricName), v3.Delta))
    } else {
        conditions = append(conditions, fmt.Sprintf("metric_name = %s AND temporality IN ['%s', '%s']", utils.ClickHouseFormattedValue(metricName), v3.Cumulative, v3.Unspecified))
    }

    if fs != nil && len(fs.Items) != 0 {
        for _, item := range fs.Items {
            toFormat := item.Value
            op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
            // if the received value is an array for like/match op, just take the first value
            // or should we throw an error?
            if op == v3.FilterOperatorLike || op == v3.FilterOperatorRegex || op == v3.FilterOperatorNotLike || op == v3.FilterOperatorNotRegex {
                x, ok := item.Value.([]interface{})
                if ok {
                    if len(x) == 0 {
                        continue
                    }
                    toFormat = x[0]
                }
            }

            if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {
                toFormat = fmt.Sprintf("%%%s%%", toFormat)
            }
            fmtVal := utils.ClickHouseFormattedValue(toFormat)
            switch op {
            case v3.FilterOperatorEqual:
                conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') = %s", item.Key.Key, fmtVal))
            case v3.FilterOperatorNotEqual:
                conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') != %s", item.Key.Key, fmtVal))
            case v3.FilterOperatorIn:
                conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') IN %s", item.Key.Key, fmtVal))
            case v3.FilterOperatorNotIn:
                conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') NOT IN %s", item.Key.Key, fmtVal))
            case v3.FilterOperatorLike:
                conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
            case v3.FilterOperatorNotLike:
                conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
            case v3.FilterOperatorRegex:
                conditions = append(conditions, fmt.Sprintf("match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
            case v3.FilterOperatorNotRegex:
                conditions = append(conditions, fmt.Sprintf("not match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
            case v3.FilterOperatorGreaterThan:
                conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') > %s", item.Key.Key, fmtVal))
            case v3.FilterOperatorGreaterThanOrEq:
                conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') >= %s", item.Key.Key, fmtVal))
            case v3.FilterOperatorLessThan:
                conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') < %s", item.Key.Key, fmtVal))
            case v3.FilterOperatorLessThanOrEq:
                conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') <= %s", item.Key.Key, fmtVal))
            case v3.FilterOperatorContains:
                conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
            case v3.FilterOperatorNotContains:
                conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
            case v3.FilterOperatorExists:
                conditions = append(conditions, fmt.Sprintf("has(JSONExtractKeys(labels), '%s')", item.Key.Key))
            case v3.FilterOperatorNotExists:
                conditions = append(conditions, fmt.Sprintf("not has(JSONExtractKeys(labels), '%s')", item.Key.Key))
            default:
                return "", fmt.Errorf("unsupported operation")
            }
        }
    }
    queryString := strings.Join(conditions, " AND ")

    var selectLabels string
    if aggregateOperator == v3.AggregateOperatorNoOp || aggregateOperator == v3.AggregateOperatorRate {
        selectLabels = "labels,"
    } else {
        for _, tag := range groupTags {
            selectLabels += fmt.Sprintf(" JSONExtractString(labels, '%s') as %s,", tag.Key, tag.Key)
        }
    }

    filterSubQuery := fmt.Sprintf("SELECT %s fingerprint FROM %s.%s WHERE %s", selectLabels, constants.SIGNOZ_METRIC_DBNAME, constants.SIGNOZ_TIMESERIES_LOCAL_TABLENAME, queryString)

    return filterSubQuery, nil
}

func buildMetricQuery(start, end, step int64, mq *v3.BuilderQuery, tableName string) (string, error) {
func buildMetricQuery(start, end, step int64, mq *v3.BuilderQuery) (string, error) {

    metricQueryGroupBy := mq.GroupBy

    // if the aggregate operator is a histogram quantile, and user has not forgotten
    // the le tag in the group by then add the le tag to the group by
    if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant75 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant90 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant95 ||
        mq.AggregateOperator == v3.AggregateOperatorHistQuant99 {
        found := false
        for _, tag := range mq.GroupBy {
            if tag.Key == "le" {
                found = true
                break
            }
        }
        if !found {
            metricQueryGroupBy = append(
                metricQueryGroupBy,
                v3.AttributeKey{
                    Key:      "le",
                    DataType: v3.AttributeKeyDataTypeString,
                    Type:     v3.AttributeKeyTypeTag,
                    IsColumn: false,
                },
            )
        }
    }

    filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq)
    filterSubQuery, err := helpers.PrepareTimeseriesFilterQueryV3(start, end, mq)
    if err != nil {
        return "", err
    }

    samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms < %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)
    samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND unix_milli >= %d AND unix_milli < %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)

    // Select the aggregate value for interval
    queryTmpl :=
        "SELECT %s" +
            " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," +
            " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," +
            " %s as value" +
            " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
            " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME +
            " INNER JOIN" +
            " (%s) as filtered_time_series" +
            " USING fingerprint" +
@ -309,9 +197,9 @@ func buildMetricQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
    case v3.AggregateOperatorNoOp:
        queryTmpl :=
            "SELECT fingerprint, labels as fullLabels," +
                " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," +
                " toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts," +
                " any(value) as value" +
                " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
                " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_V4_TABLENAME +
                " INNER JOIN" +
                " (%s) as filtered_time_series" +
                " USING fingerprint" +
@ -461,15 +349,15 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P
    var err error
    if mq.Temporality == v3.Delta {
        if panelType == v3.PanelTypeTable {
            query, err = buildDeltaMetricQueryForTable(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
            query, err = buildDeltaMetricQueryForTable(start, end, mq.StepInterval, mq)
        } else {
            query, err = buildDeltaMetricQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
            query, err = buildDeltaMetricQuery(start, end, mq.StepInterval, mq)
        }
    } else {
        if panelType == v3.PanelTypeTable {
            query, err = buildMetricQueryForTable(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
            query, err = buildMetricQueryForTable(start, end, mq.StepInterval, mq)
        } else {
            query, err = buildMetricQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
            query, err = buildMetricQuery(start, end, mq.StepInterval, mq)
        }
    }
@ -50,6 +50,7 @@ func TestBuildQueryWithFilters(t *testing.T) {
    }},
    AggregateOperator: v3.AggregateOperatorRateMax,
    Expression:        "A",
    Temporality:       v3.Cumulative,
},
},
},
@ -57,7 +58,7 @@ func TestBuildQueryWithFilters(t *testing.T) {
    query, err := PrepareMetricQuery(q.Start, q.End, q.CompositeQuery.QueryType, q.CompositeQuery.PanelType, q.CompositeQuery.BuilderQueries["A"], Options{PreferRPM: false})
    require.NoError(t, err)

    require.Contains(t, query, "WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'a') != 'b'")
    require.Contains(t, query, "WHERE metric_name = 'name' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'a') != 'b'")
    require.Contains(t, query, rateWithoutNegative)
    require.Contains(t, query, "not match(JSONExtractString(labels, 'code'), 'ERROR_*')")
})
@ -78,6 +79,7 @@ func TestBuildQueryWithMultipleQueries(t *testing.T) {
    {Key: v3.AttributeKey{Key: "in"}, Value: []interface{}{"a", "b", "c"}, Operator: v3.FilterOperatorIn},
    }},
    AggregateOperator: v3.AggregateOperatorRateAvg,
    Temporality:       v3.Cumulative,
    Expression:        "A",
},
"B": {
@ -85,6 +87,7 @@ func TestBuildQueryWithMultipleQueries(t *testing.T) {
    StepInterval:       60,
    AggregateAttribute: v3.AttributeKey{Key: "name2"},
    AggregateOperator:  v3.AggregateOperatorRateMax,
    Temporality:        v3.Cumulative,
    Expression:         "B",
},
},
@ -94,158 +97,15 @@ func TestBuildQueryWithMultipleQueries(t *testing.T) {
    query, err := PrepareMetricQuery(q.Start, q.End, q.CompositeQuery.QueryType, q.CompositeQuery.PanelType, q.CompositeQuery.BuilderQueries["A"], Options{PreferRPM: false})
    require.NoError(t, err)

    require.Contains(t, query, "WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'in') IN ['a','b','c']")
    require.Contains(t, query, "WHERE metric_name = 'name' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'in') IN ['a','b','c']")
    require.Contains(t, query, rateWithoutNegative)
})
}

func TestBuildQueryOperators(t *testing.T) {
    testCases := []struct {
        operator            v3.FilterOperator
        filterSet           v3.FilterSet
        expectedWhereClause string
    }{
        {
            operator: v3.FilterOperatorEqual,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "service_name"}, Value: "route", Operator: v3.FilterOperatorEqual},
                },
            },
            expectedWhereClause: "JSONExtractString(labels, 'service_name') = 'route'",
        },
        {
            operator: v3.FilterOperatorNotEqual,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "service_name"}, Value: "route", Operator: v3.FilterOperatorNotEqual},
                },
            },
            expectedWhereClause: "JSONExtractString(labels, 'service_name') != 'route'",
        },
        {
            operator: v3.FilterOperatorRegex,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "service_name"}, Value: "out", Operator: v3.FilterOperatorRegex},
                },
            },
            expectedWhereClause: "match(JSONExtractString(labels, 'service_name'), 'out')",
        },
        {
            operator: v3.FilterOperatorNotRegex,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "service_name"}, Value: "out", Operator: v3.FilterOperatorNotRegex},
                },
            },
            expectedWhereClause: "not match(JSONExtractString(labels, 'service_name'), 'out')",
        },
        {
            operator: v3.FilterOperatorIn,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "service_name"}, Value: []interface{}{"route", "driver"}, Operator: v3.FilterOperatorIn},
                },
            },
            expectedWhereClause: "JSONExtractString(labels, 'service_name') IN ['route','driver']",
        },
        {
            operator: v3.FilterOperatorNotIn,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "service_name"}, Value: []interface{}{"route", "driver"}, Operator: v3.FilterOperatorNotIn},
                },
            },
            expectedWhereClause: "JSONExtractString(labels, 'service_name') NOT IN ['route','driver']",
        },
        {
            operator: v3.FilterOperatorExists,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "horn"}, Operator: v3.FilterOperatorExists},
                },
            },
            expectedWhereClause: "has(JSONExtractKeys(labels), 'horn')",
        },
        {
            operator: v3.FilterOperatorNotExists,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "horn"}, Operator: v3.FilterOperatorNotExists},
                },
            },
            expectedWhereClause: "not has(JSONExtractKeys(labels), 'horn')",
        },
        {
            operator: v3.FilterOperatorContains,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "service_name"}, Value: "out", Operator: v3.FilterOperatorContains},
                },
            },
            expectedWhereClause: "like(JSONExtractString(labels, 'service_name'), '%out%')",
        },
        {
            operator: v3.FilterOperatorNotContains,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "serice_name"}, Value: "out", Operator: v3.FilterOperatorNotContains},
                },
            },
            expectedWhereClause: "notLike(JSONExtractString(labels, 'serice_name'), '%out%')",
        },
        {
            operator: v3.FilterOperatorLike,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "service_name"}, Value: "dri", Operator: v3.FilterOperatorLike},
                },
            },
            expectedWhereClause: "like(JSONExtractString(labels, 'service_name'), 'dri')",
        },
        {
            operator: v3.FilterOperatorNotLike,
            filterSet: v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                    {Key: v3.AttributeKey{Key: "serice_name"}, Value: "dri", Operator: v3.FilterOperatorNotLike},
                },
            },
            expectedWhereClause: "notLike(JSONExtractString(labels, 'serice_name'), 'dri')",
        },
    }

    for i, tc := range testCases {
        t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
            mq := v3.BuilderQuery{
                QueryName:          "A",
                StepInterval:       60,
                AggregateAttribute: v3.AttributeKey{Key: "signoz_calls_total"},
                AggregateOperator:  v3.AggregateOperatorSum,
            }
            whereClause, err := buildMetricsTimeSeriesFilterQuery(&tc.filterSet, []v3.AttributeKey{}, &mq)
            require.NoError(t, err)
            require.Contains(t, whereClause, tc.expectedWhereClause)
        })
    }
}
func TestBuildQueryXRate(t *testing.T) {
|
||||
t.Run("TestBuildQueryXRate", func(t *testing.T) {
|
||||
|
||||
tmpl := `SELECT ts, %s(rate_value) as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified']) as filtered_time_series USING fingerprint WHERE metric_name = 'name' AND timestamp_ms >= 1650991920000 AND timestamp_ms < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts) ) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts`
|
||||
tmpl := `SELECT ts, %s(rate_value) as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'name' AND temporality = '' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'name' AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts) ) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts`
|
||||
|
||||
cases := []struct {
|
||||
aggregateOperator v3.AggregateOperator
|
||||
@ -298,7 +158,7 @@ func TestBuildQueryXRate(t *testing.T) {
|
||||
func TestBuildQueryRPM(t *testing.T) {
|
||||
t.Run("TestBuildQueryXRate", func(t *testing.T) {
|
||||
|
||||
tmpl := `SELECT ts, ceil(value * 60) as value FROM (SELECT ts, %s(rate_value) as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified']) as filtered_time_series USING fingerprint WHERE metric_name = 'name' AND timestamp_ms >= 1650991920000 AND timestamp_ms < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts) ) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts)`
|
||||
tmpl := `SELECT ts, ceil(value * 60) as value FROM (SELECT ts, %s(rate_value) as value FROM (SELECT ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as rate_value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'name' AND temporality = '' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'name' AND unix_milli >= 1650991920000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts) ) WHERE isNaN(rate_value) = 0 GROUP BY ts ORDER BY ts)`
		cases := []struct {
			aggregateOperator v3.AggregateOperator

@ -377,7 +237,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) {
				},
			},
			// 20:10:00 - 20:41:00
			expected: "timestamp_ms >= 1686082200000 AND timestamp_ms < 1686084060000",
			expected: "unix_milli >= 1686082200000 AND unix_milli < 1686084060000",
		},
		{
			name: "TestBuildQueryAdjustedTimes start close to 50 seconds",
@ -402,7 +262,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) {
				},
			},
			// 20:10:00 - 20:41:00
			expected: "timestamp_ms >= 1686082200000 AND timestamp_ms < 1686084060000",
			expected: "unix_milli >= 1686082200000 AND unix_milli < 1686084060000",
		},
		{
			name: "TestBuildQueryAdjustedTimes start close to 42 seconds with step 30 seconds",
@ -427,7 +287,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) {
				},
			},
			// 20:11:00 - 20:41:00
			expected: "timestamp_ms >= 1686082260000 AND timestamp_ms < 1686084060000",
			expected: "unix_milli >= 1686082260000 AND unix_milli < 1686084060000",
		},
		{
			name: "TestBuildQueryAdjustedTimes start close to 42 seconds with step 30 seconds and end close to 30 seconds",
@ -452,7 +312,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) {
				},
			},
			// 20:11:00 - 20:41:00
			expected: "timestamp_ms >= 1686082260000 AND timestamp_ms < 1686084060000",
			expected: "unix_milli >= 1686082260000 AND unix_milli < 1686084060000",
		},
		{
			name: "TestBuildQueryAdjustedTimes start close to 42 seconds with step 300 seconds and end close to 30 seconds",
@ -479,7 +339,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) {
			// 20:05:00 - 20:41:00
			// 20:10:00 is the nearest 5 minute interval, but we round down to 20:05:00
			// as this is a rate query and we want to include the previous value for the first interval
			expected: "timestamp_ms >= 1686081900000 AND timestamp_ms < 1686084060000",
			expected: "unix_milli >= 1686081900000 AND unix_milli < 1686084060000",
		},
		{
			name: "TestBuildQueryAdjustedTimes start close to 42 seconds with step 180 seconds and end close to 30 seconds",
@ -506,7 +366,7 @@ func TestBuildQueryAdjustedTimes(t *testing.T) {
			// 20:06:00 - 20:39:00
			// 20:09:00 is the nearest 3 minute interval, but we round down to 20:06:00
			// as this is a rate query and we want to include the previous value for the first interval
			expected: "timestamp_ms >= 1686081960000 AND timestamp_ms < 1686084060000",
			expected: "unix_milli >= 1686081960000 AND unix_milli < 1686084060000",
		},
	}
@ -117,3 +117,88 @@ func PrepareTimeseriesFilterQuery(start, end int64, mq *v3.BuilderQuery) (string

	return filterSubQuery, nil
}

// PrepareTimeseriesFilterQueryV3 builds the sub-query to be used for filtering timeseries based on the search criteria
func PrepareTimeseriesFilterQueryV3(start, end int64, mq *v3.BuilderQuery) (string, error) {
	var conditions []string
	var fs *v3.FilterSet = mq.Filters
	var groupTags []v3.AttributeKey = mq.GroupBy

	conditions = append(conditions, fmt.Sprintf("metric_name = %s", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key)))
	conditions = append(conditions, fmt.Sprintf("temporality = '%s'", mq.Temporality))

	start, end, tableName := which(start, end)

	conditions = append(conditions, fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", start, end))

	if fs != nil && len(fs.Items) != 0 {
		for _, item := range fs.Items {
			toFormat := item.Value
			op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
			if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {
				toFormat = fmt.Sprintf("%%%s%%", toFormat)
			}
			fmtVal := utils.ClickHouseFormattedValue(toFormat)
			switch op {
			case v3.FilterOperatorEqual:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') = %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotEqual:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') != %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorIn:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') IN %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotIn:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') NOT IN %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorLike:
				conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotLike:
				conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorRegex:
				conditions = append(conditions, fmt.Sprintf("match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotRegex:
				conditions = append(conditions, fmt.Sprintf("not match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorGreaterThan:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') > %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorGreaterThanOrEq:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') >= %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorLessThan:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') < %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorLessThanOrEq:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') <= %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorContains:
				conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotContains:
				conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorExists:
				conditions = append(conditions, fmt.Sprintf("has(JSONExtractKeys(labels), '%s')", item.Key.Key))
			case v3.FilterOperatorNotExists:
				conditions = append(conditions, fmt.Sprintf("not has(JSONExtractKeys(labels), '%s')", item.Key.Key))
			default:
				return "", fmt.Errorf("unsupported filter operator")
			}
		}
	}
	whereClause := strings.Join(conditions, " AND ")

	var selectLabels string

	if mq.AggregateOperator == v3.AggregateOperatorNoOp || mq.AggregateOperator == v3.AggregateOperatorRate {
		selectLabels += "labels, "
	} else {
		for _, tag := range groupTags {
			selectLabels += fmt.Sprintf("JSONExtractString(labels, '%s') as %s, ", tag.Key, tag.Key)
		}
	}

	// The table JOIN key always exists
	selectLabels += "fingerprint"

	filterSubQuery := fmt.Sprintf(
		"SELECT DISTINCT %s FROM %s.%s WHERE %s",
		selectLabels,
		constants.SIGNOZ_METRIC_DBNAME,
		tableName,
		whereClause,
	)

	return filterSubQuery, nil
}
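// As an illustration (hand-assembled, not produced by running the code): for
// metric_name 'http_requests', temporality 'Cumulative', and a single filter
// service_name = 'route' over roughly a one-day window, the function above
// yields a sub-query of this shape:
//
//	SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_1day
//	WHERE metric_name = 'http_requests' AND temporality = 'Cumulative'
//	AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000
//	AND JSONExtractString(labels, 'service_name') = 'route'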
@ -23,6 +23,8 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P
	var quantile float64

	percentileOperator := mq.SpaceAggregation

	if v3.IsPercentileOperator(mq.SpaceAggregation) &&
		mq.AggregateAttribute.Type != v3.AttributeKeyType(v3.MetricTypeExponentialHistogram) {
		quantile = v3.GetPercentileFromOperator(mq.SpaceAggregation)

@ -80,6 +82,7 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P

	// fixed-bucket histogram quantiles are calculated with UDF
	if quantile != 0 && mq.AggregateAttribute.Type != v3.AttributeKeyType(v3.MetricTypeExponentialHistogram) {
		query = fmt.Sprintf(`SELECT %s, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) as value FROM (%s) GROUP BY %s ORDER BY %s`, groupBy, quantile, query, groupBy, orderBy)
		mq.SpaceAggregation = percentileOperator
	}

	return query, nil
@ -1005,6 +1005,7 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
	if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
		for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
			// Formula query
			// Check if the queries used in the expression can be joined
			if query.QueryName != query.Expression {
				expression, err := govaluate.NewEvaluableExpressionWithFunctions(query.Expression, evalFuncs())
				if err != nil {

@ -1039,10 +1040,12 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
				}
			}

			// If the step interval is less than the minimum allowed step interval, set it to the minimum allowed step interval
			if minStep := common.MinAllowedStepInterval(queryRangeParams.Start, queryRangeParams.End); query.StepInterval < minStep {
				query.StepInterval = minStep
			}
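			// common.MinAllowedStepInterval is not part of this diff; a
			// plausible shape, assuming it caps the number of plotted points
			// at around 1500 and floors the step at 60s (both values are
			// assumptions, not the repository's actual ones):
			//
			//	step := (endMs - startMs) / 1000 / 1500
			//	if step < 60 {
			//		return 60
			//	}
			//	return ((step + 59) / 60) * 60 // align buckets to whole minutes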
			// Remove the time shift function from the list of functions and set the shift by value
			var timeShiftBy int64
			if len(query.Functions) > 0 {
				for idx := range query.Functions {

@ -1062,16 +1065,45 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
				}
			}
			query.ShiftBy = timeShiftBy

			// for metrics v3
			// if the aggregate operator is a histogram quantile and the user has forgotten
			// the le tag in the group by, add the le tag to the group by
			if query.AggregateOperator == v3.AggregateOperatorHistQuant50 ||
				query.AggregateOperator == v3.AggregateOperatorHistQuant75 ||
				query.AggregateOperator == v3.AggregateOperatorHistQuant90 ||
				query.AggregateOperator == v3.AggregateOperatorHistQuant95 ||
				query.AggregateOperator == v3.AggregateOperatorHistQuant99 {
				found := false
				for _, tag := range query.GroupBy {
					if tag.Key == "le" {
						found = true
						break
					}
				}
				if !found {
					query.GroupBy = append(
						query.GroupBy,
						v3.AttributeKey{
							Key:      "le",
							DataType: v3.AttributeKeyDataTypeString,
							Type:     v3.AttributeKeyTypeTag,
							IsColumn: false,
						},
					)
				}
			}
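			// The guard above could equally be factored into a small helper;
			// a suggested sketch, not code from this commit:
			//
			//	func ensureLeInGroupBy(groupBy []v3.AttributeKey) []v3.AttributeKey {
			//		for _, tag := range groupBy {
			//			if tag.Key == "le" {
			//				return groupBy
			//			}
			//		}
			//		return append(groupBy, v3.AttributeKey{
			//			Key:      "le",
			//			DataType: v3.AttributeKeyDataTypeString,
			//			Type:     v3.AttributeKeyTypeTag,
			//		})
			//	}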
			if query.Filters == nil || len(query.Filters.Items) == 0 {
				continue
			}

			for idx := range query.Filters.Items {
				item := &query.Filters.Items[idx]
				value := item.Value
				if value != nil {
					switch x := value.(type) {
					case string:
						variableName := strings.Trim(x, "{{ . }}")
						variableName := strings.Trim(x, "{[.$]}")
						if _, ok := queryRangeParams.Variables[variableName]; ok {
							item.Value = queryRangeParams.Variables[variableName]
						}

@ -1079,7 +1111,7 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
						if len(x) > 0 {
							switch x[0].(type) {
							case string:
								variableName := strings.Trim(x[0].(string), "{{ . }}")
								variableName := strings.Trim(x[0].(string), "{[.$]}")
								if _, ok := queryRangeParams.Variables[variableName]; ok {
									item.Value = queryRangeParams.Variables[variableName]
								}

@ -1087,6 +1119,13 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
						}
					}
				}

				if v3.FilterOperator(strings.ToLower((string(item.Operator)))) != v3.FilterOperatorIn && v3.FilterOperator(strings.ToLower((string(item.Operator)))) != v3.FilterOperatorNotIn {
					// the value type should not be multiple values
					if _, ok := item.Value.([]interface{}); ok {
						return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("multiple values %s are not allowed for operator `%s` for key `%s`", item.Value, item.Operator, item.Key.Key)}
					}
				}
			}
		}
	}

@ -1104,6 +1143,13 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
		if chQuery.Disabled {
			continue
		}

		for name, value := range queryRangeParams.Variables {
			chQuery.Query = strings.Replace(chQuery.Query, fmt.Sprintf("{{%s}}", name), fmt.Sprint(value), -1)
			chQuery.Query = strings.Replace(chQuery.Query, fmt.Sprintf("[[%s]]", name), fmt.Sprint(value), -1)
			chQuery.Query = strings.Replace(chQuery.Query, fmt.Sprintf("$%s", name), fmt.Sprint(value), -1)
		}
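		// All three dashboard-variable syntaxes are plain text replacement,
		// so the spellings resolve identically. With Variables = {"host":
		// "route"}, each of the following ends up as ... WHERE host = 'route'
		// (query text illustrative):
		//
		//	SELECT count() FROM spans WHERE host = '{{host}}'
		//	SELECT count() FROM spans WHERE host = '[[host]]'
		//	SELECT count() FROM spans WHERE host = '$host'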
		tmpl := template.New("clickhouse-query")
		tmpl, err := tmpl.Parse(chQuery.Query)
		if err != nil {

@ -1128,6 +1174,13 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
		if promQuery.Disabled {
			continue
		}

		for name, value := range queryRangeParams.Variables {
			promQuery.Query = strings.Replace(promQuery.Query, fmt.Sprintf("{{%s}}", name), fmt.Sprint(value), -1)
			promQuery.Query = strings.Replace(promQuery.Query, fmt.Sprintf("[[%s]]", name), fmt.Sprint(value), -1)
			promQuery.Query = strings.Replace(promQuery.Query, fmt.Sprintf("$%s", name), fmt.Sprint(value), -1)
		}

		tmpl := template.New("prometheus-query")
		tmpl, err := tmpl.Parse(promQuery.Query)
		if err != nil {
@ -652,12 +652,12 @@ func TestParseQueryRangeParamsDashboardVarsSubstitution(t *testing.T) {
					Items: []v3.FilterItem{
						{
							Key:      v3.AttributeKey{Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
							Operator: "EQ",
							Operator: v3.FilterOperatorEqual,
							Value:    "{{.service_name}}",
						},
						{
							Key:      v3.AttributeKey{Key: "operation_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
							Operator: "IN",
							Operator: v3.FilterOperatorIn,
							Value:    "{{.operation_name}}",
						},
					},

@ -675,6 +675,161 @@ func TestParseQueryRangeParamsDashboardVarsSubstitution(t *testing.T) {
			expectErr:     false,
			expectedValue: []interface{}{"route", []interface{}{"GET /route", "POST /route"}},
		},
		{
			desc: "valid builder query with dashboard variables {{service_name}} and {{operation_name}}",
			compositeQuery: v3.CompositeQuery{
				PanelType: v3.PanelTypeGraph,
				QueryType: v3.QueryTypeBuilder,
				BuilderQueries: map[string]*v3.BuilderQuery{
					"A": {
						QueryName:          "A",
						DataSource:         v3.DataSourceMetrics,
						AggregateOperator:  v3.AggregateOperatorSum,
						AggregateAttribute: v3.AttributeKey{Key: "attribute_metrics"},
						Expression:         "A",
						Filters: &v3.FilterSet{
							Operator: "AND",
							Items: []v3.FilterItem{
								{
									Key:      v3.AttributeKey{Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
									Operator: v3.FilterOperatorEqual,
									Value:    "{{service_name}}",
								},
								{
									Key:      v3.AttributeKey{Key: "operation_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
									Operator: v3.FilterOperatorIn,
									Value:    "{{operation_name}}",
								},
							},
						},
					},
				},
			},
			variables: map[string]interface{}{
				"service_name": "route",
				"operation_name": []interface{}{
					"GET /route",
					"POST /route",
				},
			},
			expectErr:     false,
			expectedValue: []interface{}{"route", []interface{}{"GET /route", "POST /route"}},
		},
		{
			desc: "valid builder query with dashboard variables [[service_name]] and [[operation_name]]",
			compositeQuery: v3.CompositeQuery{
				PanelType: v3.PanelTypeGraph,
				QueryType: v3.QueryTypeBuilder,
				BuilderQueries: map[string]*v3.BuilderQuery{
					"A": {
						QueryName:          "A",
						DataSource:         v3.DataSourceMetrics,
						AggregateOperator:  v3.AggregateOperatorSum,
						AggregateAttribute: v3.AttributeKey{Key: "attribute_metrics"},
						Expression:         "A",
						Filters: &v3.FilterSet{
							Operator: "AND",
							Items: []v3.FilterItem{
								{
									Key:      v3.AttributeKey{Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
									Operator: v3.FilterOperatorEqual,
									Value:    "[[service_name]]",
								},
								{
									Key:      v3.AttributeKey{Key: "operation_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
									Operator: v3.FilterOperatorIn,
									Value:    "[[operation_name]]",
								},
							},
						},
					},
				},
			},
			variables: map[string]interface{}{
				"service_name": "route",
				"operation_name": []interface{}{
					"GET /route",
					"POST /route",
				},
			},
			expectErr:     false,
			expectedValue: []interface{}{"route", []interface{}{"GET /route", "POST /route"}},
		},
		{
			desc: "valid builder query with dashboard variables $service_name and $operation_name",
			compositeQuery: v3.CompositeQuery{
				PanelType: v3.PanelTypeGraph,
				QueryType: v3.QueryTypeBuilder,
				BuilderQueries: map[string]*v3.BuilderQuery{
					"A": {
						QueryName:          "A",
						DataSource:         v3.DataSourceMetrics,
						AggregateOperator:  v3.AggregateOperatorSum,
						AggregateAttribute: v3.AttributeKey{Key: "attribute_metrics"},
						Expression:         "A",
						Filters: &v3.FilterSet{
							Operator: "AND",
							Items: []v3.FilterItem{
								{
									Key:      v3.AttributeKey{Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
									Operator: v3.FilterOperatorEqual,
									Value:    "$service_name",
								},
								{
									Key:      v3.AttributeKey{Key: "operation_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
									Operator: v3.FilterOperatorIn,
									Value:    "$operation_name",
								},
							},
						},
					},
				},
			},
			variables: map[string]interface{}{
				"service_name": "route",
				"operation_name": []interface{}{
					"GET /route",
					"POST /route",
				},
			},
			expectErr:     false,
			expectedValue: []interface{}{"route", []interface{}{"GET /route", "POST /route"}},
		},
		{
			desc: "multiple values for single select operator",
			compositeQuery: v3.CompositeQuery{
				PanelType: v3.PanelTypeGraph,
				QueryType: v3.QueryTypeBuilder,
				BuilderQueries: map[string]*v3.BuilderQuery{
					"A": {
						QueryName:          "A",
						DataSource:         v3.DataSourceMetrics,
						AggregateOperator:  v3.AggregateOperatorSum,
						AggregateAttribute: v3.AttributeKey{Key: "attribute_metrics"},
						Expression:         "A",
						Filters: &v3.FilterSet{
							Operator: "AND",
							Items: []v3.FilterItem{
								{
									Key:      v3.AttributeKey{Key: "operation_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
									Operator: v3.FilterOperatorEqual,
									Value:    "{{.operation_name}}",
								},
							},
						},
					},
				},
			},
			variables: map[string]interface{}{
				"service_name": "route",
				"operation_name": []interface{}{
					"GET /route",
					"POST /route",
				},
			},
			expectErr: true,
			errMsg:    "multiple values [GET /route POST /route] are not allowed for operator `=` for key `operation_name`",
		},
	}
for _, tc := range reqCases {
@ -759,6 +914,72 @@ func TestParseQueryRangeParamsPromQLVars(t *testing.T) {
			expectErr:     false,
			expectedQuery: "http_calls_total{service_name=\"route\", status_code=~\"200|505\"}",
		},
		{
			desc: "valid prom query with dashboard variables {{service_name}} and {{status_code}}",
			compositeQuery: v3.CompositeQuery{
				PanelType: v3.PanelTypeGraph,
				QueryType: v3.QueryTypePromQL,
				PromQueries: map[string]*v3.PromQuery{
					"A": {
						Query:    "http_calls_total{service_name=\"{{service_name}}\", status_code=~\"{{status_code}}\"}",
						Disabled: false,
					},
				},
			},
			variables: map[string]interface{}{
				"service_name": "route",
				"status_code": []interface{}{
					200,
					505,
				},
			},
			expectErr:     false,
			expectedQuery: "http_calls_total{service_name=\"route\", status_code=~\"200|505\"}",
		},
		{
			desc: "valid prom query with dashboard variables [[service_name]] and [[status_code]]",
			compositeQuery: v3.CompositeQuery{
				PanelType: v3.PanelTypeGraph,
				QueryType: v3.QueryTypePromQL,
				PromQueries: map[string]*v3.PromQuery{
					"A": {
						Query:    "http_calls_total{service_name=\"[[service_name]]\", status_code=~\"[[status_code]]\"}",
						Disabled: false,
					},
				},
			},
			variables: map[string]interface{}{
				"service_name": "route",
				"status_code": []interface{}{
					200,
					505,
				},
			},
			expectErr:     false,
			expectedQuery: "http_calls_total{service_name=\"route\", status_code=~\"200|505\"}",
		},
		{
			desc: "valid prom query with dashboard variables $service_name and $status_code",
			compositeQuery: v3.CompositeQuery{
				PanelType: v3.PanelTypeGraph,
				QueryType: v3.QueryTypePromQL,
				PromQueries: map[string]*v3.PromQuery{
					"A": {
						Query:    "http_calls_total{service_name=\"$service_name\", status_code=~\"$status_code\"}",
						Disabled: false,
					},
				},
			},
			variables: map[string]interface{}{
				"service_name": "route",
				"status_code": []interface{}{
					200,
					505,
				},
			},
			expectErr:     false,
			expectedQuery: "http_calls_total{service_name=\"route\", status_code=~\"200|505\"}",
		},
		{
			desc: "valid prom query with dashboard variables",
			compositeQuery: v3.CompositeQuery{
@ -14,6 +14,7 @@ import (
	metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
	tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
	chErrors "go.signoz.io/signoz/pkg/query-service/errors"

	"go.signoz.io/signoz/pkg/query-service/cache"
	"go.signoz.io/signoz/pkg/query-service/interfaces"

@ -283,7 +284,7 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
	return mergedSeries
}

func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) {
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) {

	cacheKeys := q.keyGenerator.GenerateKeys(params)

@ -306,13 +307,13 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
	close(ch)

	results := make([]*v3.Result, 0)
	errQueriesByName := make(map[string]string)
	errQueriesByName := make(map[string]error)
	var errs []error

	for result := range ch {
		if result.Err != nil {
			errs = append(errs, result.Err)
			errQueriesByName[result.Name] = result.Err.Error()
			errQueriesByName[result.Name] = result.Err
			continue
		}
		results = append(results, &v3.Result{

@ -329,7 +330,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
	return results, err, errQueriesByName
}

func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]string) {
func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]error) {
	channelResults := make(chan channelResult, len(params.CompositeQuery.PromQueries))
	var wg sync.WaitGroup
	cacheKeys := q.keyGenerator.GenerateKeys(params)

@ -390,13 +391,13 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
	close(channelResults)

	results := make([]*v3.Result, 0)
	errQueriesByName := make(map[string]string)
	errQueriesByName := make(map[string]error)
	var errs []error

	for result := range channelResults {
		if result.Err != nil {
			errs = append(errs, result.Err)
			errQueriesByName[result.Name] = result.Err.Error()
			errQueriesByName[result.Name] = result.Err
			continue
		}
		results = append(results, &v3.Result{

@ -413,7 +414,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
	return results, err, errQueriesByName
}

func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]string) {
func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]error) {
	channelResults := make(chan channelResult, len(params.CompositeQuery.ClickHouseQueries))
	var wg sync.WaitGroup
	for queryName, clickHouseQuery := range params.CompositeQuery.ClickHouseQueries {

@ -431,13 +432,13 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang
	close(channelResults)

	results := make([]*v3.Result, 0)
	errQueriesByName := make(map[string]string)
	errQueriesByName := make(map[string]error)
	var errs []error

	for result := range channelResults {
		if result.Err != nil {
			errs = append(errs, result.Err)
			errQueriesByName[result.Name] = result.Err.Error()
			errQueriesByName[result.Name] = result.Err
			continue
		}
		results = append(results, &v3.Result{

@ -453,7 +454,7 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang
	return results, err, errQueriesByName
}

func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) {
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) {

	queries, err := q.builder.PrepareQueries(params, keys)

@ -482,13 +483,13 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
	close(ch)

	var errs []error
	errQuriesByName := make(map[string]string)
	errQuriesByName := make(map[string]error)
	res := make([]*v3.Result, 0)
	// read values from the channel
	for r := range ch {
		if r.Err != nil {
			errs = append(errs, r.Err)
			errQuriesByName[r.Name] = r.Query
			errQuriesByName[r.Name] = r.Err
			continue
		}
		res = append(res, &v3.Result{

@ -502,10 +503,10 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
	return res, nil, nil
}

func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) {
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) {
	var results []*v3.Result
	var err error
	var errQueriesByName map[string]string
	var errQueriesByName map[string]error
	if params.CompositeQuery != nil {
		switch params.CompositeQuery.QueryType {
		case v3.QueryTypeBuilder:

@ -514,6 +515,13 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3,
			} else {
				results, err, errQueriesByName = q.runBuilderQueries(ctx, params, keys)
			}
			// in builder queries, the only errors we expose are the ones that exceed the resource limits;
			// everything else is an internal error, as it is not actionable by the user
			for name, err := range errQueriesByName {
				if !chErrors.IsResourceLimitError(err) {
					delete(errQueriesByName, name)
				}
			}
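			// With errQueriesByName now carrying error values rather than
			// strings, the pruning can rely on the typed check. For example,
			// a map such as
			//
			//	{"A": chErrors.ErrResourceBytesLimitExceeded, "B": errors.New("ch failure")}
			//
			// keeps only "A": the limit error is actionable (add filters to
			// shrink the data), while "B" is internal and dropped from the
			// response.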
		case v3.QueryTypePromQL:
			results, err, errQueriesByName = q.runPromQueries(ctx, params)
		case v3.QueryTypeClickHouseSQL:
@ -572,8 +572,8 @@ func TestQueryRange(t *testing.T) {
	}
	q := NewQuerier(opts)
	expectedTimeRangeInQueryString := []string{
		fmt.Sprintf("timestamp_ms >= %d AND timestamp_ms < %d", 1675115520000, 1675115580000+120*60*1000),
		fmt.Sprintf("timestamp_ms >= %d AND timestamp_ms < %d", 1675115520000+120*60*1000, 1675115580000+180*60*1000),
		fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000),
		fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000+120*60*1000, 1675115580000+180*60*1000),
		fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", 1675115580000*1000000, (1675115580000+120*60*1000)*int64(1000000)),
		fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)),
	}

@ -683,7 +683,7 @@ func TestQueryRangeValueType(t *testing.T) {
	q := NewQuerier(opts)
	// No caching
	expectedTimeRangeInQueryString := []string{
		fmt.Sprintf("timestamp_ms >= %d AND timestamp_ms < %d", 1675115520000, 1675115580000+120*60*1000),
		fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000),
		fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)),
	}
@ -14,6 +14,7 @@ import (
	metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4"
	"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
	tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
	chErrors "go.signoz.io/signoz/pkg/query-service/errors"

	"go.signoz.io/signoz/pkg/query-service/cache"
	"go.signoz.io/signoz/pkg/query-service/interfaces"

@ -281,7 +282,7 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
	return mergedSeries
}

func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) {
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) {

	cacheKeys := q.keyGenerator.GenerateKeys(params)

@ -299,13 +300,13 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
	close(ch)

	results := make([]*v3.Result, 0)
	errQueriesByName := make(map[string]string)
	errQueriesByName := make(map[string]error)
	var errs []error

	for result := range ch {
		if result.Err != nil {
			errs = append(errs, result.Err)
			errQueriesByName[result.Name] = result.Err.Error()
			errQueriesByName[result.Name] = result.Err
			continue
		}
		results = append(results, &v3.Result{

@ -322,7 +323,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
	return results, err, errQueriesByName
}

func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]string) {
func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]error) {
	channelResults := make(chan channelResult, len(params.CompositeQuery.PromQueries))
	var wg sync.WaitGroup
	cacheKeys := q.keyGenerator.GenerateKeys(params)

@ -383,13 +384,13 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
	close(channelResults)

	results := make([]*v3.Result, 0)
	errQueriesByName := make(map[string]string)
	errQueriesByName := make(map[string]error)
	var errs []error

	for result := range channelResults {
		if result.Err != nil {
			errs = append(errs, result.Err)
			errQueriesByName[result.Name] = result.Err.Error()
			errQueriesByName[result.Name] = result.Err
			continue
		}
		results = append(results, &v3.Result{

@ -406,7 +407,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
	return results, err, errQueriesByName
}

func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]string) {
func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, error, map[string]error) {
	channelResults := make(chan channelResult, len(params.CompositeQuery.ClickHouseQueries))
	var wg sync.WaitGroup
	for queryName, clickHouseQuery := range params.CompositeQuery.ClickHouseQueries {

@ -424,13 +425,13 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang
	close(channelResults)

	results := make([]*v3.Result, 0)
	errQueriesByName := make(map[string]string)
	errQueriesByName := make(map[string]error)
	var errs []error

	for result := range channelResults {
		if result.Err != nil {
			errs = append(errs, result.Err)
			errQueriesByName[result.Name] = result.Err.Error()
			errQueriesByName[result.Name] = result.Err
			continue
		}
		results = append(results, &v3.Result{

@ -446,7 +447,7 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang
	return results, err, errQueriesByName
}

func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) {
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) {

	queries, err := q.builder.PrepareQueries(params, keys)

@ -475,13 +476,13 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
	close(ch)

	var errs []error
	errQuriesByName := make(map[string]string)
	errQuriesByName := make(map[string]error)
	res := make([]*v3.Result, 0)
	// read values from the channel
	for r := range ch {
		if r.Err != nil {
			errs = append(errs, r.Err)
			errQuriesByName[r.Name] = r.Query
			errQuriesByName[r.Name] = r.Err
			continue
		}
		res = append(res, &v3.Result{

@ -495,10 +496,10 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
	return res, nil, nil
}

func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string) {
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error) {
	var results []*v3.Result
	var err error
	var errQueriesByName map[string]string
	var errQueriesByName map[string]error
	if params.CompositeQuery != nil {
		switch params.CompositeQuery.QueryType {
		case v3.QueryTypeBuilder:

@ -507,6 +508,13 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3,
			} else {
				results, err, errQueriesByName = q.runBuilderQueries(ctx, params, keys)
			}
			// in builder queries, the only errors we expose are the ones that exceed the resource limits;
			// everything else is an internal error, as it is not actionable by the user
			for name, err := range errQueriesByName {
				if !chErrors.IsResourceLimitError(err) {
					delete(errQueriesByName, name)
				}
			}
		case v3.QueryTypePromQL:
			results, err, errQueriesByName = q.runPromQueries(ctx, params)
		case v3.QueryTypeClickHouseSQL:
@ -27,6 +27,7 @@ func TestBuildQueryWithMultipleQueriesAndFormula(t *testing.T) {
			{Key: v3.AttributeKey{Key: "in"}, Value: []interface{}{"a", "b", "c"}, Operator: v3.FilterOperatorIn},
		}},
		AggregateOperator: v3.AggregateOperatorRateMax,
		Temporality:       v3.Cumulative,
		Expression:        "A",
	},
	"B": {

@ -35,6 +36,7 @@ func TestBuildQueryWithMultipleQueriesAndFormula(t *testing.T) {
		AggregateAttribute: v3.AttributeKey{Key: "name2"},
		DataSource:         v3.DataSourceMetrics,
		AggregateOperator:  v3.AggregateOperatorRateAvg,
		Temporality:        v3.Cumulative,
		Expression:         "B",
	},
	"C": {

@ -55,7 +57,7 @@ func TestBuildQueryWithMultipleQueriesAndFormula(t *testing.T) {
		require.NoError(t, err)

		require.Contains(t, queries["C"], "SELECT A.`ts` as `ts`, A.value / B.value")
		require.Contains(t, queries["C"], "WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'in') IN ['a','b','c']")
		require.Contains(t, queries["C"], "WHERE metric_name = 'name' AND temporality = 'Cumulative' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'in') IN ['a','b','c']")
		require.Contains(t, queries["C"], "(value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window)))")
	})
}

@ -257,7 +259,7 @@ func TestDeltaQueryBuilder(t *testing.T) {
			},
		},
		queryToTest: "A",
expected: "SELECT toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY ts ORDER BY ts",
expected: "SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts",
	},
	{
		name: "TestQueryWithExpression - Error rate",

@ -327,7 +329,7 @@ func TestDeltaQueryBuilder(t *testing.T) {
			},
		},
		queryToTest: "C",
expected: "SELECT A.`ts` as `ts`, A.value * 100 / B.value as value FROM (SELECT toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, 'status_code') IN ['STATUS_CODE_ERROR'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY ts ORDER BY ts) as A INNER JOIN (SELECT toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY ts ORDER BY ts) as B ON A.`ts` = B.`ts`",
expected: "SELECT A.`ts` as `ts`, A.value * 100 / B.value as value FROM (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, 'status_code') IN ['STATUS_CODE_ERROR'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts) as A INNER JOIN (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY ts ORDER BY ts) as B ON A.`ts` = B.`ts`",
	},
	{
		name: "TestQuery - Quantile",

@ -348,13 +350,14 @@ func TestDeltaQueryBuilder(t *testing.T) {
					Temporality: v3.Delta,
					GroupBy: []v3.AttributeKey{
						{Key: "service_name"},
						{Key: "le"},
					},
				},
			},
		},
		queryToTest: "A",
expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) as value FROM (SELECT service_name,le, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' ) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts",
expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) as value FROM (SELECT service_name,le, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND unix_milli >= 1650974400000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND unix_milli >= 1650991980000 AND unix_milli <= 1651078380000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts",
	},
}

@ -206,12 +206,9 @@ var GroupByColMap = map[string]struct{}{

const (
	SIGNOZ_METRIC_DBNAME                      = "signoz_metrics"
	SIGNOZ_SAMPLES_TABLENAME                  = "distributed_samples_v2"
	SIGNOZ_SAMPLES_V4_TABLENAME               = "distributed_samples_v4"
	SIGNOZ_TIMESERIES_TABLENAME               = "distributed_time_series_v2"
	SIGNOZ_TRACE_DBNAME                       = "signoz_traces"
	SIGNOZ_SPAN_INDEX_TABLENAME               = "distributed_signoz_index_v2"
	SIGNOZ_TIMESERIES_LOCAL_TABLENAME         = "time_series_v2"
	SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME      = "time_series_v4"
	SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME = "time_series_v4_6hrs"
	SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME = "time_series_v4_1day"
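	// The three time_series_v4 variants are rollups of one series index; the
	// which(start, end) helper used by PrepareTimeseriesFilterQueryV3
	// presumably selects the coarsest table that covers the query window and
	// snaps the start to that table's granularity, along the lines of
	// (cutoffs are assumptions, not the repository's actual values):
	//
	//	window <= 6h  -> time_series_v4
	//	window <= 1d  -> time_series_v4_6hrs
	//	otherwise     -> time_series_v4_1day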
42
pkg/query-service/errors/clickhouse.go
Normal file
@ -0,0 +1,42 @@
package errors

import "errors"

var (
	// ErrResourceBytesLimitExceeded is returned when the resource bytes limit is exceeded
	ErrResourceBytesLimitExceeded = NewResourceLimitError(errors.New("resource bytes limit exceeded, try applying filters such as service.name, etc. to reduce the data size"))
	// ErrResourceTimeLimitExceeded is returned when the resource time limit is exceeded
	ErrResourceTimeLimitExceeded = NewResourceLimitError(errors.New("resource time limit exceeded, try applying filters such as service.name, etc. to reduce the data size"))
)

type ResourceLimitError struct {
	err error
}

func NewResourceLimitError(err error) error {
	return &ResourceLimitError{err: err}
}

func (e *ResourceLimitError) Error() string {
	return e.err.Error()
}

func (e *ResourceLimitError) Unwrap() error {
	return e.err
}

func IsResourceLimitError(err error) bool {
	if err == nil {
		return false
	}
	var target *ResourceLimitError
	return errors.As(err, &target)
}
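// Because the check goes through errors.As, detection survives wrapping with
// fmt.Errorf's %w verb:
//
//	err := fmt.Errorf("query A failed: %w", ErrResourceBytesLimitExceeded)
//	IsResourceLimitError(err)                         // true
//	IsResourceLimitError(errors.New("plain failure")) // false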

func (e *ResourceLimitError) MarshalJSON() ([]byte, error) {
	return []byte(`"` + e.Error() + `"`), nil
}

func (e *ResourceLimitError) UnmarshalJSON([]byte) error {
	return nil
}
@ -107,7 +107,7 @@ type Reader interface {
}

type Querier interface {
	QueryRange(context.Context, *v3.QueryRangeParamsV3, map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]string)
	QueryRange(context.Context, *v3.QueryRangeParamsV3, map[string]v3.AttributeKey) ([]*v3.Result, error, map[string]error)

	// test helpers
	QueriesExecuted() []string
@ -11,6 +11,7 @@ import (
	"go.signoz.io/signoz/pkg/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/auth"
	"go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/migrate"
	"go.signoz.io/signoz/pkg/query-service/version"

	"go.uber.org/zap"

@ -52,6 +53,8 @@ func main() {
	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(flux interval to use)")
	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
	// Allow using the consistent naming with the signoz collector
	flag.StringVar(&cluster, "cluster-name", "cluster", "(cluster name - defaults to 'cluster')")
	flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool, only used with clickhouse if not set in ClickHouseUrl env var DSN.)")
	flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time, only used with clickhouse if not set in ClickHouseUrl env var DSN.)")
	flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection, only used with clickhouse if not set in ClickHouseUrl env var DSN.)")

@ -90,6 +93,12 @@ func main() {
		zap.L().Info("JWT secret key set successfully.")
	}

	if err := migrate.Migrate(constants.RELATIONAL_DATASOURCE_PATH); err != nil {
		zap.L().Error("Failed to migrate", zap.Error(err))
	} else {
		zap.L().Info("Migration successful")
	}

	server, err := app.NewServer(serverOptions)
	if err != nil {
		logger.Fatal("Failed to create server", zap.Error(err))
153
pkg/query-service/migrate/0_45_alerts_to_v4/run.go
Normal file
@ -0,0 +1,153 @@
package alertstov4

import (
	"context"
	"encoding/json"

	"github.com/jmoiron/sqlx"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/query-service/rules"
	"go.uber.org/multierr"
	"go.uber.org/zap"
)

var Version = "0.45-alerts-to-v4"

var mapTimeAggregation = map[v3.AggregateOperator]v3.TimeAggregation{
	v3.AggregateOperatorSum:         v3.TimeAggregationSum,
	v3.AggregateOperatorMin:         v3.TimeAggregationMin,
	v3.AggregateOperatorMax:         v3.TimeAggregationMax,
	v3.AggregateOperatorSumRate:     v3.TimeAggregationRate,
	v3.AggregateOperatorAvgRate:     v3.TimeAggregationRate,
	v3.AggregateOperatorMinRate:     v3.TimeAggregationRate,
	v3.AggregateOperatorMaxRate:     v3.TimeAggregationRate,
	v3.AggregateOperatorHistQuant50: v3.TimeAggregationUnspecified,
	v3.AggregateOperatorHistQuant75: v3.TimeAggregationUnspecified,
	v3.AggregateOperatorHistQuant90: v3.TimeAggregationUnspecified,
	v3.AggregateOperatorHistQuant95: v3.TimeAggregationUnspecified,
	v3.AggregateOperatorHistQuant99: v3.TimeAggregationUnspecified,
}

var mapSpaceAggregation = map[v3.AggregateOperator]v3.SpaceAggregation{
	v3.AggregateOperatorSum:         v3.SpaceAggregationSum,
	v3.AggregateOperatorMin:         v3.SpaceAggregationMin,
	v3.AggregateOperatorMax:         v3.SpaceAggregationMax,
	v3.AggregateOperatorSumRate:     v3.SpaceAggregationSum,
	v3.AggregateOperatorAvgRate:     v3.SpaceAggregationAvg,
	v3.AggregateOperatorMinRate:     v3.SpaceAggregationMin,
	v3.AggregateOperatorMaxRate:     v3.SpaceAggregationMax,
	v3.AggregateOperatorHistQuant50: v3.SpaceAggregationPercentile50,
	v3.AggregateOperatorHistQuant75: v3.SpaceAggregationPercentile75,
	v3.AggregateOperatorHistQuant90: v3.SpaceAggregationPercentile90,
	v3.AggregateOperatorHistQuant95: v3.SpaceAggregationPercentile95,
	v3.AggregateOperatorHistQuant99: v3.SpaceAggregationPercentile99,
}

func canMigrateOperator(operator v3.AggregateOperator) bool {
	switch operator {
	case v3.AggregateOperatorSum,
		v3.AggregateOperatorMin,
		v3.AggregateOperatorMax,
		v3.AggregateOperatorSumRate,
		v3.AggregateOperatorAvgRate,
		v3.AggregateOperatorMinRate,
		v3.AggregateOperatorMaxRate,
		v3.AggregateOperatorHistQuant50,
		v3.AggregateOperatorHistQuant75,
		v3.AggregateOperatorHistQuant90,
		v3.AggregateOperatorHistQuant95,
		v3.AggregateOperatorHistQuant99:
		return true
	}
	return false
}
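// Taken together, the two tables above split a v3 operator into its time and
// space halves; for example:
//
//	mapTimeAggregation[v3.AggregateOperatorSumRate]  == v3.TimeAggregationRate
//	mapSpaceAggregation[v3.AggregateOperatorSumRate] == v3.SpaceAggregationSum
//	mapSpaceAggregation[v3.AggregateOperatorAvgRate] == v3.SpaceAggregationAvg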

func Migrate(conn *sqlx.DB) error {
	ruleDB := rules.NewRuleDB(conn)
	storedRules, err := ruleDB.GetStoredRules(context.Background())
	if err != nil {
		return err
	}

	for _, storedRule := range storedRules {
		parsedRule, errs := rules.ParsePostableRule([]byte(storedRule.Data))
		if len(errs) > 0 {
			// this should not happen but if it does, we should not stop the migration
			zap.L().Error("Error parsing rule", zap.Error(multierr.Combine(errs...)), zap.Int("rule", storedRule.Id))
			continue
		}
		zap.L().Info("Rule parsed", zap.Int("rule", storedRule.Id))
		updated := false
		if parsedRule.RuleCondition != nil && parsedRule.Version == "" {
			if parsedRule.RuleCondition.QueryType() == v3.QueryTypeBuilder {
				// check if all the queries can be converted to v4
				canMigrate := true
				for _, query := range parsedRule.RuleCondition.CompositeQuery.BuilderQueries {
					if query.DataSource == v3.DataSourceMetrics && query.Expression == query.QueryName {
						if !canMigrateOperator(query.AggregateOperator) {
							canMigrate = false
							break
						}
					}
				}

				if canMigrate {
					parsedRule.Version = "v4"
					for _, query := range parsedRule.RuleCondition.CompositeQuery.BuilderQueries {
						if query.DataSource == v3.DataSourceMetrics && query.Expression == query.QueryName {
							// update aggregate attribute
							if query.AggregateOperator == v3.AggregateOperatorSum ||
								query.AggregateOperator == v3.AggregateOperatorMin ||
								query.AggregateOperator == v3.AggregateOperatorMax {
								query.AggregateAttribute.Type = "Gauge"
							}
							if query.AggregateOperator == v3.AggregateOperatorSumRate ||
								query.AggregateOperator == v3.AggregateOperatorAvgRate ||
								query.AggregateOperator == v3.AggregateOperatorMinRate ||
								query.AggregateOperator == v3.AggregateOperatorMaxRate {
								query.AggregateAttribute.Type = "Sum"
							}

							if query.AggregateOperator == v3.AggregateOperatorHistQuant50 ||
								query.AggregateOperator == v3.AggregateOperatorHistQuant75 ||
								query.AggregateOperator == v3.AggregateOperatorHistQuant90 ||
								query.AggregateOperator == v3.AggregateOperatorHistQuant95 ||
								query.AggregateOperator == v3.AggregateOperatorHistQuant99 {
								query.AggregateAttribute.Type = "Histogram"
							}
							query.AggregateAttribute.DataType = v3.AttributeKeyDataTypeFloat64
							query.AggregateAttribute.IsColumn = true
							query.TimeAggregation = mapTimeAggregation[query.AggregateOperator]
							query.SpaceAggregation = mapSpaceAggregation[query.AggregateOperator]
							query.AggregateOperator = v3.AggregateOperator(query.TimeAggregation)
							updated = true
						}
					}
				}
			}
		}

		if !updated {
			zap.L().Info("Rule not updated", zap.Int("rule", storedRule.Id))
			continue
		}

		ruleJSON, jsonErr := json.Marshal(parsedRule)
		if jsonErr != nil {
			zap.L().Error("Error marshalling rule; skipping rule migration", zap.Error(jsonErr), zap.Int("rule", storedRule.Id))
			continue
		}

		stmt, prepareError := conn.PrepareContext(context.Background(), `UPDATE rules SET data=$3 WHERE id=$4;`)
		if prepareError != nil {
			zap.L().Error("Error in preparing statement for UPDATE to rules", zap.Error(prepareError))
			continue
		}
		defer stmt.Close()

		if _, err := stmt.Exec(ruleJSON, storedRule.Id); err != nil {
			zap.L().Error("Error in Executing prepared statement for UPDATE to rules", zap.Error(err))
		}
	}
	return nil
}
67
pkg/query-service/migrate/migate.go
Normal file
@ -0,0 +1,67 @@
package migrate

import (
	"database/sql"

	"github.com/jmoiron/sqlx"
	alertstov4 "go.signoz.io/signoz/pkg/query-service/migrate/0_45_alerts_to_v4"
	"go.uber.org/zap"
)

type DataMigration struct {
	ID        int    `db:"id"`
	Version   string `db:"version"`
	CreatedAt string `db:"created_at"`
	Succeeded bool   `db:"succeeded"`
}

func initSchema(conn *sqlx.DB) error {
	tableSchema := `
		CREATE TABLE IF NOT EXISTS data_migrations (
			id SERIAL PRIMARY KEY,
			version VARCHAR(255) NOT NULL UNIQUE,
			created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
			succeeded BOOLEAN NOT NULL DEFAULT FALSE
		);
	`
	_, err := conn.Exec(tableSchema)
	if err != nil {
		return err
	}
	return nil
}

func getMigrationVersion(conn *sqlx.DB, version string) (*DataMigration, error) {
	var migration DataMigration
	err := conn.Get(&migration, "SELECT * FROM data_migrations WHERE version = $1", version)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil, nil
		}
		return nil, err
	}
	return &migration, nil
}

func Migrate(dsn string) error {
	conn, err := sqlx.Connect("sqlite3", dsn)
	if err != nil {
		return err
	}
	if err := initSchema(conn); err != nil {
		return err
	}

	if m, err := getMigrationVersion(conn, "0.45_alerts_to_v4"); err == nil && m == nil {
		if err := alertstov4.Migrate(conn); err != nil {
			zap.L().Error("failed to migrate 0.45_alerts_to_v4", zap.Error(err))
		} else {
			_, err := conn.Exec("INSERT INTO data_migrations (version, succeeded) VALUES ('0.45_alerts_to_v4', true)")
			if err != nil {
				return err
			}
		}
	}

	return nil
}
|
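A sketch of how this entry point might be invoked at query-service startup; the DSN value and call site are illustrative, and the sqlite3 driver has to be registered by an import in the final binary:

package main

import (
	"log"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver

	"go.signoz.io/signoz/pkg/query-service/migrate"
)

func main() {
	// hypothetical path; the service derives the real DSN from its config
	if err := migrate.Migrate("/var/lib/signoz/signoz.db"); err != nil {
		log.Fatalf("data migrations failed: %v", err)
	}
}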
@ -56,14 +56,14 @@ var BasicPlan = FeatureSet{
		Name:       QueryBuilderPanels,
		Active:     true,
		Usage:      0,
		UsageLimit: 20,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       QueryBuilderAlerts,
		Active:     true,
		Usage:      0,
		UsageLimit: 10,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
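The change lifts the basic plan's panel and alert caps by switching UsageLimit from a positive cap to -1. A minimal sketch of how a negative limit naturally reads as "unlimited"; the helper and stand-in struct below are hypothetical, not SigNoz's actual enforcement code:

// Hypothetical stand-in mirroring the Feature fields used here.
type feature struct {
	Usage      int64
	UsageLimit int64
}

func withinLimit(f feature) bool {
	if f.UsageLimit < 0 { // -1 (or any negative value) means no cap
		return true
	}
	return f.Usage < f.UsageLimit
}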
@ -433,26 +433,32 @@ func (c *CompositeQuery) Validate() error {
	}

	if c.BuilderQueries == nil && c.ClickHouseQueries == nil && c.PromQueries == nil {
		return fmt.Errorf("composite query must contain at least one query")
		return fmt.Errorf("composite query must contain at least one query type")
	}

	if c.QueryType == QueryTypeBuilder {
		for name, query := range c.BuilderQueries {
			if err := query.Validate(); err != nil {
			if err := query.Validate(c.PanelType); err != nil {
				return fmt.Errorf("builder query %s is invalid: %w", name, err)
			}
		}
	}

	if c.QueryType == QueryTypeClickHouseSQL {
		for name, query := range c.ClickHouseQueries {
			if err := query.Validate(); err != nil {
				return fmt.Errorf("clickhouse query %s is invalid: %w", name, err)
			}
		}
	}

	if c.QueryType == QueryTypePromQL {
		for name, query := range c.PromQueries {
			if err := query.Validate(); err != nil {
				return fmt.Errorf("prom query %s is invalid: %w", name, err)
			}
		}
	}

	if err := c.PanelType.Validate(); err != nil {
		return fmt.Errorf("panel type is invalid: %w", err)
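For illustration, a composite query that declares a query type but carries no queries fails validation with the sharpened message; a minimal sketch, assuming the v3 model import path:

package main

import (
	"fmt"

	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

func main() {
	cq := &v3.CompositeQuery{
		QueryType: v3.QueryTypeBuilder,
		PanelType: v3.PanelTypeGraph,
	}
	// prints: composite query must contain at least one query type
	fmt.Println(cq.Validate())
}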
@ -663,10 +669,11 @@ type BuilderQuery struct {
	ShiftBy int64
}

func (b *BuilderQuery) Validate() error {
func (b *BuilderQuery) Validate(panelType PanelType) error {
	if b == nil {
		return nil
	}

	if b.QueryName == "" {
		return fmt.Errorf("query name is required")
	}
@ -711,6 +718,10 @@ func (b *BuilderQuery) Validate() error {
		}
	}
	if b.GroupBy != nil {
		if len(b.GroupBy) > 0 && panelType == PanelTypeList {
			return fmt.Errorf("group by is not supported for list panel type")
		}

		for _, groupBy := range b.GroupBy {
			if err := groupBy.Validate(); err != nil {
				return fmt.Errorf("group by is invalid %w", err)
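A sketch of the new panel-type-aware check; the fields Validate inspects before it reaches the group-by rule (data source, aggregate operator, and so on) are elided here, so treat this as an illustration of the call shape rather than an exact transcript of the error:

package main

import (
	"fmt"

	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

func main() {
	// Sketch only: earlier-validated fields are elided for brevity, so the
	// returned error may come from one of those earlier checks.
	q := &v3.BuilderQuery{
		QueryName:  "A",
		Expression: "A",
		GroupBy:    []v3.AttributeKey{{Key: "service.name"}},
	}
	fmt.Println(q.Validate(v3.PanelTypeList))
}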
@ -49,7 +49,7 @@ type ruleDB struct {
// todo: move init methods for creating tables

func newRuleDB(db *sqlx.DB) RuleDB {
func NewRuleDB(db *sqlx.DB) RuleDB {
	return &ruleDB{
		db,
	}
@ -108,7 +108,7 @@ func NewManager(o *ManagerOptions) (*Manager, error) {
		return nil, err
	}

	db := newRuleDB(o.DBConn)
	db := NewRuleDB(o.DBConn)

	m := &Manager{
		tasks: map[string]Task{},
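Exporting the constructor lets code outside the rules package (for example, a data migration) build a RuleDB over an existing connection. A hedged sketch, assuming the rules package import path and a hypothetical DSN:

package main

import (
	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"

	"go.signoz.io/signoz/pkg/query-service/rules"
)

func main() {
	// hypothetical path; shown only to demonstrate the exported constructor
	conn := sqlx.MustConnect("sqlite3", "/var/lib/signoz/signoz.db")
	_ = rules.NewRuleDB(conn)
}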
@ -327,6 +327,7 @@ func TestDashboardsForInstalledIntegrationDashboards(t *testing.T) {
	// Installing an integration should make its dashboards appear in the dashboard list
	require.False(testAvailableIntegration.IsInstalled)
	tsBeforeInstallation := time.Now().Unix()
	integrationsTB.RequestQSToInstallIntegration(
		testAvailableIntegration.Id, map[string]interface{}{},
	)
@ -344,9 +345,13 @@ func TestDashboardsForInstalledIntegrationDashboards(t *testing.T) {
		len(testIntegrationDashboards), len(dashboards),
		"dashboards for installed integrations should appear in dashboards list",
	)
	require.GreaterOrEqual(dashboards[0].CreatedAt.Unix(), tsBeforeInstallation)
	require.GreaterOrEqual(dashboards[0].UpdatedAt.Unix(), tsBeforeInstallation)

	// Should be able to get an installed integration's dashboard by id
	dd := integrationsTB.GetDashboardByIdFromQS(dashboards[0].Uuid)
	require.GreaterOrEqual(dd.CreatedAt.Unix(), tsBeforeInstallation)
	require.GreaterOrEqual(dd.UpdatedAt.Unix(), tsBeforeInstallation)
	require.Equal(*dd, dashboards[0])

	// Integration dashboards should no longer appear in dashboard list after uninstallation
@ -192,7 +192,7 @@ services:
    <<: *db-depend

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.22}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.24}
    container_name: otel-migrator
    command:
      - "--dsn=tcp://clickhouse:9000"
@ -205,7 +205,7 @@ services:
    #   condition: service_healthy

  otel-collector:
    image: signoz/signoz-otel-collector:0.88.22
    image: signoz/signoz-otel-collector:0.88.24
    container_name: signoz-otel-collector
    command:
      [
@ -167,7 +167,7 @@ func ClickHouseFormattedValue(v interface{}) string {
	case []interface{}:
		if len(x) == 0 {
			return ""
			return "[]"
		}
		switch x[0].(type) {
		case string:
@ -184,7 +184,7 @@ func ClickHouseFormattedValue(v interface{}) string {
			return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
		default:
			zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
			return ""
			return "[]"
		}
	default:
		zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
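The behavioural point of this hunk: an empty or unrecognised slice now renders as an empty array literal instead of an empty string, so generated SQL fragments stay well-formed. A sketch, assuming ClickHouseFormattedValue lives in the query-service utils package:

package main

import (
	"fmt"

	"go.signoz.io/signoz/pkg/query-service/utils"
)

func main() {
	vals := []interface{}{}
	// before this change: "attribute IN " (truncated); now: "attribute IN []"
	fmt.Printf("attribute IN %s\n", utils.ClickHouseFormattedValue(vals))
}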
@ -8,17 +8,17 @@ import (
// AssignReservedVarsV3 assigns values for go template vars. It assumes that
// model.QueryRangeParamsV3.Start and End are Unix millisecond timestamps.
func AssignReservedVarsV3(metricsQueryRangeParams *v3.QueryRangeParamsV3) {
	metricsQueryRangeParams.Variables["start_timestamp"] = metricsQueryRangeParams.Start / 1000
	metricsQueryRangeParams.Variables["end_timestamp"] = metricsQueryRangeParams.End / 1000
func AssignReservedVarsV3(queryRangeParams *v3.QueryRangeParamsV3) {
	queryRangeParams.Variables["start_timestamp"] = queryRangeParams.Start / 1000
	queryRangeParams.Variables["end_timestamp"] = queryRangeParams.End / 1000

	metricsQueryRangeParams.Variables["start_timestamp_ms"] = metricsQueryRangeParams.Start
	metricsQueryRangeParams.Variables["end_timestamp_ms"] = metricsQueryRangeParams.End
	queryRangeParams.Variables["start_timestamp_ms"] = queryRangeParams.Start
	queryRangeParams.Variables["end_timestamp_ms"] = queryRangeParams.End

	metricsQueryRangeParams.Variables["start_timestamp_nano"] = metricsQueryRangeParams.Start * 1e6
	metricsQueryRangeParams.Variables["end_timestamp_nano"] = metricsQueryRangeParams.End * 1e6
	queryRangeParams.Variables["start_timestamp_nano"] = queryRangeParams.Start * 1e6
	queryRangeParams.Variables["end_timestamp_nano"] = queryRangeParams.End * 1e6

	metricsQueryRangeParams.Variables["start_datetime"] = fmt.Sprintf("toDateTime(%d)", metricsQueryRangeParams.Start/1000)
	metricsQueryRangeParams.Variables["end_datetime"] = fmt.Sprintf("toDateTime(%d)", metricsQueryRangeParams.End/1000)
	queryRangeParams.Variables["start_datetime"] = fmt.Sprintf("toDateTime(%d)", queryRangeParams.Start/1000)
	queryRangeParams.Variables["end_datetime"] = fmt.Sprintf("toDateTime(%d)", queryRangeParams.End/1000)

}
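A sketch of the reserved variables this produces, written as if in the same package as AssignReservedVarsV3 and assuming Start/End are Unix milliseconds, as the divisions above imply; the timestamp values are illustrative:

// Same package as AssignReservedVarsV3; illustrative values only.
func exampleReservedVars() {
	params := &v3.QueryRangeParamsV3{
		Start:     1717000000000, // Unix ms
		End:       1717003600000, // Unix ms
		Variables: map[string]interface{}{},
	}
	AssignReservedVarsV3(params)
	fmt.Println(params.Variables["start_datetime"]) // toDateTime(1717000000)
}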