Merge pull request #3165 from SigNoz/release/v0.23.1

Release/v0.23.1
Prashant Shahi 2023-07-18 09:23:06 +05:30 committed by GitHub
commit 0ed6594e48
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
34 changed files with 1768 additions and 341 deletions

View File

@@ -137,7 +137,7 @@ services:
condition: on-failure
query-service:
image: signoz/query-service:0.23.0
image: signoz/query-service:0.23.1
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
@@ -166,7 +166,7 @@ services:
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.23.0
image: signoz/frontend:0.23.1
deploy:
restart_policy:
condition: on-failure

View File

@@ -153,7 +153,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.23.0}
image: signoz/query-service:${DOCKER_TAG:-0.23.1}
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
@@ -181,7 +181,7 @@ services:
<<: *clickhouse-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.23.0}
image: signoz/frontend:${DOCKER_TAG:-0.23.1}
container_name: frontend
restart: on-failure
depends_on:

View File

@@ -15,13 +15,14 @@ import (
)
type APIHandlerOptions struct {
DataConnector interfaces.DataConnector
SkipConfig *basemodel.SkipConfig
PreferDelta bool
AppDao dao.ModelDao
RulesManager *rules.Manager
FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager
DataConnector interfaces.DataConnector
SkipConfig *basemodel.SkipConfig
PreferDelta bool
PreferSpanMetrics bool
AppDao dao.ModelDao
RulesManager *rules.Manager
FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager
}
type APIHandler struct {
@@ -33,12 +34,13 @@ type APIHandler struct {
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector,
SkipConfig: opts.SkipConfig,
PerferDelta: opts.PreferDelta,
AppDao: opts.AppDao,
RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags})
Reader: opts.DataConnector,
SkipConfig: opts.SkipConfig,
PerferDelta: opts.PreferDelta,
PreferSpanMetrics: opts.PreferSpanMetrics,
AppDao: opts.AppDao,
RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags})
if err != nil {
return nil, err

View File

@@ -2,6 +2,8 @@ package api
import (
"net/http"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
@@ -10,5 +12,13 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
ah.HandleError(w, err, http.StatusInternalServerError)
return
}
if ah.opts.PreferSpanMetrics {
for idx := range featureSet {
feature := &featureSet[idx]
if feature.Name == basemodel.UseSpanMetrics {
featureSet[idx].Active = true
}
}
}
ah.Respond(w, featureSet)
}

View File

@@ -54,9 +54,10 @@ type ServerOptions struct {
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
PreferDelta bool
DisableRules bool
RuleRepoURL string
PreferDelta bool
PreferSpanMetrics bool
}
// Server runs HTTP api service
@@ -169,13 +170,14 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
telemetry.GetInstance().SetReader(reader)
apiOpts := api.APIHandlerOptions{
DataConnector: reader,
SkipConfig: skipConfig,
PreferDelta: serverOptions.PreferDelta,
AppDao: modelDao,
RulesManager: rm,
FeatureFlags: lm,
LicenseManager: lm,
DataConnector: reader,
SkipConfig: skipConfig,
PreferDelta: serverOptions.PreferDelta,
PreferSpanMetrics: serverOptions.PreferSpanMetrics,
AppDao: modelDao,
RulesManager: rm,
FeatureFlags: lm,
LicenseManager: lm,
}
apiHandler, err := api.NewAPIHandler(apiOpts)

View File

@@ -84,11 +84,13 @@ func main() {
var enableQueryServiceLogOTLPExport bool
var preferDelta bool
var preferSpanMetrics bool
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over raw metrics)")
flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
flag.Parse()
@@ -105,6 +107,7 @@ func main() {
PromConfigPath: promConfigPath,
SkipTopLvlOpsPath: skipTopLvlOpsPath,
PreferDelta: preferDelta,
PreferSpanMetrics: preferSpanMetrics,
PrivateHostPort: baseconst.PrivateHostPort,
DisableRules: disableRules,
RuleRepoURL: ruleRepoURL,

View File

@@ -60,6 +60,13 @@ var BasicPlan = basemodel.FeatureSet{
UsageLimit: 5,
Route: "",
},
basemodel.Feature{
Name: basemodel.UseSpanMetrics,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
}
var ProPlan = basemodel.FeatureSet{
@@ -105,6 +112,13 @@ var ProPlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.UseSpanMetrics,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
}
var EnterprisePlan = basemodel.FeatureSet{
@@ -150,4 +164,11 @@ var EnterprisePlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.UseSpanMetrics,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
}
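
Taken together, the hunks above wire the new flag end to end: main.go parses -prefer-span-metrics, ServerOptions and APIHandlerOptions carry it into the handlers, and getFeatureFlags marks the UseSpanMetrics feature active in whichever plan's FeatureSet is returned. A condensed, runnable sketch of that override pattern, with simplified stand-ins for basemodel.Feature and basemodel.FeatureSet (the feature-name string is an assumption):

package main

import "fmt"

const UseSpanMetrics = "USE_SPAN_METRICS" // assumed value of basemodel.UseSpanMetrics

// Feature and FeatureSet are reduced to the fields the override touches.
type Feature struct {
	Name   string
	Active bool
}

type FeatureSet []Feature

// overrideSpanMetrics mirrors the loop added to getFeatureFlags: when the
// flag is set, flip the UseSpanMetrics feature on before responding.
func overrideSpanMetrics(featureSet FeatureSet, preferSpanMetrics bool) FeatureSet {
	if preferSpanMetrics {
		for idx := range featureSet {
			// index into the slice so the write mutates the element, not a copy
			if featureSet[idx].Name == UseSpanMetrics {
				featureSet[idx].Active = true
			}
		}
	}
	return featureSet
}

func main() {
	fs := FeatureSet{{Name: UseSpanMetrics, Active: false}}
	fmt.Println(overrideSpanMetrics(fs, true)) // [{USE_SPAN_METRICS true}]
}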

View File

@@ -1,6 +1,7 @@
import { Drawer, Tabs } from 'antd';
import JSONView from 'container/LogDetailedView/JsonView';
import TableView from 'container/LogDetailedView/TableView';
import { useMemo } from 'react';
import { LogDetailProps } from './LogDetail.interfaces';
@@ -14,24 +15,27 @@ function LogDetail({
onClose();
};
const items = [
{
label: 'Table',
key: '1',
children: log && (
<TableView
logData={log}
onAddToQuery={onAddToQuery}
onClickActionItem={onClickActionItem}
/>
),
},
{
label: 'JSON',
key: '2',
children: log && <JSONView logData={log} />,
},
];
const items = useMemo(
() => [
{
label: 'Table',
key: '1',
children: log && (
<TableView
logData={log}
onAddToQuery={onAddToQuery}
onClickActionItem={onClickActionItem}
/>
),
},
{
label: 'JSON',
key: '2',
children: log && <JSONView logData={log} />,
},
],
[log, onAddToQuery, onClickActionItem],
);
return (
<Drawer

View File

@@ -6,7 +6,6 @@ import dayjs from 'dayjs';
import dompurify from 'dompurify';
import { FlatLogData } from 'lib/logs/flatLogData';
import { useMemo } from 'react';
import { ILog } from 'types/api/logs/log';
import { ExpandIconWrapper } from '../RawLogView/styles';
import { defaultCellStyle, defaultTableStyle } from './config';
@@ -57,14 +56,14 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => {
dataIndex: 'id',
key: 'expand',
// https://github.com/ant-design/ant-design/discussions/36886
render: (_, item): ColumnTypeRender<Record<string, unknown>> => ({
render: (_, item, index): ColumnTypeRender<Record<string, unknown>> => ({
props: {
style: defaultCellStyle,
},
children: (
<ExpandIconWrapper
onClick={(): void => {
onClickExpand((item as unknown) as ILog);
onClickExpand(logs[index]);
}}
>
<ExpandAltOutlined />
@@ -108,7 +107,7 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => {
},
...(appendTo === 'end' ? fieldColumns : []),
];
}, [fields, linesPerRow, appendTo, onClickExpand]);
}, [fields, appendTo, linesPerRow, onClickExpand, logs]);
return { columns, dataSource: flattenLogData };
};

View File

@@ -0,0 +1,29 @@
import { ArrowUpOutlined } from '@ant-design/icons';
import { FloatButton } from 'antd';
import { PANEL_TYPES } from 'constants/queryBuilder';
// hooks
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import useScrollToTop from 'hooks/useScrollToTop';
function GoToTop(): JSX.Element | null {
const { isVisible, scrollToTop } = useScrollToTop();
const { panelType } = useQueryBuilder();
if (!isVisible) return null;
if (panelType === PANEL_TYPES.LIST) {
return (
<FloatButton
onClick={scrollToTop}
shape="circle"
type="primary"
icon={<ArrowUpOutlined />}
/>
);
}
return null;
}
export default GoToTop;

View File

@@ -146,12 +146,17 @@ function LogsExplorerList({
isShowPageSize={false}
optionsMenuConfig={config}
/>
{options.format !== 'table' && (
<Heading>
<Typography.Text>Event</Typography.Text>
</Heading>
)}
{logs.length === 0 && <Typography>No logs lines found</Typography>}
{!isLoading && logs.length === 0 && (
<Typography>No logs lines found</Typography>
)}
<InfinityWrapperStyled>{renderContent}</InfinityWrapperStyled>
</>
);

View File

@@ -1,5 +1,4 @@
import { TabsProps } from 'antd';
import axios from 'axios';
import LogDetail from 'components/LogDetail';
import TabLabel from 'components/TabLabel';
import { QueryParams } from 'constants/query';
@@ -13,16 +12,17 @@ import { queryParamNamesMap } from 'constants/queryBuilderQueryNames';
import ROUTES from 'constants/routes';
import { DEFAULT_PER_PAGE_VALUE } from 'container/Controls/config';
import ExportPanel from 'container/ExportPanel';
import GoToTop from 'container/GoToTop';
import LogsExplorerChart from 'container/LogsExplorerChart';
import LogsExplorerList from 'container/LogsExplorerList';
// TODO: temporary hide table view
// import LogsExplorerTable from 'container/LogsExplorerTable';
import LogsExplorerTable from 'container/LogsExplorerTable';
import { GRAPH_TYPES } from 'container/NewDashboard/ComponentsSlider';
import TimeSeriesView from 'container/TimeSeriesView/TimeSeriesView';
import { useUpdateDashboard } from 'hooks/dashboard/useUpdateDashboard';
import { addEmptyWidgetInDashboardJSONWithQuery } from 'hooks/dashboard/utils';
import { useGetExplorerQueryRange } from 'hooks/queryBuilder/useGetExplorerQueryRange';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import useAxiosError from 'hooks/useAxiosError';
import { useNotifications } from 'hooks/useNotifications';
import useUrlQueryData from 'hooks/useUrlQueryData';
import { chooseAutocompleteFromCustomValue } from 'lib/newQueryBuilder/chooseAutocompleteFromCustomValue';
@@ -82,6 +82,8 @@ function LogsExplorerViews(): JSX.Element {
const [logs, setLogs] = useState<ILog[]>([]);
const [requestData, setRequestData] = useState<Query | null>(null);
const handleAxisError = useAxiosError();
const currentStagedQueryData = useMemo(() => {
if (!stagedQuery || stagedQuery.builder.queryData.length !== 1) return null;
@@ -358,16 +360,16 @@ function LogsExplorerViews(): JSX.Element {
history.push(dashboardEditView);
},
onError: (error) => {
if (axios.isAxiosError(error)) {
notifications.error({
message: error.message,
});
}
},
onError: handleAxisError,
});
},
[exportDefaultQuery, history, notifications, updateDashboard],
[
exportDefaultQuery,
history,
notifications,
updateDashboard,
handleAxisError,
],
);
useEffect(() => {
@@ -437,17 +439,16 @@ function LogsExplorerViews(): JSX.Element {
<TimeSeriesView isLoading={isFetching} data={data} isError={isError} />
),
},
// TODO: temporary hide table view
// {
// label: 'Table',
// key: PANEL_TYPES.TABLE,
// children: (
// <LogsExplorerTable
// data={data?.payload.data.newResult.data.result || []}
// isLoading={isFetching}
// />
// ),
// },
{
label: 'Table',
key: PANEL_TYPES.TABLE,
children: (
<LogsExplorerTable
data={data?.payload.data.newResult.data.result || []}
isLoading={isFetching}
/>
),
},
],
[
isMultipleQueries,
@@ -513,6 +514,8 @@ function LogsExplorerViews(): JSX.Element {
onAddToQuery={handleAddToQuery}
onClickActionItem={handleAddToQuery}
/>
<GoToTop />
</>
);
}

View File

@@ -1,8 +1,8 @@
import { InputNumber, Tooltip } from 'antd';
// import { useMemo } from 'react';
import { InputNumber } from 'antd';
import { useMemo } from 'react';
import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
import { DataSource } from 'types/common/queryBuilder';
// import { DataSource } from 'types/common/queryBuilder';
import { selectStyle } from '../QueryBuilderSearch/config';
function LimitFilter({ onChange, query }: LimitFilterProps): JSX.Element {
@@ -21,25 +21,23 @@ function LimitFilter({ onChange, query }: LimitFilterProps): JSX.Element {
}
};
// const isMetricsDataSource = useMemo(
// () => query.dataSource === DataSource.METRICS,
// [query.dataSource],
// );
const isMetricsDataSource = useMemo(
() => query.dataSource === DataSource.METRICS,
[query.dataSource],
);
// const isDisabled = isMetricsDataSource && !query.aggregateAttribute.key;
const isDisabled = isMetricsDataSource && !query.aggregateAttribute.key;
return (
<Tooltip placement="top" title="coming soon">
<InputNumber
min={1}
type="number"
readOnly
value={query.limit}
style={selectStyle}
onChange={onChange}
onKeyDown={handleKeyDown}
/>
</Tooltip>
<InputNumber
min={1}
type="number"
value={query.limit}
style={selectStyle}
disabled={isDisabled}
onChange={onChange}
onKeyDown={handleKeyDown}
/>
);
}

View File

@@ -0,0 +1,29 @@
import throttle from 'lodash-es/throttle';
import { useEffect, useState } from 'react';
import { UseScrollToTop } from './types';
function useScrollToTop(visibleOffset = 200): UseScrollToTop {
const [isVisible, setIsVisible] = useState<boolean>(false);
const scrollToTop = (): void => {
window.scrollTo({
top: 0,
behavior: 'smooth',
});
};
useEffect(() => {
const toggleVisibility = throttle(() => {
setIsVisible(window.pageYOffset > visibleOffset);
}, 300);
window.addEventListener('scroll', toggleVisibility);
return (): void => window.removeEventListener('scroll', toggleVisibility);
}, [visibleOffset]);
return { isVisible, scrollToTop };
}
export default useScrollToTop;

View File

@@ -0,0 +1,4 @@
export interface UseScrollToTop {
isVisible: boolean;
scrollToTop: VoidFunction;
}

View File

@@ -0,0 +1,58 @@
import { act, renderHook } from '@testing-library/react';
import useScrollToTop from './index';
// Mocking window.scrollTo method
global.scrollTo = jest.fn();
describe('useScrollToTop hook', () => {
beforeAll(() => {
jest.useFakeTimers();
});
it('should change visibility and scroll to top on call', () => {
const { result } = renderHook(() => useScrollToTop(100));
// Simulate scrolling 150px down
act(() => {
global.pageYOffset = 150;
global.dispatchEvent(new Event('scroll'));
jest.advanceTimersByTime(300);
});
expect(result.current.isVisible).toBe(true);
// Simulate scrolling to top
act(() => {
result.current.scrollToTop();
});
expect(global.scrollTo).toHaveBeenCalledWith({ top: 0, behavior: 'smooth' });
});
it('should be invisible when scrolled less than offset', () => {
const { result } = renderHook(() => useScrollToTop(100));
// Simulate scrolling 50px down
act(() => {
global.pageYOffset = 50;
global.dispatchEvent(new Event('scroll'));
jest.advanceTimersByTime(300);
});
expect(result.current.isVisible).toBe(false);
});
it('should be visible when scrolled more than offset', () => {
const { result } = renderHook(() => useScrollToTop(100));
// Simulate scrolling 200px down
act(() => {
global.pageYOffset = 200;
global.dispatchEvent(new Event('scroll'));
jest.advanceTimersByTime(300);
});
expect(result.current.isVisible).toBe(true);
});
});

View File

@@ -4,8 +4,13 @@ import { FORMULA_REGEXP } from 'constants/regExp';
import { QueryTableProps } from 'container/QueryTable/QueryTable.intefaces';
import { toCapitalize } from 'lib/toCapitalize';
import { ReactNode } from 'react';
import { IBuilderQuery, Query } from 'types/api/queryBuilder/queryBuilderData';
import {
IBuilderFormula,
IBuilderQuery,
Query,
} from 'types/api/queryBuilder/queryBuilderData';
import { ListItem, QueryDataV3, SeriesItem } from 'types/api/widgets/getQuery';
import { QueryBuilderData } from 'types/common/queryBuilder';
import { v4 as uuid } from 'uuid';
type CreateTableDataFromQueryParams = Pick<
@@ -21,8 +26,10 @@ export type RowData = {
type DynamicColumn = {
key: keyof RowData;
title: string;
sourceLabel: string;
data: (string | number)[];
type: 'field' | 'operator';
type: 'field' | 'operator' | 'formula';
// sortable: boolean;
};
@@ -39,7 +46,6 @@ type CreateTableDataFromQuery = (
type FillColumnData = (
queryTableData: QueryDataV3[],
dynamicColumns: DynamicColumns,
query: Query,
) => { filledDynamicColumns: DynamicColumns; rowsLength: number };
type GetDynamicColumns = (
@@ -54,43 +60,37 @@ type SeriesItemLabels = SeriesItem['labels'];
const isFormula = (queryName: string): boolean =>
FORMULA_REGEXP.test(queryName);
const isColumnExist = (
columnName: string,
const isValueExist = (
field: keyof DynamicColumn,
value: string,
columns: DynamicColumns,
): boolean => {
const columnKeys = columns.map((item) => item.key);
const existColumns = columns.find((item) => item[field] === value);
return columnKeys.includes(columnName);
return !!existColumns;
};
const prepareColumnTitle = (title: string): string => {
const haveUnderscore = title.includes('_');
if (haveUnderscore) {
return title
.split('_')
.map((str) => toCapitalize(str))
.join(' ');
}
return toCapitalize(title);
};
const getQueryOperator = (
queryData: IBuilderQuery[],
const getQueryByName = <T extends keyof QueryBuilderData>(
builder: QueryBuilderData,
currentQueryName: string,
): string => {
const builderQuery = queryData.find((q) => q.queryName === currentQueryName);
type: T,
): (T extends 'queryData' ? IBuilderQuery : IBuilderFormula) | null => {
const queryArray = builder[type];
return builderQuery ? builderQuery.aggregateOperator : '';
const currentQuery =
queryArray.find((q) => q.queryName === currentQueryName) || null;
if (!currentQuery) return null;
return currentQuery as T extends 'queryData' ? IBuilderQuery : IBuilderFormula;
};
const createLabels = <T extends ListItemData | SeriesItemLabels>(
labels: T,
// labels: T,
label: keyof T,
dynamicColumns: DynamicColumns,
): void => {
if (isColumnExist(label as string, dynamicColumns)) return;
if (isValueExist('key', label as string, dynamicColumns)) return;
// const labelValue = labels[label];
@@ -98,6 +98,8 @@ const createLabels = <T extends ListItemData | SeriesItemLabels>(
const fieldObj: DynamicColumn = {
key: label as string,
title: label as string,
sourceLabel: label as string,
data: [],
type: 'field',
// sortable: isNumber,
@@ -106,6 +108,68 @@ const createLabels = <T extends ListItemData | SeriesItemLabels>(
dynamicColumns.push(fieldObj);
};
const appendOperatorFormulaColumns = (
builder: QueryBuilderData,
currentQueryName: string,
dynamicColumns: DynamicColumns,
): void => {
const currentFormula = getQueryByName(
builder,
currentQueryName,
'queryFormulas',
);
if (currentFormula) {
let formulaLabel = `${currentFormula.queryName}(${currentFormula.expression})`;
if (currentFormula.legend) {
formulaLabel += ` - ${currentFormula.legend}`;
}
const formulaColumn: DynamicColumn = {
key: currentQueryName,
title: formulaLabel,
sourceLabel: formulaLabel,
data: [],
type: 'formula',
// sortable: isNumber,
};
dynamicColumns.push(formulaColumn);
}
const currentQueryData = getQueryByName(
builder,
currentQueryName,
'queryData',
);
if (!currentQueryData) return;
let operatorLabel = `${currentQueryData.aggregateOperator}`;
if (currentQueryData.aggregateAttribute.key) {
operatorLabel += `(${currentQueryData.aggregateAttribute.key})`;
}
if (currentQueryData.legend) {
operatorLabel += ` - ${currentQueryData.legend}`;
} else {
operatorLabel += ` - ${currentQueryData.queryName}`;
}
const resultValue = `${toCapitalize(operatorLabel)}`;
const operatorColumn: DynamicColumn = {
key: currentQueryName,
title: resultValue,
sourceLabel: resultValue,
data: [],
type: 'operator',
// sortable: isNumber,
};
dynamicColumns.push(operatorColumn);
};
const getDynamicColumns: GetDynamicColumns = (queryTableData, query) => {
const dynamicColumns: DynamicColumns = [];
@@ -113,49 +177,52 @@ const getDynamicColumns: GetDynamicColumns = (queryTableData, query) => {
if (currentQuery.list) {
currentQuery.list.forEach((listItem) => {
Object.keys(listItem.data).forEach((label) => {
createLabels<ListItemData>(
listItem.data,
label as ListItemKey,
dynamicColumns,
);
createLabels<ListItemData>(label as ListItemKey, dynamicColumns);
});
});
}
if (currentQuery.series) {
if (!isColumnExist('timestamp', dynamicColumns)) {
if (!isValueExist('key', 'timestamp', dynamicColumns)) {
dynamicColumns.push({
key: 'timestamp',
title: 'Timestamp',
sourceLabel: 'Timestamp',
data: [],
type: 'field',
// sortable: true,
});
}
currentQuery.series.forEach((seria) => {
Object.keys(seria.labels).forEach((label) => {
createLabels<SeriesItemLabels>(seria.labels, label, dynamicColumns);
});
});
const operator = getQueryOperator(
query.builder.queryData,
appendOperatorFormulaColumns(
query.builder,
currentQuery.queryName,
dynamicColumns,
);
if (operator === '' || isColumnExist(operator, dynamicColumns)) return;
const operatorColumn: DynamicColumn = {
key: operator,
data: [],
type: 'operator',
// sortable: true,
};
dynamicColumns.push(operatorColumn);
currentQuery.series.forEach((seria) => {
Object.keys(seria.labels).forEach((label) => {
createLabels<SeriesItemLabels>(label, dynamicColumns);
});
});
}
});
return dynamicColumns;
return dynamicColumns.map((item) => {
if (isFormula(item.key as string)) {
return item;
}
const sameValues = dynamicColumns.filter(
(column) => column.sourceLabel === item.sourceLabel,
);
if (sameValues.length > 1) {
return { ...item, title: `${item.title} - ${item.key}` };
}
return item;
});
};
const fillEmptyRowCells = (
@@ -179,7 +246,6 @@ const fillDataFromSeria = (
seria: SeriesItem,
columns: DynamicColumns,
queryName: string,
operator: string,
): void => {
const labelEntries = Object.entries(seria.labels);
@@ -195,13 +261,7 @@ const fillDataFromSeria = (
return;
}
if (isFormula(queryName) && queryName === column.key) {
column.data.push(parseFloat(value.value).toFixed(2));
unusedColumnsKeys.delete(column.key);
return;
}
if (!isFormula(queryName) && operator === column.key) {
if (queryName === column.key) {
column.data.push(parseFloat(value.value).toFixed(2));
unusedColumnsKeys.delete(column.key);
return;
@@ -238,25 +298,16 @@ const fillDataFromList = (
});
};
const fillColumnsData: FillColumnData = (queryTableData, cols, query) => {
const fillColumnsData: FillColumnData = (queryTableData, cols) => {
const fields = cols.filter((item) => item.type === 'field');
const operators = cols.filter((item) => item.type === 'operator');
const resultColumns = [...fields, ...operators];
const formulas = cols.filter((item) => item.type === 'formula');
const resultColumns = [...fields, ...operators, ...formulas];
queryTableData.forEach((currentQuery) => {
if (currentQuery.series) {
currentQuery.series.forEach((seria) => {
const currentOperator = getQueryOperator(
query.builder.queryData,
currentQuery.queryName,
);
fillDataFromSeria(
seria,
resultColumns,
currentQuery.queryName,
currentOperator,
);
fillDataFromSeria(seria, resultColumns, currentQuery.queryName);
});
}
@@ -303,7 +354,7 @@ const generateTableColumns = (
const column: ColumnType<RowData> = {
dataIndex: item.key,
key: item.key,
title: prepareColumnTitle(item.key as string),
title: item.title,
// sorter: item.sortable
// ? (a: RowData, b: RowData): number =>
// (a[item.key] as number) - (b[item.key] as number)
@@ -326,7 +377,6 @@ export const createTableColumnsFromQuery: CreateTableDataFromQuery = ({
const { filledDynamicColumns, rowsLength } = fillColumnsData(
queryTableData,
dynamicColumns,
query,
);
const dataSource = generateData(filledDynamicColumns, rowsLength);

View File

@@ -61,17 +61,18 @@ func NewRouter() *mux.Router {
type APIHandler struct {
// queryService *querysvc.QueryService
// queryParser queryParser
basePath string
apiPrefix string
reader interfaces.Reader
skipConfig *model.SkipConfig
appDao dao.ModelDao
alertManager am.Manager
ruleManager *rules.Manager
featureFlags interfaces.FeatureLookup
ready func(http.HandlerFunc) http.HandlerFunc
queryBuilder *queryBuilder.QueryBuilder
preferDelta bool
basePath string
apiPrefix string
reader interfaces.Reader
skipConfig *model.SkipConfig
appDao dao.ModelDao
alertManager am.Manager
ruleManager *rules.Manager
featureFlags interfaces.FeatureLookup
ready func(http.HandlerFunc) http.HandlerFunc
queryBuilder *queryBuilder.QueryBuilder
preferDelta bool
preferSpanMetrics bool
// SetupCompleted indicates if SigNoz is ready for general use.
// at the moment, we mark the app ready when the first user
@@ -86,7 +87,8 @@ type APIHandlerOpts struct {
SkipConfig *model.SkipConfig
PerferDelta bool
PerferDelta bool
PreferSpanMetrics bool
// dao layer to perform crud on app objects like dashboard, alerts etc
AppDao dao.ModelDao
@@ -106,13 +108,14 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
}
aH := &APIHandler{
reader: opts.Reader,
appDao: opts.AppDao,
skipConfig: opts.SkipConfig,
preferDelta: opts.PerferDelta,
alertManager: alertManager,
ruleManager: opts.RuleManager,
featureFlags: opts.FeatureFlags,
reader: opts.Reader,
appDao: opts.AppDao,
skipConfig: opts.SkipConfig,
preferDelta: opts.PerferDelta,
preferSpanMetrics: opts.PreferSpanMetrics,
alertManager: alertManager,
ruleManager: opts.RuleManager,
featureFlags: opts.FeatureFlags,
}
builderOpts := queryBuilder.QueryBuilderOptions{
@@ -1668,6 +1671,14 @@ func (aH *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
aH.HandleError(w, err, http.StatusInternalServerError)
return
}
if aH.preferSpanMetrics {
for idx := range featureSet {
feature := &featureSet[idx]
if feature.Name == model.UseSpanMetrics {
featureSet[idx].Active = true
}
}
}
aH.Respond(w, featureSet)
}
@@ -2511,6 +2522,7 @@ func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries ma
wg.Add(1)
go func(name, query string) {
defer wg.Done()
seriesList, err := aH.reader.GetTimeSeriesResultV3(ctx, query)
if err != nil {
@@ -2842,20 +2854,48 @@ func applyMetricLimit(results []*v3.Result, queryRangeParams *v3.QueryRangeParam
builderQueries := queryRangeParams.CompositeQuery.BuilderQueries
if builderQueries != nil && builderQueries[result.QueryName].DataSource == v3.DataSourceMetrics {
limit := builderQueries[result.QueryName].Limit
var orderAsc bool
for _, item := range builderQueries[result.QueryName].OrderBy {
if item.ColumnName == constants.SigNozOrderByValue {
orderAsc = strings.ToLower(item.Order) == "asc"
break
}
}
orderByList := builderQueries[result.QueryName].OrderBy
if limit != 0 {
sort.Slice(result.Series, func(i, j int) bool {
if orderAsc {
return result.Series[i].Points[0].Value < result.Series[j].Points[0].Value
if len(orderByList) == 0 {
// If no orderBy is specified, sort by value in descending order
orderByList = []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "desc"}}
}
sort.SliceStable(result.Series, func(i, j int) bool {
for _, orderBy := range orderByList {
if orderBy.ColumnName == constants.SigNozOrderByValue {
if result.Series[i].GroupingSetsPoint == nil || result.Series[j].GroupingSetsPoint == nil {
// Handle nil GroupingSetsPoint, if needed
// Here, series with a non-nil GroupingSetsPoint sort before series without one
return result.Series[i].GroupingSetsPoint != nil
}
if orderBy.Order == "asc" {
return result.Series[i].GroupingSetsPoint.Value < result.Series[j].GroupingSetsPoint.Value
} else if orderBy.Order == "desc" {
return result.Series[i].GroupingSetsPoint.Value > result.Series[j].GroupingSetsPoint.Value
}
} else {
// Sort based on Labels map
labelI, existsI := result.Series[i].Labels[orderBy.ColumnName]
labelJ, existsJ := result.Series[j].Labels[orderBy.ColumnName]
if !existsI || !existsJ {
// Handle missing labels, if needed
// Here, series that have the label sort before series that are missing it
return existsI
}
if orderBy.Order == "asc" {
return strings.Compare(labelI, labelJ) < 0
} else if orderBy.Order == "desc" {
return strings.Compare(labelI, labelJ) > 0
}
}
}
return result.Series[i].Points[0].Value > result.Series[j].Points[0].Value
// Preserve original order if no matching orderBy is found
return i < j
})
if len(result.Series) > int(limit) {
result.Series = result.Series[:limit]
}
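
The rewritten comparator above replaces the old single-key, value-only sort with a stable sort over the order-by list. A self-contained sketch of the same ordering behavior, with Series reduced to the fields the comparator reads (note that the comparator returns as soon as it reaches a key with a recognized direction, so ties on that key keep their input order under sort.SliceStable; the "test limit with multiple order by labels" case below relies on this):

package main

import (
	"fmt"
	"sort"
	"strings"
)

type OrderBy struct{ ColumnName, Order string }

// Series stands in for v3.Series; Value plays the role of GroupingSetsPoint.Value.
type Series struct {
	Labels map[string]string
	Value  float64
}

const sigNozOrderByValue = "#SIGNOZ_VALUE" // assumed value of constants.SigNozOrderByValue

func sortSeries(series []Series, orderByList []OrderBy) {
	if len(orderByList) == 0 {
		// mirrors the hunk above: default to ordering by value, descending
		orderByList = []OrderBy{{ColumnName: sigNozOrderByValue, Order: "desc"}}
	}
	sort.SliceStable(series, func(i, j int) bool {
		for _, ob := range orderByList {
			if ob.ColumnName == sigNozOrderByValue {
				if ob.Order == "asc" {
					return series[i].Value < series[j].Value
				} else if ob.Order == "desc" {
					return series[i].Value > series[j].Value
				}
			} else {
				li, oki := series[i].Labels[ob.ColumnName]
				lj, okj := series[j].Labels[ob.ColumnName]
				if !oki || !okj {
					return oki // series that have the label sort first
				}
				if ob.Order == "asc" {
					return strings.Compare(li, lj) < 0
				} else if ob.Order == "desc" {
					return strings.Compare(li, lj) > 0
				}
			}
		}
		return false // equal under every key: stable sort keeps input order
	})
}

func main() {
	s := []Series{
		{Labels: map[string]string{"operation": "GET /api/v1/health"}, Value: 19.3},
		{Labels: map[string]string{"operation": "DELETE /api/v1/health"}, Value: 8.83},
	}
	sortSeries(s, []OrderBy{{ColumnName: "operation", Order: "asc"}})
	fmt.Println(s[0].Labels["operation"], s[0].Value) // DELETE /api/v1/health 8.83
}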

View File

@@ -443,6 +443,278 @@ func TestApplyLimitOnMetricResult(t *testing.T) {
},
},
},
{
// ["GET /api/v1/health", "DELETE /api/v1/health"] so result should be ["DELETE /api/v1/health"] although it has lower value
name: "test limit with operation asc",
inputResult: []*v3.Result{
{
QueryName: "A",
Series: []*v3.Series{
{
Labels: map[string]string{
"service_name": "frontend",
"operation": "GET /api/v1/health",
},
Points: []v3.Point{
{
Timestamp: 1689220036000,
Value: 19.2,
},
{
Timestamp: 1689220096000,
Value: 19.5,
},
},
GroupingSetsPoint: &v3.Point{
Timestamp: 0,
Value: 19.3,
},
},
{
Labels: map[string]string{
"service_name": "route",
"operation": "DELETE /api/v1/health",
},
Points: []v3.Point{
{
Timestamp: 1689220036000,
Value: 8.83,
},
{
Timestamp: 1689220096000,
Value: 8.83,
},
},
GroupingSetsPoint: &v3.Point{
Timestamp: 0,
Value: 8.83,
},
},
},
},
},
params: &v3.QueryRangeParamsV3{
Start: 1689220036000,
End: 1689220096000,
Step: 60,
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "signo_calls_total"},
DataSource: v3.DataSourceMetrics,
AggregateOperator: v3.AggregateOperatorSumRate,
Expression: "A",
GroupBy: []v3.AttributeKey{{Key: "service_name"}},
Limit: 1,
OrderBy: []v3.OrderBy{{ColumnName: "operation", Order: "asc"}},
},
},
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeGraph,
},
},
expectedResult: []*v3.Result{
{
QueryName: "A",
Series: []*v3.Series{
{
Labels: map[string]string{
"service_name": "route",
"operation": "DELETE /api/v1/health",
},
Points: []v3.Point{
{
Timestamp: 1689220036000,
Value: 8.83,
},
{
Timestamp: 1689220096000,
Value: 8.83,
},
},
GroupingSetsPoint: &v3.Point{
Timestamp: 0,
Value: 8.83,
},
},
},
},
},
},
{
name: "test limit with multiple order by labels",
inputResult: []*v3.Result{
{
QueryName: "A",
Series: []*v3.Series{
{
Labels: map[string]string{
"service_name": "frontend",
"operation": "GET /api/v1/health",
"status_code": "200",
"priority": "P0",
},
Points: []v3.Point{
{
Timestamp: 1689220036000,
Value: 19.2,
},
{
Timestamp: 1689220096000,
Value: 19.5,
},
},
GroupingSetsPoint: &v3.Point{
Timestamp: 0,
Value: 19.3,
},
},
{
Labels: map[string]string{
"service_name": "route",
"operation": "DELETE /api/v1/health",
"status_code": "301",
"priority": "P1",
},
Points: []v3.Point{
{
Timestamp: 1689220036000,
Value: 8.83,
},
{
Timestamp: 1689220096000,
Value: 8.83,
},
},
GroupingSetsPoint: &v3.Point{
Timestamp: 0,
Value: 8.83,
},
},
{
Labels: map[string]string{
"service_name": "route",
"operation": "DELETE /api/v1/health",
"status_code": "400",
"priority": "P0",
},
Points: []v3.Point{
{
Timestamp: 1689220036000,
Value: 8.83,
},
{
Timestamp: 1689220096000,
Value: 8.83,
},
},
GroupingSetsPoint: &v3.Point{
Timestamp: 0,
Value: 8.83,
},
},
{
Labels: map[string]string{
"service_name": "route",
"operation": "DELETE /api/v1/health",
"status_code": "200",
"priority": "P1",
},
Points: []v3.Point{
{
Timestamp: 1689220036000,
Value: 8.83,
},
{
Timestamp: 1689220096000,
Value: 8.83,
},
},
GroupingSetsPoint: &v3.Point{
Timestamp: 0,
Value: 8.83,
},
},
},
},
},
params: &v3.QueryRangeParamsV3{
Start: 1689220036000,
End: 1689220096000,
Step: 60,
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "signo_calls_total"},
DataSource: v3.DataSourceMetrics,
AggregateOperator: v3.AggregateOperatorSumRate,
Expression: "A",
GroupBy: []v3.AttributeKey{{Key: "service_name"}, {Key: "operation"}, {Key: "status_code"}, {Key: "priority"}},
Limit: 2,
OrderBy: []v3.OrderBy{
{ColumnName: "priority", Order: "asc"},
{ColumnName: "status_code", Order: "desc"},
},
},
},
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeGraph,
},
},
expectedResult: []*v3.Result{
{
QueryName: "A",
Series: []*v3.Series{
{
Labels: map[string]string{
"service_name": "frontend",
"operation": "GET /api/v1/health",
"status_code": "200",
"priority": "P0",
},
Points: []v3.Point{
{
Timestamp: 1689220036000,
Value: 19.2,
},
{
Timestamp: 1689220096000,
Value: 19.5,
},
},
GroupingSetsPoint: &v3.Point{
Timestamp: 0,
Value: 19.3,
},
},
{
Labels: map[string]string{
"service_name": "route",
"operation": "DELETE /api/v1/health",
"status_code": "400",
"priority": "P0",
},
Points: []v3.Point{
{
Timestamp: 1689220036000,
Value: 8.83,
},
{
Timestamp: 1689220096000,
Value: 8.83,
},
},
GroupingSetsPoint: &v3.Point{
Timestamp: 0,
Value: 8.83,
},
},
},
},
},
},
}
for _, c := range cases {

View File

@@ -89,17 +89,29 @@ func getClickhouseColumnName(key v3.AttributeKey) string {
}
// getSelectLabels returns the select labels for the query based on groupBy and aggregateOperator
func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) (string, error) {
func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
var selectLabels string
if aggregatorOperator == v3.AggregateOperatorNoOp {
selectLabels = ""
} else {
for _, tag := range groupBy {
columnName := getClickhouseColumnName(tag)
selectLabels += fmt.Sprintf(", %s as %s", columnName, tag.Key)
selectLabels += fmt.Sprintf(" %s as %s,", columnName, tag.Key)
}
}
return selectLabels, nil
return selectLabels
}
func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
var selectLabels []string
if aggregatorOperator == v3.AggregateOperatorNoOp {
return ""
} else {
for _, tag := range groupBy {
selectLabels = append(selectLabels, tag.Key)
}
}
return strings.Join(selectLabels, ",")
}
func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey) (string, error) {
@@ -163,7 +175,7 @@ func getZerosForEpochNano(epoch int64) int64 {
return int64(math.Pow(10, float64(19-count)))
}
func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.BuilderQuery) (string, error) {
func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.BuilderQuery, graphLimitQtype string) (string, error) {
filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy)
if err != nil {
@@ -173,10 +185,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
// timerange will be sent in epoch millisecond
timeFilter := fmt.Sprintf("(timestamp >= %d AND timestamp <= %d)", start*getZerosForEpochNano(start), end*getZerosForEpochNano(end))
selectLabels, err := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
if err != nil {
return "", err
}
selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
having := having(mq.Having)
if having != "" {
@@ -184,35 +193,44 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
}
var queryTmpl string
if panelType == v3.PanelTypeTable {
if graphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT"
} else if panelType == v3.PanelTypeTable {
queryTmpl =
"SELECT now() as ts" + selectLabels +
", %s as value " +
"from signoz_logs.distributed_logs " +
"where " + timeFilter + "%s" +
"%s%s" +
"%s"
"SELECT now() as ts,"
} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
// Select the aggregate value for interval
queryTmpl =
fmt.Sprintf("SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL %d SECOND) AS ts", step) + selectLabels +
", %s as value " +
"from signoz_logs.distributed_logs " +
"where " + timeFilter + "%s" +
"%s%s" +
"%s"
fmt.Sprintf("SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL %d SECOND) AS ts,", step)
}
groupBy := groupByAttributeKeyTags(panelType, mq.GroupBy...)
queryTmpl =
queryTmpl + selectLabels +
" %s as value " +
"from signoz_logs.distributed_logs " +
"where " + timeFilter + "%s" +
"%s%s" +
"%s"
// we don't need the aggregate value for the first query
// going with this route for a cleaner implementation
if graphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
}
groupBy := groupByAttributeKeyTags(panelType, graphLimitQtype, mq.GroupBy...)
if panelType != v3.PanelTypeList && groupBy != "" {
groupBy = " group by " + groupBy
}
orderBy := orderByAttributeKeyTags(panelType, mq.AggregateOperator, mq.OrderBy, mq.GroupBy)
orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy)
if panelType != v3.PanelTypeList && orderBy != "" {
orderBy = " order by " + orderBy
}
if graphLimitQtype == constants.SecondQueryGraphLimit {
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) IN (", getSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
}
aggregationKey := ""
if mq.AggregateAttribute.Key != "" {
aggregationKey = getClickhouseColumnName(mq.AggregateAttribute)
@@ -273,82 +291,56 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
// groupBy returns a string of comma separated tags for group by clause
// `ts` is always added to the group by clause
func groupBy(panelType v3.PanelType, tags ...string) string {
if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
func groupBy(panelType v3.PanelType, graphLimitQtype string, tags ...string) string {
if (graphLimitQtype != constants.FirstQueryGraphLimit) && (panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue) {
tags = append(tags, "ts")
}
return strings.Join(tags, ",")
}
func groupByAttributeKeyTags(panelType v3.PanelType, tags ...v3.AttributeKey) string {
func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
groupTags := []string{}
for _, tag := range tags {
groupTags = append(groupTags, tag.Key)
}
return groupBy(panelType, groupTags...)
return groupBy(panelType, graphLimitQtype, groupTags...)
}
// orderBy returns a string of comma separated tags for order by clause
// if there are remaining items which are not present in tags they are also added
// if the order is not specified, it defaults to ASC
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tags []string) []string {
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string {
var orderBy []string
// create a lookup
addedToOrderBy := map[string]bool{}
itemsLookup := map[string]v3.OrderBy{}
for i := 0; i < len(items); i++ {
addedToOrderBy[items[i].ColumnName] = false
itemsLookup[items[i].ColumnName] = items[i]
}
for _, tag := range tags {
if item, ok := itemsLookup[tag]; ok {
orderBy = append(orderBy, fmt.Sprintf("%s %s", item.ColumnName, item.Order))
addedToOrderBy[item.ColumnName] = true
} else {
orderBy = append(orderBy, fmt.Sprintf("%s ASC", tag))
}
}
// users might want to order by the value of the aggregation
for _, item := range items {
if item.ColumnName == constants.SigNozOrderByValue {
orderBy = append(orderBy, fmt.Sprintf("value %s", item.Order))
addedToOrderBy[item.ColumnName] = true
}
}
// add the remaining items
if panelType == v3.PanelTypeList {
for _, item := range items {
// since these are not present in tags we will have to select them correctly
// for list view there is no need to check if it was added since they won't be added yet, but this is just for safety
if !addedToOrderBy[item.ColumnName] {
attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
name := getClickhouseColumnName(attr)
orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order))
}
} else if _, ok := tagLookup[item.ColumnName]; ok {
orderBy = append(orderBy, fmt.Sprintf("%s %s", item.ColumnName, item.Order))
} else if panelType == v3.PanelTypeList {
attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
name := getClickhouseColumnName(attr)
orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order))
}
}
return orderBy
}
func orderByAttributeKeyTags(panelType v3.PanelType, aggregatorOperator v3.AggregateOperator, items []v3.OrderBy, tags []v3.AttributeKey) string {
var groupTags []string
for _, tag := range tags {
groupTags = append(groupTags, tag.Key)
}
orderByArray := orderBy(panelType, items, groupTags)
func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string {
if panelType == v3.PanelTypeList {
if len(orderByArray) == 0 {
orderByArray = append(orderByArray, constants.TIMESTAMP)
tagLookup := map[string]struct{}{}
for _, v := range tags {
tagLookup[v.Key] = struct{}{}
}
orderByArray := orderBy(panelType, items, tagLookup)
if len(orderByArray) == 0 {
if panelType == v3.PanelTypeList {
orderByArray = append(orderByArray, constants.TIMESTAMP+" DESC")
} else {
orderByArray = append(orderByArray, "value DESC")
}
} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
// for other aggregation operators we will have to add ts, as it will not be present in the group by
orderByArray = append(orderByArray, "ts")
}
str := strings.Join(orderByArray, ",")
@@ -392,8 +384,26 @@ func addOffsetToQuery(query string, offset uint64) string {
return fmt.Sprintf("%s OFFSET %d", query, offset)
}
func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery) (string, error) {
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq)
func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, graphLimitQtype string) (string, error) {
if graphLimitQtype == constants.FirstQueryGraphLimit {
// fetch only the group-by names
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, graphLimitQtype)
if err != nil {
return "", err
}
query = addLimitToQuery(query, mq.Limit)
return query, nil
} else if graphLimitQtype == constants.SecondQueryGraphLimit {
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, graphLimitQtype)
if err != nil {
return "", err
}
return query, nil
}
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, graphLimitQtype)
if err != nil {
return "", err
}
@@ -401,7 +411,7 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
query, err = reduceQuery(query, mq.ReduceTo, mq.AggregateOperator)
}
if panelType == v3.PanelTypeList {
if panelType == v3.PanelTypeList || panelType == v3.PanelTypeTable {
if mq.PageSize > 0 {
if mq.Limit > 0 && mq.Offset > mq.Limit {
return "", fmt.Errorf("max limit exceeded")
@@ -414,4 +424,5 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
}
return query, err
}
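
The two graphLimitQtype branches split a limited group-by graph query into two round trips: with constants.FirstQueryGraphLimit the builder wraps the inner aggregation in "SELECT <group-by keys> from (...)" and applies the limit, and with constants.SecondQueryGraphLimit it leaves a "(keys) IN (%s)" placeholder in the filter for the caller to fill with the first query's results. A rough sketch of the composition, with abbreviated query strings; how the caller formats the key tuples is an assumption for illustration:

package main

import "fmt"

func main() {
	// Stage 1 (FirstQueryGraphLimit): fetch only the limited group-by keys.
	firstQuery := "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, " +
		"toFloat64(count(*)) as value from signoz_logs.distributed_logs " +
		"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
		"group by method order by value DESC) LIMIT 10"
	_ = firstQuery // executed first; suppose it returns these key tuples:
	keyTuples := "('GET'),('POST')" // hypothetical formatting of the stage-1 rows

	// Stage 2 (SecondQueryGraphLimit): the filter carries a %s placeholder,
	// "... AND (method) IN (%s)", which is filled with the stage-1 tuples.
	secondQuery := "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, " +
		"attributes_string_value[indexOf(attributes_string_key, 'method')] as method, " +
		"toFloat64(count(*)) as value from signoz_logs.distributed_logs " +
		"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
		"AND (method) IN (%s) group by method,ts order by method ASC"
	fmt.Printf(secondQuery+"\n", keyTuples)
}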

View File

@@ -1,6 +1,7 @@
package v3
import (
"fmt"
"testing"
. "github.com/smartystreets/goconvey/convey"
@@ -59,13 +60,13 @@ var testGetSelectLabelsData = []struct {
Name: "select fields for groupBy attribute",
AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
SelectLabels: ", attributes_string_value[indexOf(attributes_string_key, 'user_name')] as user_name",
SelectLabels: " attributes_string_value[indexOf(attributes_string_key, 'user_name')] as user_name,",
},
{
Name: "select fields for groupBy resource",
AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}},
SelectLabels: ", resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name",
SelectLabels: " resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name,",
},
{
Name: "select fields for groupBy attribute and resource",
@@ -74,27 +75,26 @@ var testGetSelectLabelsData = []struct {
{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
},
SelectLabels: ", resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name, attributes_string_value[indexOf(attributes_string_key, 'host')] as host",
SelectLabels: " resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name, attributes_string_value[indexOf(attributes_string_key, 'host')] as host,",
},
{
Name: "select fields for groupBy materialized columns",
AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "host", IsColumn: true}},
SelectLabels: ", host as host",
SelectLabels: " host as host,",
},
{
Name: "trace_id field as an attribute",
AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "trace_id", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
SelectLabels: ", attributes_string_value[indexOf(attributes_string_key, 'trace_id')] as trace_id",
SelectLabels: " attributes_string_value[indexOf(attributes_string_key, 'trace_id')] as trace_id,",
},
}
func TestGetSelectLabels(t *testing.T) {
for _, tt := range testGetSelectLabelsData {
Convey("testGetSelectLabelsData", t, func() {
selectLabels, err := getSelectLabels(tt.AggregateOperator, tt.GroupByTags)
So(err, ShouldBeNil)
selectLabels := getSelectLabels(tt.AggregateOperator, tt.GroupByTags)
So(selectLabels, ShouldEqual, tt.SelectLabels)
})
}
@@ -238,6 +238,7 @@ var testBuildLogsQueryData = []struct {
TableName string
AggregateOperator v3.AggregateOperator
ExpectedQuery string
Type int
}{
{
Name: "Test aggregate count on select field",
@@ -251,7 +252,7 @@ var testBuildLogsQueryData = []struct {
Expression: "A",
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value DESC",
},
{
Name: "Test aggregate count on a attribute",
@@ -266,7 +267,7 @@ var testBuildLogsQueryData = []struct {
Expression: "A",
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'user_name') group by ts order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'user_name') group by ts order by value DESC",
},
{
Name: "Test aggregate count on a with filter",
@@ -284,7 +285,7 @@ var testBuildLogsQueryData = []struct {
Expression: "A",
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_float64_value[indexOf(attributes_float64_key, 'bytes')] > 100.000000 AND has(attributes_string_key, 'user_name') group by ts order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_float64_value[indexOf(attributes_float64_key, 'bytes')] > 100.000000 AND has(attributes_string_key, 'user_name') group by ts order by value DESC",
},
{
Name: "Test aggregate count distinct and order by value",
@ -300,7 +301,7 @@ var testBuildLogsQueryData = []struct {
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "ASC"}},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(name))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value ASC,ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(name))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value ASC",
},
{
Name: "Test aggregate count distinct on non selected field",
@@ -315,7 +316,7 @@ var testBuildLogsQueryData = []struct {
Expression: "A",
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value DESC",
},
{
Name: "Test aggregate count distinct with filter and groupBy",
@ -344,7 +345,7 @@ var testBuildLogsQueryData = []struct {
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND resources_string_value[indexOf(resources_string_key, 'x')] != 'abc' " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate count with multiple filter,groupBy and orderBy",
@ -375,7 +376,7 @@ var testBuildLogsQueryData = []struct {
"AND indexOf(attributes_string_key, 'method') > 0 " +
"AND indexOf(resources_string_key, 'x') > 0 " +
"group by method,x,ts " +
"order by method ASC,x ASC,ts",
"order by method ASC,x ASC",
},
{
Name: "Test aggregate avg",
@@ -404,7 +405,7 @@ var testBuildLogsQueryData = []struct {
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate sum",
@@ -433,7 +434,7 @@ var testBuildLogsQueryData = []struct {
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate min",
@@ -462,7 +463,7 @@ var testBuildLogsQueryData = []struct {
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate max",
@@ -491,7 +492,7 @@ var testBuildLogsQueryData = []struct {
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate PXX",
@@ -516,7 +517,7 @@ var testBuildLogsQueryData = []struct {
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate RateSum",
@ -538,7 +539,7 @@ var testBuildLogsQueryData = []struct {
", sum(bytes)/60 as value from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts order by method ASC,ts",
"group by method,ts order by method ASC",
},
{
Name: "Test aggregate rate",
@@ -561,7 +562,7 @@ var testBuildLogsQueryData = []struct {
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate RateSum without materialized column",
@@ -585,7 +586,7 @@ var testBuildLogsQueryData = []struct {
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test Noop",
@@ -603,7 +604,7 @@ var testBuildLogsQueryData = []struct {
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string," +
"CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," +
"CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string " +
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by timestamp",
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by timestamp DESC",
},
{
Name: "Test Noop order by custom",
@@ -642,7 +643,7 @@ var testBuildLogsQueryData = []struct {
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string," +
"CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," +
"CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string " +
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND severity_number != 0 order by timestamp",
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND severity_number != 0 order by timestamp DESC",
},
{
Name: "Test aggregate with having clause",
@ -664,7 +665,7 @@ var testBuildLogsQueryData = []struct {
},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts having value > 10 order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts having value > 10 order by value DESC",
},
{
Name: "Test aggregate with having clause and filters",
@ -690,7 +691,7 @@ var testBuildLogsQueryData = []struct {
},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' group by ts having value > 10 order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' group by ts having value > 10 order by value DESC",
},
{
Name: "Test top level key",
@ -716,7 +717,7 @@ var testBuildLogsQueryData = []struct {
},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND body ILIKE '%test%' group by ts having value > 10 order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND body ILIKE '%test%' group by ts having value > 10 order by value DESC",
},
{
Name: "Test attribute with same name as top level key",
@ -742,7 +743,7 @@ var testBuildLogsQueryData = []struct {
},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'body')] ILIKE '%test%' group by ts having value > 10 order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'body')] ILIKE '%test%' group by ts having value > 10 order by value DESC",
},
// Tests for table panel type
@ -758,7 +759,7 @@ var testBuildLogsQueryData = []struct {
Expression: "A",
},
TableName: "logs",
ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000)",
ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by value DESC",
},
{
Name: "TABLE: Test count with groupBy",
@ -775,7 +776,7 @@ var testBuildLogsQueryData = []struct {
},
},
TableName: "logs",
ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as name, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND indexOf(attributes_string_key, 'name') > 0 group by name order by name ASC",
ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as name, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND indexOf(attributes_string_key, 'name') > 0 group by name order by value DESC",
},
{
Name: "TABLE: Test count with groupBy, orderBy",
@ -802,7 +803,8 @@ var testBuildLogsQueryData = []struct {
func TestBuildLogsQuery(t *testing.T) {
for _, tt := range testBuildLogsQueryData {
Convey("TestBuildLogsQuery", t, func() {
query, err := buildLogsQuery(tt.PanelType, tt.Start, tt.End, tt.Step, tt.BuilderQuery)
query, err := buildLogsQuery(tt.PanelType, tt.Start, tt.End, tt.Step, tt.BuilderQuery, "")
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedQuery)
@ -844,8 +846,8 @@ var testOrderBy = []struct {
Name string
PanelType v3.PanelType
Items []v3.OrderBy
Tags []string
Result []string
Tags []v3.AttributeKey
Result string
}{
{
Name: "Test 1",
@ -860,8 +862,10 @@ var testOrderBy = []struct {
Order: "desc",
},
},
Tags: []string{"name"},
Result: []string{"name asc", "value desc"},
Tags: []v3.AttributeKey{
{Key: "name"},
},
Result: "name asc,value desc",
},
{
Name: "Test 2",
@ -876,8 +880,34 @@ var testOrderBy = []struct {
Order: "asc",
},
},
Tags: []string{"name", "bytes"},
Result: []string{"name asc", "bytes asc"},
Tags: []v3.AttributeKey{
{Key: "name"},
{Key: "bytes"},
},
Result: "name asc,bytes asc",
},
{
Name: "Test Graph item not present in tag",
PanelType: v3.PanelTypeGraph,
Items: []v3.OrderBy{
{
ColumnName: "name",
Order: "asc",
},
{
ColumnName: "bytes",
Order: "asc",
},
{
ColumnName: "method",
Order: "asc",
},
},
Tags: []v3.AttributeKey{
{Key: "name"},
{Key: "bytes"},
},
Result: "name asc,bytes asc",
},
{
Name: "Test 3",
@ -896,8 +926,11 @@ var testOrderBy = []struct {
Order: "asc",
},
},
Tags: []string{"name", "bytes"},
Result: []string{"name asc", "bytes asc", "value asc"},
Tags: []v3.AttributeKey{
{Key: "name"},
{Key: "bytes"},
},
Result: "name asc,value asc,bytes asc",
},
{
Name: "Test 4",
@ -923,16 +956,163 @@ var testOrderBy = []struct {
DataType: v3.AttributeKeyDataTypeString,
},
},
Tags: []string{"name", "bytes"},
Result: []string{"name asc", "bytes asc", "value asc", "attributes_string_value[indexOf(attributes_string_key, 'response_time')] desc"},
Tags: []v3.AttributeKey{
{Key: "name"},
{Key: "bytes"},
},
Result: "name asc,value asc,bytes asc,attributes_string_value[indexOf(attributes_string_key, 'response_time')] desc",
},
}
func TestOrderBy(t *testing.T) {
for _, tt := range testOrderBy {
Convey("testOrderBy", t, func() {
res := orderBy(tt.PanelType, tt.Items, tt.Tags)
res := orderByAttributeKeyTags(tt.PanelType, tt.Items, tt.Tags)
So(res, ShouldResemble, tt.Result)
})
}
}
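Worth noting about the fixtures above: orderByAttributeKeyTags now returns a single comma-joined string instead of a slice, and, for graph panels, it drops order-by items whose column is neither one of the group-by tags nor the aggregate value (see "Test Graph item not present in tag", where method is silently discarded).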
// If there is no group by, a limit adds nothing for time-series and table queries,
// since the result is a single series.
// Apply the limit only when a group by is present.
var testPrepLogsQueryData = []struct {
Name string
PanelType v3.PanelType
Start int64
End int64
Step int64
BuilderQuery *v3.BuilderQuery
GroupByTags []v3.AttributeKey
TableName string
AggregateOperator v3.AggregateOperator
ExpectedQuery string
Type string
}{
{
Name: "Test TS with limit- first",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
Step: 60,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
},
},
Limit: 10,
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
},
TableName: "logs",
ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 group by method order by value DESC) LIMIT 10",
Type: constants.FirstQueryGraphLimit,
},
{
Name: "Test TS with limit- first - with order by value",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
Step: 60,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
},
},
Limit: 10,
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "ASC"}},
},
TableName: "logs",
ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 group by method order by value ASC) LIMIT 10",
Type: constants.FirstQueryGraphLimit,
},
{
Name: "Test TS with limit- first - with order by attribute",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
Step: 60,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
},
},
Limit: 10,
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{{ColumnName: "method", Order: "ASC"}},
},
TableName: "logs",
ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 group by method order by method ASC) LIMIT 10",
Type: constants.FirstQueryGraphLimit,
},
{
Name: "Test TS with limit- second",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
Step: 60,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
},
},
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
Limit: 2,
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 0 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND (method) IN (%s) group by method,ts order by value DESC",
Type: constants.SecondQueryGraphLimit,
},
{
Name: "Test TS with limit- second - with order by",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
Step: 60,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
},
},
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{{ColumnName: "method", Order: "ASC"}},
Limit: 2,
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 0 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND (method) IN (%s) group by method,ts order by method ASC",
Type: constants.SecondQueryGraphLimit,
},
}
func TestPrepareLogsQuery(t *testing.T) {
for _, tt := range testPrepLogsQueryData {
Convey("TestBuildLogsQuery", t, func() {
query, err := PrepareLogsQuery(tt.Start, tt.End, "", tt.PanelType, tt.BuilderQuery, tt.Type)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedQuery)
})
}
}
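The %s left in the second-query fixtures above is deliberate: the builder first renders the FirstQueryGraphLimit query, then substitutes it into the SecondQueryGraphLimit placeholder, as the QueryBuilder change later in this diff shows. A minimal sketch of that composition, using abbreviated stand-in query strings rather than real rendered SQL:

package main

import "fmt"

func main() {
	// abbreviated stand-ins for the two rendered queries
	limitQuery := "SELECT method from (SELECT ... group by method order by value DESC) LIMIT 10"
	placeholderQuery := "SELECT ... AND (method) IN (%s) group by method,ts order by value DESC"
	// the builder composes them with fmt.Sprintf(placeholderQuery, limitQuery)
	fmt.Println(fmt.Sprintf(placeholderQuery, limitQuery))
}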

View File

@ -0,0 +1,196 @@
package v3
import (
"fmt"
"math"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils"
)
// This logic is a little convoluted for a reason.
// When we work with cumulative metrics, the table view needs to show data for the entire time range.
// In some cases we could take the points at the start and end of the time range and divide by the
// duration, but there is no guarantee that the trend is linear between the start and end.
// Instead, we sum the rate of change over some interval X, which can be the step size of the time series.
// However, query speed depends on the number of timestamps, so we bump up the step size.
// This should be a good balance between speed and accuracy.
// TODO: find a better way to do this
func stepForTableCumulative(start, end int64) int64 {
// target at most ~120 points, with a floor of 60 seconds per step
duration := (end - start + 1) / 1000
step := math.Max(math.Floor(float64(duration)/120), 60) // assuming 120 max points
if duration > 1800 { // bump for longer duration
step = step * 5
}
return int64(step)
}
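// Worked example, using the window from the tests later in this diff:
// start=1689255866000, end=1689257640000 (ms) gives duration = (end-start+1)/1000 = 1774 s;
// step = max(floor(1774/120), 60) = 60, and duration <= 1800 so no bump applies.
// The caller then derives points = 1774/60 = 29, which is why the expected
// cumulative table queries divide the summed rate by 29.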
func buildMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableName string) (string, error) {
step := stepForTableCumulative(start, end)
points := ((end - start + 1) / 1000) / step
metricQueryGroupBy := mq.GroupBy
// if the aggregate operator is a histogram quantile and the user has not added
// the le tag to the group by, add it, since it is needed to compute the quantile
if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 ||
mq.AggregateOperator == v3.AggregateOperatorHistQuant75 ||
mq.AggregateOperator == v3.AggregateOperatorHistQuant90 ||
mq.AggregateOperator == v3.AggregateOperatorHistQuant95 ||
mq.AggregateOperator == v3.AggregateOperatorHistQuant99 {
found := false
for _, tag := range mq.GroupBy {
if tag.Key == "le" {
found = true
break
}
}
if !found {
metricQueryGroupBy = append(
metricQueryGroupBy,
v3.AttributeKey{
Key: "le",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: false,
},
)
}
}
filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq)
if err != nil {
return "", err
}
samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)
// Select the aggregate value for interval
queryTmplCounterInner :=
"SELECT %s" +
" toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," +
" %s as value" +
" FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
" GLOBAL INNER JOIN" +
" (%s) as filtered_time_series" +
" USING fingerprint" +
" WHERE " + samplesTableTimeFilter +
" GROUP BY %s" +
" ORDER BY %s ts"
// Select the aggregate value for interval
queryTmpl :=
"SELECT %s" +
" toStartOfHour(now()) as ts," + // now() has no menaing & used as a placeholder for ts
" %s as value" +
" FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
" GLOBAL INNER JOIN" +
" (%s) as filtered_time_series" +
" USING fingerprint" +
" WHERE " + samplesTableTimeFilter +
" GROUP BY %s" +
" ORDER BY %s ts"
// tagsWithoutLe groups by all tags except le, because le is only needed
// while computing the quantile; the outer query regroups without it
tagsWithoutLe := []string{}
for _, tag := range mq.GroupBy {
if tag.Key != "le" {
tagsWithoutLe = append(tagsWithoutLe, tag.Key)
}
}
groupByWithoutLe := groupBy(tagsWithoutLe...)
groupTagsWithoutLe := groupSelect(tagsWithoutLe...)
orderWithoutLe := orderBy(mq.OrderBy, tagsWithoutLe)
groupBy := groupByAttributeKeyTags(metricQueryGroupBy...)
groupTags := groupSelectAttributeKeyTags(metricQueryGroupBy...)
orderBy := orderByAttributeKeyTags(mq.OrderBy, metricQueryGroupBy)
if len(orderBy) != 0 {
orderBy += ","
}
if len(orderWithoutLe) != 0 {
orderWithoutLe += ","
}
switch mq.AggregateOperator {
case v3.AggregateOperatorRate:
return "", fmt.Errorf("rate is not supported for table view")
case v3.AggregateOperatorSumRate, v3.AggregateOperatorAvgRate, v3.AggregateOperatorMaxRate, v3.AggregateOperatorMinRate:
rateGroupBy := "fingerprint, " + groupBy
rateGroupTags := "fingerprint, " + groupTags
rateOrderBy := "fingerprint, " + orderBy
op := "max(value)"
subQuery := fmt.Sprintf(
queryTmplCounterInner, rateGroupTags, step, op, filterSubQuery, rateGroupBy, rateOrderBy,
) // labels will be the same, so any should be fine
query := `SELECT %s ts, ` + rateWithoutNegative + `as value FROM(%s) WHERE isNaN(value) = 0`
query = fmt.Sprintf(query, groupTags, subQuery)
query = fmt.Sprintf(`SELECT %s toStartOfHour(now()) as ts, %s(value)/%d as value FROM (%s) GROUP BY %s ORDER BY %s ts`, groupTags, aggregateOperatorToSQLFunc[mq.AggregateOperator], points, query, groupBy, orderBy)
return query, nil
case
v3.AggregateOperatorRateSum,
v3.AggregateOperatorRateMax,
v3.AggregateOperatorRateAvg,
v3.AggregateOperatorRateMin:
step = ((end - start + 1) / 1000) / 2
op := fmt.Sprintf("%s(value)", aggregateOperatorToSQLFunc[mq.AggregateOperator])
subQuery := fmt.Sprintf(queryTmplCounterInner, groupTags, step, op, filterSubQuery, groupBy, orderBy)
query := `SELECT %s toStartOfHour(now()) as ts, ` + rateWithoutNegative + `as value FROM(%s) WHERE isNaN(value) = 0`
query = fmt.Sprintf(query, groupTags, subQuery)
return query, nil
case
v3.AggregateOperatorP05,
v3.AggregateOperatorP10,
v3.AggregateOperatorP20,
v3.AggregateOperatorP25,
v3.AggregateOperatorP50,
v3.AggregateOperatorP75,
v3.AggregateOperatorP90,
v3.AggregateOperatorP95,
v3.AggregateOperatorP99:
op := fmt.Sprintf("quantile(%v)(value)", aggregateOperatorToPercentile[mq.AggregateOperator])
query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
return query, nil
case v3.AggregateOperatorHistQuant50, v3.AggregateOperatorHistQuant75, v3.AggregateOperatorHistQuant90, v3.AggregateOperatorHistQuant95, v3.AggregateOperatorHistQuant99:
rateGroupBy := "fingerprint, " + groupBy
rateGroupTags := "fingerprint, " + groupTags
rateOrderBy := "fingerprint, " + orderBy
op := "max(value)"
subQuery := fmt.Sprintf(
queryTmplCounterInner, rateGroupTags, step, op, filterSubQuery, rateGroupBy, rateOrderBy,
) // labels will be the same, so any should be fine
query := `SELECT %s ts, ` + rateWithoutNegative + ` as value FROM(%s) WHERE isNaN(value) = 0`
query = fmt.Sprintf(query, groupTags, subQuery)
query = fmt.Sprintf(`SELECT %s toStartOfHour(now()) as ts, sum(value)/%d as value FROM (%s) GROUP BY %s HAVING isNaN(value) = 0 ORDER BY %s ts`, groupTags, points, query, groupBy, orderBy)
value := aggregateOperatorToPercentile[mq.AggregateOperator]
query = fmt.Sprintf(`SELECT %s toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) as value FROM (%s) GROUP BY %s ORDER BY %s ts`, groupTagsWithoutLe, value, query, groupByWithoutLe, orderWithoutLe)
return query, nil
case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
op := fmt.Sprintf("%s(value)", aggregateOperatorToSQLFunc[mq.AggregateOperator])
query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
return query, nil
case v3.AggregateOperatorCount:
op := "toFloat64(count(*))"
query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
return query, nil
case v3.AggregateOperatorCountDistinct:
op := "toFloat64(count(distinct(value)))"
query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
return query, nil
case v3.AggregateOperatorNoOp:
return "", fmt.Errorf("noop is not supported for table view")
default:
return "", fmt.Errorf("unsupported aggregate operator")
}
}
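For the rate-style and histogram-quantile operators above, the generated SQL nests three (or, for quantiles, four) layers, as the cumulative fixtures below illustrate: the innermost query takes max(value) per fingerprint per step interval, the next layer turns those into per-interval rates via runningDifference, and the outer layer sums the rates and divides by points to collapse each group to a single row, with a final histogramQuantile layer on top for the quantile operators.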

View File

@ -0,0 +1,99 @@
package v3
import (
"testing"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
func TestPanelTableForCumulative(t *testing.T) {
cases := []struct {
name string
query *v3.BuilderQuery
expected string
}{
{
name: "request rate",
query: &v3.BuilderQuery{
QueryName: "A",
DataSource: v3.DataSourceMetrics,
AggregateOperator: v3.AggregateOperatorSumRate,
AggregateAttribute: v3.AttributeKey{
Key: "signoz_latency_count",
},
Temporality: v3.Cumulative,
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "service_name"},
Operator: v3.FilterOperatorIn,
Value: []interface{}{"frontend"},
},
{
Key: v3.AttributeKey{Key: "operation"},
Operator: v3.FilterOperatorIn,
Value: []interface{}{"HTTP GET /dispatch"},
},
},
},
Expression: "A",
},
expected: "SELECT toStartOfHour(now()) as ts, sum(value)/29 as value FROM (SELECT ts, if(runningDifference(ts) <= 0, nan, if(runningDifference(value) < 0, (value) / runningDifference(ts), runningDifference(value) / runningDifference(ts))) as value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(value) = 0) GROUP BY ts ORDER BY ts",
},
{
name: "latency p50",
query: &v3.BuilderQuery{
QueryName: "A",
DataSource: v3.DataSourceMetrics,
AggregateOperator: v3.AggregateOperatorHistQuant50,
AggregateAttribute: v3.AttributeKey{
Key: "signoz_latency_bucket",
},
Temporality: v3.Cumulative,
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "service_name"},
Operator: v3.FilterOperatorEqual,
Value: "frontend",
},
},
},
Expression: "A",
},
expected: "SELECT toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(value)/29 as value FROM (SELECT le, ts, if(runningDifference(ts) <= 0, nan, if(runningDifference(value) < 0, (value) / runningDifference(ts), runningDifference(value) / runningDifference(ts))) as value FROM(SELECT fingerprint, le, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, le,ts ORDER BY fingerprint, le ASC, ts) WHERE isNaN(value) = 0) GROUP BY le,ts HAVING isNaN(value) = 0 ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts",
},
{
name: "latency p99 with group by",
query: &v3.BuilderQuery{
QueryName: "A",
DataSource: v3.DataSourceMetrics,
AggregateOperator: v3.AggregateOperatorHistQuant99,
AggregateAttribute: v3.AttributeKey{
Key: "signoz_latency_bucket",
},
Temporality: v3.Cumulative,
GroupBy: []v3.AttributeKey{
{
Key: "service_name",
},
},
Expression: "A",
},
expected: "SELECT service_name, toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(value)/29 as value FROM (SELECT service_name,le, ts, if(runningDifference(ts) <= 0, nan, if(runningDifference(value) < 0, (value) / runningDifference(ts), runningDifference(value) / runningDifference(ts))) as value FROM(SELECT fingerprint, service_name,le, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality IN ['Cumulative', 'Unspecified']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, service_name,le,ts ORDER BY fingerprint, service_name ASC,le ASC, ts) WHERE isNaN(value) = 0) GROUP BY service_name,le,ts HAVING isNaN(value) = 0 ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
query, err := buildMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query, "distributed_time_series_v2")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if query != c.expected {
t.Fatalf("expected: %s, got: %s", c.expected, query)
}
})
}
}

View File

@ -0,0 +1,148 @@
package v3
import (
"fmt"
"math"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils"
)
func buildDeltaMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableName string) (string, error) {
// round up to the nearest multiple of 60
step := int64(math.Ceil(float64(end-start+1)/1000/60) * 60)
metricQueryGroupBy := mq.GroupBy
// if the aggregate operator is a histogram quantile and the user has not added
// the le tag to the group by, add it, since it is needed to compute the quantile
if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 ||
mq.AggregateOperator == v3.AggregateOperatorHistQuant75 ||
mq.AggregateOperator == v3.AggregateOperatorHistQuant90 ||
mq.AggregateOperator == v3.AggregateOperatorHistQuant95 ||
mq.AggregateOperator == v3.AggregateOperatorHistQuant99 {
found := false
for _, tag := range mq.GroupBy {
if tag.Key == "le" {
found = true
break
}
}
if !found {
metricQueryGroupBy = append(
metricQueryGroupBy,
v3.AttributeKey{
Key: "le",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: false,
},
)
}
}
filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq)
if err != nil {
return "", err
}
samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)
queryTmpl :=
"SELECT %s toStartOfHour(now()) as ts," + // now() has no menaing & used as a placeholder for ts
" %s as value" +
" FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
" GLOBAL INNER JOIN" +
" (%s) as filtered_time_series" +
" USING fingerprint" +
" WHERE " + samplesTableTimeFilter +
" GROUP BY %s" +
" ORDER BY %s ts"
// tagsWithoutLe groups by all tags except le, because le is only needed
// while computing the quantile; the outer query regroups without it
tagsWithoutLe := []string{}
for _, tag := range mq.GroupBy {
if tag.Key != "le" {
tagsWithoutLe = append(tagsWithoutLe, tag.Key)
}
}
groupByWithoutLeTable := groupBy(tagsWithoutLe...)
groupTagsWithoutLeTable := groupSelect(tagsWithoutLe...)
orderWithoutLeTable := orderBy(mq.OrderBy, tagsWithoutLe)
groupBy := groupByAttributeKeyTags(metricQueryGroupBy...)
groupTags := groupSelectAttributeKeyTags(metricQueryGroupBy...)
orderBy := orderByAttributeKeyTags(mq.OrderBy, metricQueryGroupBy)
if len(orderBy) != 0 {
orderBy += ","
}
if len(orderWithoutLeTable) != 0 {
orderWithoutLeTable += ","
}
switch mq.AggregateOperator {
case v3.AggregateOperatorRate:
// TODO(srikanthccv): what should be the expected behavior here for metrics?
return "", fmt.Errorf("rate is not supported for table view")
case v3.AggregateOperatorSumRate, v3.AggregateOperatorAvgRate, v3.AggregateOperatorMaxRate, v3.AggregateOperatorMinRate:
op := fmt.Sprintf("%s(value)/%d", aggregateOperatorToSQLFunc[mq.AggregateOperator], step)
query := fmt.Sprintf(
queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy,
)
return query, nil
case
v3.AggregateOperatorRateSum,
v3.AggregateOperatorRateMax,
v3.AggregateOperatorRateAvg,
v3.AggregateOperatorRateMin:
op := fmt.Sprintf("%s(value)/%d", aggregateOperatorToSQLFunc[mq.AggregateOperator], step)
query := fmt.Sprintf(
queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy,
)
return query, nil
case
v3.AggregateOperatorP05,
v3.AggregateOperatorP10,
v3.AggregateOperatorP20,
v3.AggregateOperatorP25,
v3.AggregateOperatorP50,
v3.AggregateOperatorP75,
v3.AggregateOperatorP90,
v3.AggregateOperatorP95,
v3.AggregateOperatorP99:
op := fmt.Sprintf("quantile(%v)(value)", aggregateOperatorToPercentile[mq.AggregateOperator])
query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
return query, nil
case v3.AggregateOperatorHistQuant50, v3.AggregateOperatorHistQuant75, v3.AggregateOperatorHistQuant90, v3.AggregateOperatorHistQuant95, v3.AggregateOperatorHistQuant99:
op := fmt.Sprintf("sum(value)/%d", step)
query := fmt.Sprintf(
queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy,
) // labels will be the same, so any should be fine
value := aggregateOperatorToPercentile[mq.AggregateOperator]
query = fmt.Sprintf(`SELECT %s ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) as value FROM (%s) GROUP BY %s ORDER BY %s ts`, groupTagsWithoutLeTable, value, query, groupByWithoutLeTable, orderWithoutLeTable)
return query, nil
case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
op := fmt.Sprintf("%s(value)", aggregateOperatorToSQLFunc[mq.AggregateOperator])
query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
return query, nil
case v3.AggregateOperatorCount:
op := "toFloat64(count(*))"
query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
return query, nil
case v3.AggregateOperatorCountDistinct:
op := "toFloat64(count(distinct(value)))"
query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
return query, nil
case v3.AggregateOperatorNoOp:
return "", fmt.Errorf("noop is not supported for table view")
default:
return "", fmt.Errorf("unsupported aggregate operator")
}
}
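The delta builder is simpler than its cumulative counterpart because delta samples are already per-interval increments: rate operators just compute sum(value)/step over the whole range, with step rounded up to a full minute, and no runningDifference subquery is needed. For the test window below, ceil(1774.001/60)*60 = 1800, which matches the sum(value)/1800 in the expected queries.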

View File

@ -0,0 +1,99 @@
package v3
import (
"testing"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
func TestPanelTableForDelta(t *testing.T) {
cases := []struct {
name string
query *v3.BuilderQuery
expected string
}{
{
name: "request rate",
query: &v3.BuilderQuery{
QueryName: "A",
DataSource: v3.DataSourceMetrics,
AggregateOperator: v3.AggregateOperatorSumRate,
AggregateAttribute: v3.AttributeKey{
Key: "signoz_latency_count",
},
Temporality: v3.Delta,
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "service_name"},
Operator: v3.FilterOperatorIn,
Value: []interface{}{"frontend"},
},
{
Key: v3.AttributeKey{Key: "operation"},
Operator: v3.FilterOperatorIn,
Value: []interface{}{"HTTP GET /dispatch"},
},
},
},
Expression: "A",
},
expected: "SELECT toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY ts ORDER BY ts",
},
{
name: "latency p50",
query: &v3.BuilderQuery{
QueryName: "A",
DataSource: v3.DataSourceMetrics,
AggregateOperator: v3.AggregateOperatorHistQuant50,
AggregateAttribute: v3.AttributeKey{
Key: "signoz_latency_bucket",
},
Temporality: v3.Delta,
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "service_name"},
Operator: v3.FilterOperatorEqual,
Value: "frontend",
},
},
},
Expression: "A",
},
expected: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY le,ts ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts",
},
{
name: "latency p99 with group by",
query: &v3.BuilderQuery{
QueryName: "A",
DataSource: v3.DataSourceMetrics,
AggregateOperator: v3.AggregateOperatorHistQuant99,
AggregateAttribute: v3.AttributeKey{
Key: "signoz_latency_bucket",
},
Temporality: v3.Delta,
GroupBy: []v3.AttributeKey{
{
Key: "service_name",
},
},
Expression: "A",
},
expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' ) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
query, err := buildDeltaMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query, "distributed_time_series_v2")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if query != c.expected {
t.Fatalf("expected: %s, got: %s", c.expected, query)
}
})
}
}

View File

@ -403,15 +403,15 @@ func reduceQuery(query string, reduceTo v3.ReduceToOperator, aggregateOperator v
// chart with just the query value. For the query
switch reduceTo {
case v3.ReduceToOperatorLast:
query = fmt.Sprintf("SELECT anyLast(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy)
query = fmt.Sprintf("SELECT anyLastIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s", selectLabels, query, groupBy)
case v3.ReduceToOperatorSum:
query = fmt.Sprintf("SELECT sum(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy)
query = fmt.Sprintf("SELECT sumIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s", selectLabels, query, groupBy)
case v3.ReduceToOperatorAvg:
query = fmt.Sprintf("SELECT avg(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy)
query = fmt.Sprintf("SELECT avgIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s", selectLabels, query, groupBy)
case v3.ReduceToOperatorMax:
query = fmt.Sprintf("SELECT max(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy)
query = fmt.Sprintf("SELECT maxIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s", selectLabels, query, groupBy)
case v3.ReduceToOperatorMin:
query = fmt.Sprintf("SELECT min(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy)
query = fmt.Sprintf("SELECT minIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s", selectLabels, query, groupBy)
default:
return "", fmt.Errorf("unsupported reduce operator")
}
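A plausible reading of the *If rewrite above (the diff does not state the motivation): toUnixTimestamp(ts) != 0 makes the reduction skip rows carrying the zero timestamp, such as zero-filled gap rows, so they no longer skew the reduced value; the selected timestamp column is also renamed from ts to timestamp.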
@ -422,9 +422,17 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P
var query string
var err error
if mq.Temporality == v3.Delta {
query, err = buildDeltaMetricQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
if panelType == v3.PanelTypeTable {
query, err = buildDeltaMetricQueryForTable(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
} else {
query, err = buildDeltaMetricQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
}
} else {
query, err = buildMetricQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
if panelType == v3.PanelTypeTable {
query, err = buildMetricQueryForTable(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
} else {
query, err = buildMetricQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
}
}
if err != nil {
return "", err

View File

@ -235,7 +235,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
// TODO: add support for logs and traces
if builderQuery.DataSource == v3.DataSourceLogs {
query, err := logsV3.PrepareLogsQuery(params.Start, params.End, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery)
query, err := logsV3.PrepareLogsQuery(params.Start, params.End, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, "")
if err != nil {
errQueriesByName[queryName] = err.Error()
continue

View File

@ -6,6 +6,7 @@ import (
"github.com/SigNoz/govaluate"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.uber.org/zap"
)
@ -39,7 +40,7 @@ var SupportedFunctions = []string{
var EvalFuncs = map[string]govaluate.ExpressionFunction{}
type prepareTracesQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, keys map[string]v3.AttributeKey) (string, error)
type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery) (string, error)
type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, graphLimitQtype string) (string, error)
type prepareMetricQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery) (string, error)
type QueryBuilder struct {
@ -152,11 +153,25 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
}
queries[queryName] = queryString
case v3.DataSourceLogs:
queryString, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query)
if err != nil {
return nil, err
// for a ts query with a limit and group by, build the limit query and the placeholder query, then substitute the first into the second
if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
limitQuery, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, constants.FirstQueryGraphLimit)
if err != nil {
return nil, err
}
placeholderQuery, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, constants.SecondQueryGraphLimit)
if err != nil {
return nil, err
}
query := fmt.Sprintf(placeholderQuery, limitQuery)
queries[queryName] = query
} else {
queryString, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, "")
if err != nil {
return nil, err
}
queries[queryName] = queryString
}
queries[queryName] = queryString
case v3.DataSourceMetrics:
queryString, err := qb.options.BuildMetricQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query)
if err != nil {

View File

@ -46,9 +46,10 @@ type ServerOptions struct {
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
PreferDelta bool
DisableRules bool
RuleRepoURL string
PreferDelta bool
PreferSpanMetrics bool
}
// Server runs HTTP, Mux and a grpc server
@ -124,12 +125,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
telemetry.GetInstance().SetReader(reader)
apiHandler, err := NewAPIHandler(APIHandlerOpts{
Reader: reader,
SkipConfig: skipConfig,
PerferDelta: serverOptions.PreferDelta,
AppDao: dao.DB(),
RuleManager: rm,
FeatureFlags: fm,
Reader: reader,
SkipConfig: skipConfig,
PerferDelta: serverOptions.PreferDelta,
PreferSpanMetrics: serverOptions.PreferSpanMetrics,
AppDao: dao.DB(),
RuleManager: rm,
FeatureFlags: fm,
})
if err != nil {
return nil, err

View File

@ -87,6 +87,13 @@ var DEFAULT_FEATURE_SET = model.FeatureSet{
UsageLimit: -1,
Route: "",
},
model.Feature{
Name: model.UseSpanMetrics,
Active: false,
Usage: 0,
UsageLimit: -1,
Route: "",
},
}
func GetContextTimeout() time.Duration {
@ -301,3 +308,6 @@ var StaticFieldsLogsV3 = map[string]v3.AttributeKey{
const SigNozOrderByValue = "#SIGNOZ_VALUE"
const TIMESTAMP = "timestamp"
const FirstQueryGraphLimit = "first_query_graph_limit"
const SecondQueryGraphLimit = "second_query_graph_limit"

View File

@ -35,11 +35,13 @@ func main() {
var ruleRepoURL string
var preferDelta bool
var preferSpanMetrics bool
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over gauge)")
flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
flag.StringVar(&ruleRepoURL, "rules.repo-url", constants.AlertHelpPage, "(host address used to build rule link in alert messages)")
flag.Parse()
@ -55,6 +57,7 @@ func main() {
PromConfigPath: promConfigPath,
SkipTopLvlOpsPath: skipTopLvlOpsPath,
PreferDelta: preferDelta,
PreferSpanMetrics: preferSpanMetrics,
PrivateHostPort: constants.PrivateHostPort,
DisableRules: disableRules,
RuleRepoURL: ruleRepoURL,

View File

@ -13,4 +13,5 @@ const SmartTraceDetail = "SMART_TRACE_DETAIL"
const CustomMetricsFunction = "CUSTOM_METRICS_FUNCTION"
const OSS = "OSS"
const QueryBuilderPanels = "QUERY_BUILDER_PANELS"
const QueryBuilderAlerts = "QUERY_BUILDER_ALERTS"
const QueryBuilderAlerts = "QUERY_BUILDER_ALERTS"
const UseSpanMetrics = "USE_SPAN_METRICS"

View File

@ -143,8 +143,11 @@ func ValidateAndCastValue(v interface{}, dataType v3.AttributeKeyDataType) (inte
// ClickHouseFormattedValue formats the value to be used in clickhouse query
func ClickHouseFormattedValue(v interface{}) string {
// if it's pointer convert it to a value
v = getPointerValue(v)
switch x := v.(type) {
case int, int8, int16, int32, int64:
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64:
return fmt.Sprintf("%d", x)
case float32, float64:
return fmt.Sprintf("%f", x)
@ -152,6 +155,7 @@ func ClickHouseFormattedValue(v interface{}) string {
return fmt.Sprintf("'%s'", x)
case bool:
return fmt.Sprintf("%v", x)
case []interface{}:
if len(x) == 0 {
return ""
@ -167,7 +171,7 @@ func ClickHouseFormattedValue(v interface{}) string {
}
str += "]"
return str
case int, int8, int16, int32, int64, float32, float64, bool:
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool:
return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
default:
zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
@ -178,3 +182,42 @@ func ClickHouseFormattedValue(v interface{}) string {
return ""
}
}
func getPointerValue(v interface{}) interface{} {
switch x := v.(type) {
case *uint8:
return *x
case *uint16:
return *x
case *uint32:
return *x
case *uint64:
return *x
case *int:
return *x
case *int8:
return *x
case *int16:
return *x
case *int32:
return *x
case *int64:
return *x
case *float32:
return *x
case *float64:
return *x
case *string:
return *x
case *bool:
return *x
case []interface{}:
values := []interface{}{}
for _, val := range x {
values = append(values, getPointerValue(val))
}
return values
default:
return v
}
}

View File

@ -291,3 +291,86 @@ func TestValidateAndCastValue(t *testing.T) {
})
}
}
var one = 1
var onePointOne = 1.1
var oneString = "1"
var trueBool = true
var testClickHouseFormattedValueData = []struct {
name string
value interface{}
want interface{}
}{
{
name: "int",
value: 1,
want: "1",
},
{
name: "int64",
value: int64(1),
want: "1",
},
{
name: "float32",
value: float32(1.1),
want: "1.100000",
},
{
name: "string",
value: "1",
want: "'1'",
},
{
name: "bool",
value: true,
want: "true",
},
{
name: "[]interface{}",
value: []interface{}{1, 2},
want: "[1,2]",
},
{
name: "[]interface{}",
value: []interface{}{"1", "2"},
want: "['1','2']",
},
{
name: "pointer int",
value: &one,
want: "1",
},
{
name: "pointer float32",
value: onePointOne,
want: "1.100000",
},
{
name: "pointer string",
value: &oneString,
want: "'1'",
},
{
name: "pointer bool",
value: &trueBool,
want: "true",
},
{
name: "pointer []interface{}",
value: []interface{}{&one, &one},
want: "[1,1]",
},
}
func TestClickHouseFormattedValue(t *testing.T) {
for _, tt := range testClickHouseFormattedValueData {
t.Run(tt.name, func(t *testing.T) {
got := ClickHouseFormattedValue(tt.value)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ClickHouseFormattedValue() = %v, want %v", got, tt.want)
}
})
}
}