Mirror of https://git.mirrors.martin98.com/https://github.com/SigNoz/signoz, synced 2025-08-01 06:12:00 +08:00
Commit 0ed6594e48
@@ -137,7 +137,7 @@ services:
        condition: on-failure

  query-service:
-    image: signoz/query-service:0.23.0
+    image: signoz/query-service:0.23.1
    command: ["-config=/root/config/prometheus.yml"]
    # ports:
    #   - "6060:6060"     # pprof port
@@ -166,7 +166,7 @@ services:
    <<: *clickhouse-depend

  frontend:
-    image: signoz/frontend:0.23.0
+    image: signoz/frontend:0.23.1
    deploy:
      restart_policy:
        condition: on-failure
@@ -153,7 +153,7 @@ services:
  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.23.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.23.1}
    container_name: query-service
    command: ["-config=/root/config/prometheus.yml"]
    # ports:
@@ -181,7 +181,7 @@ services:
    <<: *clickhouse-depend

  frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.23.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.23.1}
    container_name: frontend
    restart: on-failure
    depends_on:
@@ -18,6 +18,7 @@ type APIHandlerOptions struct {
 	DataConnector     interfaces.DataConnector
 	SkipConfig        *basemodel.SkipConfig
 	PreferDelta       bool
+	PreferSpanMetrics bool
 	AppDao            dao.ModelDao
 	RulesManager      *rules.Manager
 	FeatureFlags      baseint.FeatureLookup
@@ -36,6 +37,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
 		Reader:            opts.DataConnector,
 		SkipConfig:        opts.SkipConfig,
 		PerferDelta:       opts.PreferDelta,
+		PreferSpanMetrics: opts.PreferSpanMetrics,
 		AppDao:            opts.AppDao,
 		RuleManager:       opts.RulesManager,
 		FeatureFlags:      opts.FeatureFlags})
@@ -2,6 +2,8 @@ package api

 import (
 	"net/http"
+
+	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 )

 func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
@@ -10,5 +12,13 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
 		ah.HandleError(w, err, http.StatusInternalServerError)
 		return
 	}
+	if ah.opts.PreferSpanMetrics {
+		for idx := range featureSet {
+			feature := &featureSet[idx]
+			if feature.Name == basemodel.UseSpanMetrics {
+				featureSet[idx].Active = true
+			}
+		}
+	}
 	ah.Respond(w, featureSet)
 }
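Reviewer note: the same `UseSpanMetrics` toggle is applied again in the community `query-service` handler further down. A minimal standalone sketch of the slice-mutation pattern used in both places (simplified stand-in type, not the actual `basemodel` definitions):

```go
package main

import "fmt"

// Feature is a simplified stand-in for basemodel.Feature (illustrative only).
type Feature struct {
	Name   string
	Active bool
}

// activateFeature flips Active on every matching entry in place.
// Writing through featureSet[idx] mutates the backing array; a plain
// `for _, f := range featureSet` loop would only modify a copy.
func activateFeature(featureSet []Feature, name string) {
	for idx := range featureSet {
		if featureSet[idx].Name == name {
			featureSet[idx].Active = true
		}
	}
}

func main() {
	fs := []Feature{{Name: "USE_SPAN_METRICS"}, {Name: "SMART_TRACE_DETAIL"}}
	activateFeature(fs, "USE_SPAN_METRICS")
	fmt.Println(fs) // [{USE_SPAN_METRICS true} {SMART_TRACE_DETAIL false}]
}
```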
@@ -57,6 +57,7 @@ type ServerOptions struct {
 	DisableRules      bool
 	RuleRepoURL       string
 	PreferDelta       bool
+	PreferSpanMetrics bool
 }

 // Server runs HTTP api service
@@ -172,6 +173,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		DataConnector:     reader,
 		SkipConfig:        skipConfig,
 		PreferDelta:       serverOptions.PreferDelta,
+		PreferSpanMetrics: serverOptions.PreferSpanMetrics,
 		AppDao:            modelDao,
 		RulesManager:      rm,
 		FeatureFlags:      lm,
@@ -84,11 +84,13 @@ func main() {

 	var enableQueryServiceLogOTLPExport bool
 	var preferDelta bool
+	var preferSpanMetrics bool

 	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
 	flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
 	flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
-	flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over raw metrics)")
+	flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
+	flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
 	flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
 	flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
 	flag.Parse()
@@ -105,6 +107,7 @@ func main() {
 		PromConfigPath:    promConfigPath,
 		SkipTopLvlOpsPath: skipTopLvlOpsPath,
 		PreferDelta:       preferDelta,
+		PreferSpanMetrics: preferSpanMetrics,
 		PrivateHostPort:   baseconst.PrivateHostPort,
 		DisableRules:      disableRules,
 		RuleRepoURL:       ruleRepoURL,
@@ -60,6 +60,13 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: 5,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.UseSpanMetrics,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }

 var ProPlan = basemodel.FeatureSet{
@@ -105,6 +112,13 @@ var ProPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.UseSpanMetrics,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }

 var EnterprisePlan = basemodel.FeatureSet{
@@ -150,4 +164,11 @@ var EnterprisePlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	basemodel.Feature{
+		Name:       basemodel.UseSpanMetrics,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }
@@ -1,6 +1,7 @@
 import { Drawer, Tabs } from 'antd';
 import JSONView from 'container/LogDetailedView/JsonView';
 import TableView from 'container/LogDetailedView/TableView';
+import { useMemo } from 'react';

 import { LogDetailProps } from './LogDetail.interfaces';

@@ -14,7 +15,8 @@ function LogDetail({
 		onClose();
 	};

-	const items = [
+	const items = useMemo(
+		() => [
 		{
 			label: 'Table',
 			key: '1',
@@ -31,7 +33,9 @@
 			key: '2',
 			children: log && <JSONView logData={log} />,
 		},
-	];
+		],
+		[log, onAddToQuery, onClickActionItem],
+	);

 	return (
 		<Drawer
@@ -6,7 +6,6 @@ import dayjs from 'dayjs';
 import dompurify from 'dompurify';
 import { FlatLogData } from 'lib/logs/flatLogData';
 import { useMemo } from 'react';
-import { ILog } from 'types/api/logs/log';

 import { ExpandIconWrapper } from '../RawLogView/styles';
 import { defaultCellStyle, defaultTableStyle } from './config';
@@ -57,14 +56,14 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => {
 			dataIndex: 'id',
 			key: 'expand',
 			// https://github.com/ant-design/ant-design/discussions/36886
-			render: (_, item): ColumnTypeRender<Record<string, unknown>> => ({
+			render: (_, item, index): ColumnTypeRender<Record<string, unknown>> => ({
 				props: {
 					style: defaultCellStyle,
 				},
 				children: (
 					<ExpandIconWrapper
 						onClick={(): void => {
-							onClickExpand((item as unknown) as ILog);
+							onClickExpand(logs[index]);
 						}}
 					>
 						<ExpandAltOutlined />
@@ -108,7 +107,7 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => {
 			},
 			...(appendTo === 'end' ? fieldColumns : []),
 		];
-	}, [fields, linesPerRow, appendTo, onClickExpand]);
+	}, [fields, appendTo, linesPerRow, onClickExpand, logs]);

 	return { columns, dataSource: flattenLogData };
 };
frontend/src/container/GoToTop/index.tsx (new file, 29 lines)
@@ -0,0 +1,29 @@
+import { ArrowUpOutlined } from '@ant-design/icons';
+import { FloatButton } from 'antd';
+import { PANEL_TYPES } from 'constants/queryBuilder';
+// hooks
+import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
+import useScrollToTop from 'hooks/useScrollToTop';
+
+function GoToTop(): JSX.Element | null {
+	const { isVisible, scrollToTop } = useScrollToTop();
+
+	const { panelType } = useQueryBuilder();
+
+	if (!isVisible) return null;
+
+	if (panelType === PANEL_TYPES.LIST) {
+		return (
+			<FloatButton
+				onClick={scrollToTop}
+				shape="circle"
+				type="primary"
+				icon={<ArrowUpOutlined />}
+			/>
+		);
+	}
+
+	return null;
+}
+
+export default GoToTop;
@@ -146,12 +146,17 @@ function LogsExplorerList({
 				isShowPageSize={false}
 				optionsMenuConfig={config}
 			/>

 			{options.format !== 'table' && (
 				<Heading>
 					<Typography.Text>Event</Typography.Text>
 				</Heading>
 			)}
-			{logs.length === 0 && <Typography>No logs lines found</Typography>}
+			{!isLoading && logs.length === 0 && (
+				<Typography>No logs lines found</Typography>
+			)}
+
 			<InfinityWrapperStyled>{renderContent}</InfinityWrapperStyled>
 		</>
 	);
@@ -1,5 +1,4 @@
 import { TabsProps } from 'antd';
-import axios from 'axios';
 import LogDetail from 'components/LogDetail';
 import TabLabel from 'components/TabLabel';
 import { QueryParams } from 'constants/query';
@@ -13,16 +12,17 @@ import { queryParamNamesMap } from 'constants/queryBuilderQueryNames';
 import ROUTES from 'constants/routes';
 import { DEFAULT_PER_PAGE_VALUE } from 'container/Controls/config';
 import ExportPanel from 'container/ExportPanel';
+import GoToTop from 'container/GoToTop';
 import LogsExplorerChart from 'container/LogsExplorerChart';
 import LogsExplorerList from 'container/LogsExplorerList';
-// TODO: temporary hide table view
-// import LogsExplorerTable from 'container/LogsExplorerTable';
+import LogsExplorerTable from 'container/LogsExplorerTable';
 import { GRAPH_TYPES } from 'container/NewDashboard/ComponentsSlider';
 import TimeSeriesView from 'container/TimeSeriesView/TimeSeriesView';
 import { useUpdateDashboard } from 'hooks/dashboard/useUpdateDashboard';
 import { addEmptyWidgetInDashboardJSONWithQuery } from 'hooks/dashboard/utils';
 import { useGetExplorerQueryRange } from 'hooks/queryBuilder/useGetExplorerQueryRange';
 import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
+import useAxiosError from 'hooks/useAxiosError';
 import { useNotifications } from 'hooks/useNotifications';
 import useUrlQueryData from 'hooks/useUrlQueryData';
 import { chooseAutocompleteFromCustomValue } from 'lib/newQueryBuilder/chooseAutocompleteFromCustomValue';
@@ -82,6 +82,8 @@ function LogsExplorerViews(): JSX.Element {
 	const [logs, setLogs] = useState<ILog[]>([]);
 	const [requestData, setRequestData] = useState<Query | null>(null);

+	const handleAxisError = useAxiosError();
+
 	const currentStagedQueryData = useMemo(() => {
 		if (!stagedQuery || stagedQuery.builder.queryData.length !== 1) return null;

@@ -358,16 +360,16 @@

 				history.push(dashboardEditView);
 			},
-			onError: (error) => {
-				if (axios.isAxiosError(error)) {
-					notifications.error({
-						message: error.message,
-					});
-				}
-			},
+			onError: handleAxisError,
 		});
 	},
-	[exportDefaultQuery, history, notifications, updateDashboard],
+	[
+		exportDefaultQuery,
+		history,
+		notifications,
+		updateDashboard,
+		handleAxisError,
+	],
 );

 useEffect(() => {
@@ -437,17 +439,16 @@
 				<TimeSeriesView isLoading={isFetching} data={data} isError={isError} />
 			),
 		},
-		// TODO: temporary hide table view
-		// {
-		// 	label: 'Table',
-		// 	key: PANEL_TYPES.TABLE,
-		// 	children: (
-		// 		<LogsExplorerTable
-		// 			data={data?.payload.data.newResult.data.result || []}
-		// 			isLoading={isFetching}
-		// 		/>
-		// 	),
-		// },
+		{
+			label: 'Table',
+			key: PANEL_TYPES.TABLE,
+			children: (
+				<LogsExplorerTable
+					data={data?.payload.data.newResult.data.result || []}
+					isLoading={isFetching}
+				/>
+			),
+		},
 	],
 	[
 		isMultipleQueries,
@@ -513,6 +514,8 @@
 				onAddToQuery={handleAddToQuery}
 				onClickActionItem={handleAddToQuery}
 			/>
+
+			<GoToTop />
 		</>
 	);
 }
@@ -1,8 +1,8 @@
-import { InputNumber, Tooltip } from 'antd';
-// import { useMemo } from 'react';
+import { InputNumber } from 'antd';
+import { useMemo } from 'react';
 import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
+import { DataSource } from 'types/common/queryBuilder';

-// import { DataSource } from 'types/common/queryBuilder';
 import { selectStyle } from '../QueryBuilderSearch/config';

 function LimitFilter({ onChange, query }: LimitFilterProps): JSX.Element {
@@ -21,25 +21,23 @@ function LimitFilter({ onChange, query }: LimitFilterProps): JSX.Element {
 		}
 	};

-	// const isMetricsDataSource = useMemo(
-	// 	() => query.dataSource === DataSource.METRICS,
-	// 	[query.dataSource],
-	// );
+	const isMetricsDataSource = useMemo(
+		() => query.dataSource === DataSource.METRICS,
+		[query.dataSource],
+	);

-	// const isDisabled = isMetricsDataSource && !query.aggregateAttribute.key;
+	const isDisabled = isMetricsDataSource && !query.aggregateAttribute.key;

 	return (
-		<Tooltip placement="top" title="coming soon">
 			<InputNumber
 				min={1}
 				type="number"
-				readOnly
 				value={query.limit}
 				style={selectStyle}
+				disabled={isDisabled}
 				onChange={onChange}
 				onKeyDown={handleKeyDown}
 			/>
-		</Tooltip>
 	);
 }

frontend/src/hooks/useScrollToTop/index.tsx (new file, 29 lines)
@@ -0,0 +1,29 @@
+import throttle from 'lodash-es/throttle';
+import { useEffect, useState } from 'react';
+
+import { UseScrollToTop } from './types';
+
+function useScrollToTop(visibleOffset = 200): UseScrollToTop {
+	const [isVisible, setIsVisible] = useState<boolean>(false);
+
+	const scrollToTop = (): void => {
+		window.scrollTo({
+			top: 0,
+			behavior: 'smooth',
+		});
+	};
+
+	useEffect(() => {
+		const toggleVisibility = throttle(() => {
+			setIsVisible(window.pageYOffset > visibleOffset);
+		}, 300);
+
+		window.addEventListener('scroll', toggleVisibility);
+
+		return (): void => window.removeEventListener('scroll', toggleVisibility);
+	}, [visibleOffset]);
+
+	return { isVisible, scrollToTop };
+}
+
+export default useScrollToTop;
frontend/src/hooks/useScrollToTop/types.ts (new file, 4 lines)
@@ -0,0 +1,4 @@
+export interface UseScrollToTop {
+	isVisible: boolean;
+	scrollToTop: VoidFunction;
+}
frontend/src/hooks/useScrollToTop/useScrollToTop.test.ts (new file, 58 lines)
@@ -0,0 +1,58 @@
+import { act, renderHook } from '@testing-library/react';
+
+import useScrollToTop from './index';
+
+// Mocking window.scrollTo method
+global.scrollTo = jest.fn();
+
+describe('useScrollToTop hook', () => {
+	beforeAll(() => {
+		jest.useFakeTimers();
+	});
+
+	it('should change visibility and scroll to top on call', () => {
+		const { result } = renderHook(() => useScrollToTop(100));
+
+		// Simulate scrolling 150px down
+		act(() => {
+			global.pageYOffset = 150;
+			global.dispatchEvent(new Event('scroll'));
+			jest.advanceTimersByTime(300);
+		});
+
+		expect(result.current.isVisible).toBe(true);
+
+		// Simulate scrolling to top
+		act(() => {
+			result.current.scrollToTop();
+		});
+
+		expect(global.scrollTo).toHaveBeenCalledWith({ top: 0, behavior: 'smooth' });
+	});
+
+	it('should be invisible when scrolled less than offset', () => {
+		const { result } = renderHook(() => useScrollToTop(100));
+
+		// Simulate scrolling 50px down
+		act(() => {
+			global.pageYOffset = 50;
+			global.dispatchEvent(new Event('scroll'));
+			jest.advanceTimersByTime(300);
+		});
+
+		expect(result.current.isVisible).toBe(false);
+	});
+
+	it('should be visible when scrolled more than offset', () => {
+		const { result } = renderHook(() => useScrollToTop(100));
+
+		// Simulate scrolling 200px down
+		act(() => {
+			global.pageYOffset = 200;
+			global.dispatchEvent(new Event('scroll'));
+			jest.advanceTimersByTime(300);
+		});
+
+		expect(result.current.isVisible).toBe(true);
+	});
+});
@@ -4,8 +4,13 @@ import { FORMULA_REGEXP } from 'constants/regExp';
 import { QueryTableProps } from 'container/QueryTable/QueryTable.intefaces';
 import { toCapitalize } from 'lib/toCapitalize';
 import { ReactNode } from 'react';
-import { IBuilderQuery, Query } from 'types/api/queryBuilder/queryBuilderData';
+import {
+	IBuilderFormula,
+	IBuilderQuery,
+	Query,
+} from 'types/api/queryBuilder/queryBuilderData';
 import { ListItem, QueryDataV3, SeriesItem } from 'types/api/widgets/getQuery';
+import { QueryBuilderData } from 'types/common/queryBuilder';
 import { v4 as uuid } from 'uuid';

 type CreateTableDataFromQueryParams = Pick<
@@ -21,8 +26,10 @@ export type RowData = {

 type DynamicColumn = {
 	key: keyof RowData;
+	title: string;
+	sourceLabel: string;
 	data: (string | number)[];
-	type: 'field' | 'operator';
+	type: 'field' | 'operator' | 'formula';
 	// sortable: boolean;
 };

@@ -39,7 +46,6 @@ type CreateTableDataFromQuery = (
 type FillColumnData = (
 	queryTableData: QueryDataV3[],
 	dynamicColumns: DynamicColumns,
-	query: Query,
 ) => { filledDynamicColumns: DynamicColumns; rowsLength: number };

 type GetDynamicColumns = (
@@ -54,43 +60,37 @@ type SeriesItemLabels = SeriesItem['labels'];
 const isFormula = (queryName: string): boolean =>
 	FORMULA_REGEXP.test(queryName);

-const isColumnExist = (
-	columnName: string,
+const isValueExist = (
+	field: keyof DynamicColumn,
+	value: string,
 	columns: DynamicColumns,
 ): boolean => {
-	const columnKeys = columns.map((item) => item.key);
+	const existColumns = columns.find((item) => item[field] === value);

-	return columnKeys.includes(columnName);
+	return !!existColumns;
 };

-const prepareColumnTitle = (title: string): string => {
-	const haveUnderscore = title.includes('_');
-
-	if (haveUnderscore) {
-		return title
-			.split('_')
-			.map((str) => toCapitalize(str))
-			.join(' ');
-	}
-
-	return toCapitalize(title);
-};
-
-const getQueryOperator = (
-	queryData: IBuilderQuery[],
+const getQueryByName = <T extends keyof QueryBuilderData>(
+	builder: QueryBuilderData,
 	currentQueryName: string,
-): string => {
-	const builderQuery = queryData.find((q) => q.queryName === currentQueryName);
+	type: T,
+): (T extends 'queryData' ? IBuilderQuery : IBuilderFormula) | null => {
+	const queryArray = builder[type];

-	return builderQuery ? builderQuery.aggregateOperator : '';
+	const currentQuery =
+		queryArray.find((q) => q.queryName === currentQueryName) || null;
+
+	if (!currentQuery) return null;
+
+	return currentQuery as T extends 'queryData' ? IBuilderQuery : IBuilderFormula;
 };

 const createLabels = <T extends ListItemData | SeriesItemLabels>(
-	labels: T,
+	// labels: T,
 	label: keyof T,
 	dynamicColumns: DynamicColumns,
 ): void => {
-	if (isColumnExist(label as string, dynamicColumns)) return;
+	if (isValueExist('key', label as string, dynamicColumns)) return;

 	// const labelValue = labels[label];

@@ -98,6 +98,8 @@ const createLabels = <T extends ListItemData | SeriesItemLabels>(

 	const fieldObj: DynamicColumn = {
 		key: label as string,
+		title: label as string,
+		sourceLabel: label as string,
 		data: [],
 		type: 'field',
 		// sortable: isNumber,
@@ -106,6 +108,68 @@ const createLabels = <T extends ListItemData | SeriesItemLabels>(
 	dynamicColumns.push(fieldObj);
 };

+const appendOperatorFormulaColumns = (
+	builder: QueryBuilderData,
+	currentQueryName: string,
+	dynamicColumns: DynamicColumns,
+): void => {
+	const currentFormula = getQueryByName(
+		builder,
+		currentQueryName,
+		'queryFormulas',
+	);
+	if (currentFormula) {
+		let formulaLabel = `${currentFormula.queryName}(${currentFormula.expression})`;
+
+		if (currentFormula.legend) {
+			formulaLabel += ` - ${currentFormula.legend}`;
+		}
+
+		const formulaColumn: DynamicColumn = {
+			key: currentQueryName,
+			title: formulaLabel,
+			sourceLabel: formulaLabel,
+			data: [],
+			type: 'formula',
+			// sortable: isNumber,
+		};
+
+		dynamicColumns.push(formulaColumn);
+	}
+
+	const currentQueryData = getQueryByName(
+		builder,
+		currentQueryName,
+		'queryData',
+	);
+
+	if (!currentQueryData) return;
+
+	let operatorLabel = `${currentQueryData.aggregateOperator}`;
+	if (currentQueryData.aggregateAttribute.key) {
+		operatorLabel += `(${currentQueryData.aggregateAttribute.key})`;
+	}
+
+	if (currentQueryData.legend) {
+		operatorLabel += ` - ${currentQueryData.legend}`;
+	} else {
+		operatorLabel += ` - ${currentQueryData.queryName}`;
+	}
+
+	const resultValue = `${toCapitalize(operatorLabel)}`;
+
+	const operatorColumn: DynamicColumn = {
+		key: currentQueryName,
+		title: resultValue,
+		sourceLabel: resultValue,
+		data: [],
+		type: 'operator',
+		// sortable: isNumber,
+	};
+
+	dynamicColumns.push(operatorColumn);
+};
+
 const getDynamicColumns: GetDynamicColumns = (queryTableData, query) => {
 	const dynamicColumns: DynamicColumns = [];

@@ -113,49 +177,52 @@ const getDynamicColumns: GetDynamicColumns = (queryTableData, query) => {
 		if (currentQuery.list) {
 			currentQuery.list.forEach((listItem) => {
 				Object.keys(listItem.data).forEach((label) => {
-					createLabels<ListItemData>(
-						listItem.data,
-						label as ListItemKey,
-						dynamicColumns,
-					);
+					createLabels<ListItemData>(label as ListItemKey, dynamicColumns);
 				});
 			});
 		}

 		if (currentQuery.series) {
-			if (!isColumnExist('timestamp', dynamicColumns)) {
+			if (!isValueExist('key', 'timestamp', dynamicColumns)) {
 				dynamicColumns.push({
 					key: 'timestamp',
+					title: 'Timestamp',
+					sourceLabel: 'Timestamp',
 					data: [],
 					type: 'field',
 					// sortable: true,
 				});
 			}

-			currentQuery.series.forEach((seria) => {
-				Object.keys(seria.labels).forEach((label) => {
-					createLabels<SeriesItemLabels>(seria.labels, label, dynamicColumns);
-				});
-			});
-
-			const operator = getQueryOperator(
-				query.builder.queryData,
+			appendOperatorFormulaColumns(
+				query.builder,
 				currentQuery.queryName,
+				dynamicColumns,
 			);

-			if (operator === '' || isColumnExist(operator, dynamicColumns)) return;
-
-			const operatorColumn: DynamicColumn = {
-				key: operator,
-				data: [],
-				type: 'operator',
-				// sortable: true,
-			};
-			dynamicColumns.push(operatorColumn);
+			currentQuery.series.forEach((seria) => {
+				Object.keys(seria.labels).forEach((label) => {
+					createLabels<SeriesItemLabels>(label, dynamicColumns);
+				});
+			});
 		}
 	});

-	return dynamicColumns;
+	return dynamicColumns.map((item) => {
+		if (isFormula(item.key as string)) {
+			return item;
+		}
+
+		const sameValues = dynamicColumns.filter(
+			(column) => column.sourceLabel === item.sourceLabel,
+		);
+
+		if (sameValues.length > 1) {
+			return { ...item, title: `${item.title} - ${item.key}` };
+		}
+
+		return item;
+	});
 };

 const fillEmptyRowCells = (
@@ -179,7 +246,6 @@ const fillDataFromSeria = (
 	seria: SeriesItem,
 	columns: DynamicColumns,
 	queryName: string,
-	operator: string,
 ): void => {
 	const labelEntries = Object.entries(seria.labels);

@@ -195,13 +261,7 @@ const fillDataFromSeria = (
 			return;
 		}

-		if (isFormula(queryName) && queryName === column.key) {
-			column.data.push(parseFloat(value.value).toFixed(2));
-			unusedColumnsKeys.delete(column.key);
-			return;
-		}
-
-		if (!isFormula(queryName) && operator === column.key) {
+		if (queryName === column.key) {
 			column.data.push(parseFloat(value.value).toFixed(2));
 			unusedColumnsKeys.delete(column.key);
 			return;
@@ -238,25 +298,16 @@ const fillDataFromList = (
 	});
 };

-const fillColumnsData: FillColumnData = (queryTableData, cols, query) => {
+const fillColumnsData: FillColumnData = (queryTableData, cols) => {
 	const fields = cols.filter((item) => item.type === 'field');
 	const operators = cols.filter((item) => item.type === 'operator');
-	const resultColumns = [...fields, ...operators];
+	const formulas = cols.filter((item) => item.type === 'formula');
+	const resultColumns = [...fields, ...operators, ...formulas];

 	queryTableData.forEach((currentQuery) => {
 		if (currentQuery.series) {
 			currentQuery.series.forEach((seria) => {
-				const currentOperator = getQueryOperator(
-					query.builder.queryData,
-					currentQuery.queryName,
-				);
-
-				fillDataFromSeria(
-					seria,
-					resultColumns,
-					currentQuery.queryName,
-					currentOperator,
-				);
+				fillDataFromSeria(seria, resultColumns, currentQuery.queryName);
 			});
 		}

@@ -303,7 +354,7 @@ const generateTableColumns = (
 		const column: ColumnType<RowData> = {
 			dataIndex: item.key,
 			key: item.key,
-			title: prepareColumnTitle(item.key as string),
+			title: item.title,
 			// sorter: item.sortable
 			// 	? (a: RowData, b: RowData): number =>
 			// 			(a[item.key] as number) - (b[item.key] as number)
@@ -326,7 +377,6 @@ export const createTableColumnsFromQuery: CreateTableDataFromQuery = ({
 	const { filledDynamicColumns, rowsLength } = fillColumnsData(
 		queryTableData,
 		dynamicColumns,
-		query,
 	);

 	const dataSource = generateData(filledDynamicColumns, rowsLength);
@@ -72,6 +72,7 @@ type APIHandler struct {
 	ready             func(http.HandlerFunc) http.HandlerFunc
 	queryBuilder      *queryBuilder.QueryBuilder
 	preferDelta       bool
+	preferSpanMetrics bool

 	// SetupCompleted indicates if SigNoz is ready for general use.
 	// at the moment, we mark the app ready when the first user
@@ -87,6 +88,7 @@ type APIHandlerOpts struct {
 	SkipConfig *model.SkipConfig

 	PerferDelta       bool
+	PreferSpanMetrics bool
 	// dao layer to perform crud on app objects like dashboard, alerts etc
 	AppDao dao.ModelDao

@@ -110,6 +112,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
 		appDao:            opts.AppDao,
 		skipConfig:        opts.SkipConfig,
 		preferDelta:       opts.PerferDelta,
+		preferSpanMetrics: opts.PreferSpanMetrics,
 		alertManager:      alertManager,
 		ruleManager:       opts.RuleManager,
 		featureFlags:      opts.FeatureFlags,
@@ -1668,6 +1671,14 @@ func (aH *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
 		aH.HandleError(w, err, http.StatusInternalServerError)
 		return
 	}
+	if aH.preferSpanMetrics {
+		for idx := range featureSet {
+			feature := &featureSet[idx]
+			if feature.Name == model.UseSpanMetrics {
+				featureSet[idx].Active = true
+			}
+		}
+	}
 	aH.Respond(w, featureSet)
 }

@@ -2511,6 +2522,7 @@ func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries ma
 		wg.Add(1)
 		go func(name, query string) {
 			defer wg.Done()
+
 			seriesList, err := aH.reader.GetTimeSeriesResultV3(ctx, query)

 			if err != nil {
@@ -2842,20 +2854,48 @@ func applyMetricLimit(results []*v3.Result, queryRangeParams *v3.QueryRangeParam
 		builderQueries := queryRangeParams.CompositeQuery.BuilderQueries
 		if builderQueries != nil && builderQueries[result.QueryName].DataSource == v3.DataSourceMetrics {
 			limit := builderQueries[result.QueryName].Limit
-			var orderAsc bool
-			for _, item := range builderQueries[result.QueryName].OrderBy {
-				if item.ColumnName == constants.SigNozOrderByValue {
-					orderAsc = strings.ToLower(item.Order) == "asc"
-					break
-				}
-			}
+			orderByList := builderQueries[result.QueryName].OrderBy
 			if limit != 0 {
-				sort.Slice(result.Series, func(i, j int) bool {
-					if orderAsc {
-						return result.Series[i].Points[0].Value < result.Series[j].Points[0].Value
-					}
-					return result.Series[i].Points[0].Value > result.Series[j].Points[0].Value
+				if len(orderByList) == 0 {
+					// If no orderBy is specified, sort by value in descending order
+					orderByList = []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "desc"}}
+				}
+				sort.SliceStable(result.Series, func(i, j int) bool {
+					for _, orderBy := range orderByList {
+						if orderBy.ColumnName == constants.SigNozOrderByValue {
+							if result.Series[i].GroupingSetsPoint == nil || result.Series[j].GroupingSetsPoint == nil {
+								// Handle nil GroupingSetsPoint, if needed
+								// Here, we assume non-nil values are always less than nil values
+								return result.Series[i].GroupingSetsPoint != nil
+							}
+							if orderBy.Order == "asc" {
+								return result.Series[i].GroupingSetsPoint.Value < result.Series[j].GroupingSetsPoint.Value
+							} else if orderBy.Order == "desc" {
+								return result.Series[i].GroupingSetsPoint.Value > result.Series[j].GroupingSetsPoint.Value
+							}
+						} else {
+							// Sort based on Labels map
+							labelI, existsI := result.Series[i].Labels[orderBy.ColumnName]
+							labelJ, existsJ := result.Series[j].Labels[orderBy.ColumnName]
+
+							if !existsI || !existsJ {
+								// Handle missing labels, if needed
+								// Here, we assume non-existent labels are always less than existing ones
+								return existsI
+							}
+
+							if orderBy.Order == "asc" {
+								return strings.Compare(labelI, labelJ) < 0
+							} else if orderBy.Order == "desc" {
+								return strings.Compare(labelI, labelJ) > 0
+							}
+						}
+					}
+					// Preserve original order if no matching orderBy is found
+					return i < j
 				})
+
 				if len(result.Series) > int(limit) {
 					result.Series = result.Series[:limit]
 				}
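Reviewer note: the new comparator walks the orderBy list in priority order inside a single `sort.SliceStable` call, so series can be ordered by label columns as well as by the aggregated value, and stability preserves the incoming order when everything compares equal. A minimal standalone sketch of that multi-key pattern (simplified types, not the actual `v3` model; ties here explicitly fall through to the next key):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

type orderBy struct {
	column string
	order  string // "asc" or "desc"
}

type series struct {
	labels map[string]string
}

// sortSeries applies each orderBy key in priority order; SliceStable keeps
// the original order of elements that tie on every key.
func sortSeries(list []series, orderByList []orderBy) {
	sort.SliceStable(list, func(i, j int) bool {
		for _, ob := range orderByList {
			li, lj := list[i].labels[ob.column], list[j].labels[ob.column]
			if li == lj {
				continue // tie on this key; compare on the next one
			}
			if ob.order == "asc" {
				return strings.Compare(li, lj) < 0
			}
			return strings.Compare(li, lj) > 0
		}
		return false // full tie; preserve original order
	})
}

func main() {
	list := []series{
		{labels: map[string]string{"priority": "P1", "status_code": "301"}},
		{labels: map[string]string{"priority": "P0", "status_code": "200"}},
		{labels: map[string]string{"priority": "P0", "status_code": "400"}},
	}
	sortSeries(list, []orderBy{
		{column: "priority", order: "asc"},
		{column: "status_code", order: "desc"},
	})
	for _, s := range list {
		fmt.Println(s.labels["priority"], s.labels["status_code"])
	}
	// Output:
	// P0 400
	// P0 200
	// P1 301
}
```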
@@ -443,6 +443,278 @@ func TestApplyLimitOnMetricResult(t *testing.T) {
 				},
 			},
 		},
+		{
+			// ["GET /api/v1/health", "DELETE /api/v1/health"] so result should be ["DELETE /api/v1/health"] although it has lower value
+			name: "test limit with operation asc",
+			inputResult: []*v3.Result{
+				{
+					QueryName: "A",
+					Series: []*v3.Series{
+						{
+							Labels: map[string]string{
+								"service_name": "frontend",
+								"operation":    "GET /api/v1/health",
+							},
+							Points: []v3.Point{
+								{
+									Timestamp: 1689220036000,
+									Value:     19.2,
+								},
+								{
+									Timestamp: 1689220096000,
+									Value:     19.5,
+								},
+							},
+							GroupingSetsPoint: &v3.Point{
+								Timestamp: 0,
+								Value:     19.3,
+							},
+						},
+						{
+							Labels: map[string]string{
+								"service_name": "route",
+								"operation":    "DELETE /api/v1/health",
+							},
+							Points: []v3.Point{
+								{
+									Timestamp: 1689220036000,
+									Value:     8.83,
+								},
+								{
+									Timestamp: 1689220096000,
+									Value:     8.83,
+								},
+							},
+							GroupingSetsPoint: &v3.Point{
+								Timestamp: 0,
+								Value:     8.83,
+							},
+						},
+					},
+				},
+			},
+			params: &v3.QueryRangeParamsV3{
+				Start: 1689220036000,
+				End:   1689220096000,
+				Step:  60,
+				CompositeQuery: &v3.CompositeQuery{
+					BuilderQueries: map[string]*v3.BuilderQuery{
+						"A": {
+							QueryName:          "A",
+							AggregateAttribute: v3.AttributeKey{Key: "signo_calls_total"},
+							DataSource:         v3.DataSourceMetrics,
+							AggregateOperator:  v3.AggregateOperatorSumRate,
+							Expression:         "A",
+							GroupBy:            []v3.AttributeKey{{Key: "service_name"}},
+							Limit:              1,
+							OrderBy:            []v3.OrderBy{{ColumnName: "operation", Order: "asc"}},
+						},
+					},
+					QueryType: v3.QueryTypeBuilder,
+					PanelType: v3.PanelTypeGraph,
+				},
+			},
+			expectedResult: []*v3.Result{
+				{
+					QueryName: "A",
+					Series: []*v3.Series{
+						{
+							Labels: map[string]string{
+								"service_name": "route",
+								"operation":    "DELETE /api/v1/health",
+							},
+							Points: []v3.Point{
+								{
+									Timestamp: 1689220036000,
+									Value:     8.83,
+								},
+								{
+									Timestamp: 1689220096000,
+									Value:     8.83,
+								},
+							},
+							GroupingSetsPoint: &v3.Point{
+								Timestamp: 0,
+								Value:     8.83,
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			name: "test limit with multiple order by labels",
+			inputResult: []*v3.Result{
+				{
+					QueryName: "A",
+					Series: []*v3.Series{
+						{
+							Labels: map[string]string{
+								"service_name": "frontend",
+								"operation":    "GET /api/v1/health",
+								"status_code":  "200",
+								"priority":     "P0",
+							},
+							Points: []v3.Point{
+								{
+									Timestamp: 1689220036000,
+									Value:     19.2,
+								},
+								{
+									Timestamp: 1689220096000,
+									Value:     19.5,
+								},
+							},
+							GroupingSetsPoint: &v3.Point{
+								Timestamp: 0,
+								Value:     19.3,
+							},
+						},
+						{
+							Labels: map[string]string{
+								"service_name": "route",
+								"operation":    "DELETE /api/v1/health",
+								"status_code":  "301",
+								"priority":     "P1",
+							},
+							Points: []v3.Point{
+								{
+									Timestamp: 1689220036000,
+									Value:     8.83,
+								},
+								{
+									Timestamp: 1689220096000,
+									Value:     8.83,
+								},
+							},
+							GroupingSetsPoint: &v3.Point{
+								Timestamp: 0,
+								Value:     8.83,
+							},
+						},
+						{
+							Labels: map[string]string{
+								"service_name": "route",
+								"operation":    "DELETE /api/v1/health",
+								"status_code":  "400",
+								"priority":     "P0",
+							},
+							Points: []v3.Point{
+								{
+									Timestamp: 1689220036000,
+									Value:     8.83,
+								},
+								{
+									Timestamp: 1689220096000,
+									Value:     8.83,
+								},
+							},
+							GroupingSetsPoint: &v3.Point{
+								Timestamp: 0,
+								Value:     8.83,
+							},
+						},
+						{
+							Labels: map[string]string{
+								"service_name": "route",
+								"operation":    "DELETE /api/v1/health",
+								"status_code":  "200",
+								"priority":     "P1",
+							},
+							Points: []v3.Point{
+								{
+									Timestamp: 1689220036000,
+									Value:     8.83,
+								},
+								{
+									Timestamp: 1689220096000,
+									Value:     8.83,
+								},
+							},
+							GroupingSetsPoint: &v3.Point{
+								Timestamp: 0,
+								Value:     8.83,
+							},
+						},
+					},
+				},
+			},
+			params: &v3.QueryRangeParamsV3{
+				Start: 1689220036000,
+				End:   1689220096000,
+				Step:  60,
+				CompositeQuery: &v3.CompositeQuery{
+					BuilderQueries: map[string]*v3.BuilderQuery{
+						"A": {
+							QueryName:          "A",
+							AggregateAttribute: v3.AttributeKey{Key: "signo_calls_total"},
+							DataSource:         v3.DataSourceMetrics,
+							AggregateOperator:  v3.AggregateOperatorSumRate,
+							Expression:         "A",
+							GroupBy:            []v3.AttributeKey{{Key: "service_name"}, {Key: "operation"}, {Key: "status_code"}, {Key: "priority"}},
+							Limit:              2,
+							OrderBy: []v3.OrderBy{
+								{ColumnName: "priority", Order: "asc"},
+								{ColumnName: "status_code", Order: "desc"},
+							},
+						},
+					},
+					QueryType: v3.QueryTypeBuilder,
+					PanelType: v3.PanelTypeGraph,
+				},
+			},
+			expectedResult: []*v3.Result{
+				{
+					QueryName: "A",
+					Series: []*v3.Series{
+						{
+							Labels: map[string]string{
+								"service_name": "frontend",
+								"operation":    "GET /api/v1/health",
+								"status_code":  "200",
+								"priority":     "P0",
+							},
+							Points: []v3.Point{
+								{
+									Timestamp: 1689220036000,
+									Value:     19.2,
+								},
+								{
+									Timestamp: 1689220096000,
+									Value:     19.5,
+								},
+							},
+							GroupingSetsPoint: &v3.Point{
+								Timestamp: 0,
+								Value:     19.3,
+							},
+						},
+						{
+							Labels: map[string]string{
+								"service_name": "route",
+								"operation":    "DELETE /api/v1/health",
+								"status_code":  "400",
+								"priority":     "P0",
+							},
+							Points: []v3.Point{
+								{
+									Timestamp: 1689220036000,
+									Value:     8.83,
+								},
+								{
+									Timestamp: 1689220096000,
+									Value:     8.83,
+								},
+							},
+							GroupingSetsPoint: &v3.Point{
+								Timestamp: 0,
+								Value:     8.83,
+							},
+						},
+					},
+				},
+			},
+		},
 	}

 	for _, c := range cases {
@@ -89,17 +89,29 @@ func getClickhouseColumnName(key v3.AttributeKey) string {
 }

 // getSelectLabels returns the select labels for the query based on groupBy and aggregateOperator
-func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) (string, error) {
+func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
 	var selectLabels string
 	if aggregatorOperator == v3.AggregateOperatorNoOp {
 		selectLabels = ""
 	} else {
 		for _, tag := range groupBy {
 			columnName := getClickhouseColumnName(tag)
-			selectLabels += fmt.Sprintf(", %s as %s", columnName, tag.Key)
+			selectLabels += fmt.Sprintf(" %s as %s,", columnName, tag.Key)
 		}
 	}
-	return selectLabels, nil
+	return selectLabels
+}
+
+func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
+	var selectLabels []string
+	if aggregatorOperator == v3.AggregateOperatorNoOp {
+		return ""
+	} else {
+		for _, tag := range groupBy {
+			selectLabels = append(selectLabels, tag.Key)
+		}
+	}
+	return strings.Join(selectLabels, ",")
 }

 func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey) (string, error) {
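Reviewer note on the shapes these two helpers return: `getSelectLabels` now emits a leading-space, trailing-comma fragment sized to sit between `SELECT ... ts,` and the `%s as value` placeholder, while the new `getSelectKeys` emits only the bare comma-joined key names for the wrapper query and the `IN (...)` filter used below. A small illustrative sketch with simplified stand-ins (the column expression is a hypothetical example, not the exact ClickHouse mapping):

```go
package main

import (
	"fmt"
	"strings"
)

type attribute struct{ key, column string }

// selectLabels mirrors getSelectLabels: " <column> as <key>," fragments,
// ready to be concatenated into the SELECT list of the inner query.
func selectLabels(groupBy []attribute) string {
	var s string
	for _, tag := range groupBy {
		s += fmt.Sprintf(" %s as %s,", tag.column, tag.key)
	}
	return s
}

// selectKeys mirrors getSelectKeys: just the comma-joined key names,
// used by the outer wrapper query and the IN (...) restriction.
func selectKeys(groupBy []attribute) string {
	keys := make([]string, 0, len(groupBy))
	for _, tag := range groupBy {
		keys = append(keys, tag.key)
	}
	return strings.Join(keys, ",")
}

func main() {
	groupBy := []attribute{{key: "method", column: "attributes_string_value[indexOf(attributes_string_key, 'method')]"}}
	fmt.Printf("%q\n", selectLabels(groupBy)) // " attributes_string_value[...] as method,"
	fmt.Println(selectKeys(groupBy))          // method
}
```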
@@ -163,7 +175,7 @@ func getZerosForEpochNano(epoch int64) int64 {
 	return int64(math.Pow(10, float64(19-count)))
 }

-func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.BuilderQuery) (string, error) {
+func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.BuilderQuery, graphLimitQtype string) (string, error) {

 	filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy)
 	if err != nil {
@@ -173,10 +185,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 	// timerange will be sent in epoch millisecond
 	timeFilter := fmt.Sprintf("(timestamp >= %d AND timestamp <= %d)", start*getZerosForEpochNano(start), end*getZerosForEpochNano(end))

-	selectLabels, err := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
-	if err != nil {
-		return "", err
-	}
+	selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)

 	having := having(mq.Having)
 	if having != "" {
@@ -184,35 +193,44 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 	}
 
 	var queryTmpl string
-	if panelType == v3.PanelTypeTable {
+	if graphLimitQtype == constants.FirstQueryGraphLimit {
+		queryTmpl = "SELECT"
+	} else if panelType == v3.PanelTypeTable {
 		queryTmpl =
-			"SELECT now() as ts" + selectLabels +
-				", %s as value " +
-				"from signoz_logs.distributed_logs " +
-				"where " + timeFilter + "%s" +
-				"%s%s" +
-				"%s"
+			"SELECT now() as ts,"
 	} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
 		// Select the aggregate value for interval
 		queryTmpl =
-			fmt.Sprintf("SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL %d SECOND) AS ts", step) + selectLabels +
-				", %s as value " +
+			fmt.Sprintf("SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL %d SECOND) AS ts,", step)
+	}
+
+	queryTmpl =
+		queryTmpl + selectLabels +
+			" %s as value " +
 			"from signoz_logs.distributed_logs " +
 			"where " + timeFilter + "%s" +
 			"%s%s" +
 			"%s"
 
+	// we dont need value for first query
+	// going with this route as for a cleaner approach on implementation
+	if graphLimitQtype == constants.FirstQueryGraphLimit {
+		queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
 	}
 
-	groupBy := groupByAttributeKeyTags(panelType, mq.GroupBy...)
+	groupBy := groupByAttributeKeyTags(panelType, graphLimitQtype, mq.GroupBy...)
 	if panelType != v3.PanelTypeList && groupBy != "" {
 		groupBy = " group by " + groupBy
 	}
-	orderBy := orderByAttributeKeyTags(panelType, mq.AggregateOperator, mq.OrderBy, mq.GroupBy)
+	orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy)
 	if panelType != v3.PanelTypeList && orderBy != "" {
 		orderBy = " order by " + orderBy
 	}
 
+	if graphLimitQtype == constants.SecondQueryGraphLimit {
+		filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) IN (", getSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
+	}
+
 	aggregationKey := ""
 	if mq.AggregateAttribute.Key != "" {
 		aggregationKey = getClickhouseColumnName(mq.AggregateAttribute)
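To make the two graph-limit branches above concrete: for constants.FirstQueryGraphLimit the builder wraps the aggregation so only the group-by keys survive, and for constants.SecondQueryGraphLimit it appends a "(keys) IN (%s)" predicate whose %s is substituted later with the keys the first query returned. A rough sketch of the two shapes; the SQL fragments are illustrative placeholders, not captured builder output:

package main

import "fmt"

func main() {
	// Phase 1: wrap the aggregation, keep only the group-by column, and cap
	// the number of series with the user-supplied limit.
	inner := "SELECT method, toFloat64(count(*)) as value from signoz_logs.distributed_logs group by method order by value DESC"
	first := "SELECT method from (" + inner + ") LIMIT 10"

	// Phase 2: the full time-series query carries the IN placeholder, filled
	// in at execution time with the keys phase 1 produced.
	secondTmpl := "SELECT ts, method, value from signoz_logs.distributed_logs where ... AND (method) IN (%s) group by method,ts"
	second := fmt.Sprintf(secondTmpl, "'GET','POST'")

	fmt.Println(first)
	fmt.Println(second)
}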
@@ -273,82 +291,56 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 
 // groupBy returns a string of comma separated tags for group by clause
 // `ts` is always added to the group by clause
-func groupBy(panelType v3.PanelType, tags ...string) string {
-	if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
+func groupBy(panelType v3.PanelType, graphLimitQtype string, tags ...string) string {
+	if (graphLimitQtype != constants.FirstQueryGraphLimit) && (panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue) {
 		tags = append(tags, "ts")
 	}
 	return strings.Join(tags, ",")
 }
 
-func groupByAttributeKeyTags(panelType v3.PanelType, tags ...v3.AttributeKey) string {
+func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
 	groupTags := []string{}
 	for _, tag := range tags {
 		groupTags = append(groupTags, tag.Key)
 	}
-	return groupBy(panelType, groupTags...)
+	return groupBy(panelType, graphLimitQtype, groupTags...)
 }
 
 // orderBy returns a string of comma separated tags for order by clause
 // if there are remaining items which are not present in tags they are also added
 // if the order is not specified, it defaults to ASC
-func orderBy(panelType v3.PanelType, items []v3.OrderBy, tags []string) []string {
+func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string {
 	var orderBy []string
 
-	// create a lookup
-	addedToOrderBy := map[string]bool{}
-	itemsLookup := map[string]v3.OrderBy{}
-
-	for i := 0; i < len(items); i++ {
-		addedToOrderBy[items[i].ColumnName] = false
-		itemsLookup[items[i].ColumnName] = items[i]
-	}
-
-	for _, tag := range tags {
-		if item, ok := itemsLookup[tag]; ok {
-			orderBy = append(orderBy, fmt.Sprintf("%s %s", item.ColumnName, item.Order))
-			addedToOrderBy[item.ColumnName] = true
-		} else {
-			orderBy = append(orderBy, fmt.Sprintf("%s ASC", tag))
-		}
-	}
-
-	// users might want to order by value of aggreagation
 	for _, item := range items {
 		if item.ColumnName == constants.SigNozOrderByValue {
 			orderBy = append(orderBy, fmt.Sprintf("value %s", item.Order))
-			addedToOrderBy[item.ColumnName] = true
-		}
-	}
-
-	// add the remaining items
-	if panelType == v3.PanelTypeList {
-		for _, item := range items {
-			// since these are not present in tags we will have to select them correctly
-			// for list view there is no need to check if it was added since they wont be added yet but this is just for safety
-			if !addedToOrderBy[item.ColumnName] {
+		} else if _, ok := tagLookup[item.ColumnName]; ok {
+			orderBy = append(orderBy, fmt.Sprintf("%s %s", item.ColumnName, item.Order))
+		} else if panelType == v3.PanelTypeList {
 			attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
 			name := getClickhouseColumnName(attr)
 			orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order))
 		}
 	}
-	}
 	return orderBy
 }
 
-func orderByAttributeKeyTags(panelType v3.PanelType, aggregatorOperator v3.AggregateOperator, items []v3.OrderBy, tags []v3.AttributeKey) string {
-	var groupTags []string
-	for _, tag := range tags {
-		groupTags = append(groupTags, tag.Key)
-	}
-	orderByArray := orderBy(panelType, items, groupTags)
+func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string {
 
-	if panelType == v3.PanelTypeList {
-		if len(orderByArray) == 0 {
-			orderByArray = append(orderByArray, constants.TIMESTAMP)
+	tagLookup := map[string]struct{}{}
+	for _, v := range tags {
+		tagLookup[v.Key] = struct{}{}
+	}
+
+	orderByArray := orderBy(panelType, items, tagLookup)
+
+	if len(orderByArray) == 0 {
+		if panelType == v3.PanelTypeList {
+			orderByArray = append(orderByArray, constants.TIMESTAMP+" DESC")
+		} else {
+			orderByArray = append(orderByArray, "value DESC")
 		}
-	} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
-		// since in other aggregation operator we will have to add ts as it will not be present in group by
-		orderByArray = append(orderByArray, "ts")
 	}
 
 	str := strings.Join(orderByArray, ",")
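The reworked orderBy drops the old two-pass bookkeeping: an item survives only if it orders by the aggregate value (#SIGNOZ_VALUE) or by a tag present in the group-by lookup, with list panels additionally resolving unknown columns through getClickhouseColumnName. A simplified, self-contained rendition of the graph-panel rule, with local stand-ins for the v3 and constants types:

package main

import "fmt"

type OrderBy struct {
	ColumnName string
	Order      string
}

// mirrors constants.SigNozOrderByValue in the query-service package
const sigNozOrderByValue = "#SIGNOZ_VALUE"

// orderClauses reproduces the new rule for graph/value panels: keep an item
// only if it orders by the aggregate value or by a known group-by tag.
func orderClauses(items []OrderBy, tagLookup map[string]struct{}) []string {
	var out []string
	for _, item := range items {
		if item.ColumnName == sigNozOrderByValue {
			out = append(out, "value "+item.Order)
		} else if _, ok := tagLookup[item.ColumnName]; ok {
			out = append(out, item.ColumnName+" "+item.Order)
		}
	}
	return out
}

func main() {
	tagLookup := map[string]struct{}{"name": {}, "bytes": {}}
	items := []OrderBy{
		{ColumnName: "name", Order: "asc"},
		{ColumnName: sigNozOrderByValue, Order: "asc"},
		{ColumnName: "method", Order: "asc"}, // not a group-by tag, silently dropped
	}
	fmt.Println(orderClauses(items, tagLookup)) // [name asc value asc]
}

This matches the "Test Graph item not present in tag" case added to the test table further down, where an order-by on "method" is dropped because it is not in the group-by tags.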
@@ -392,8 +384,26 @@ func addOffsetToQuery(query string, offset uint64) string {
 	return fmt.Sprintf("%s OFFSET %d", query, offset)
 }
 
-func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery) (string, error) {
-	query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq)
+func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, graphLimitQtype string) (string, error) {
+	if graphLimitQtype == constants.FirstQueryGraphLimit {
+		// give me just the groupby names
+		query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, graphLimitQtype)
+		if err != nil {
+			return "", err
+		}
+		query = addLimitToQuery(query, mq.Limit)
+
+		return query, nil
+	} else if graphLimitQtype == constants.SecondQueryGraphLimit {
+		query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, graphLimitQtype)
+		if err != nil {
+			return "", err
+		}
+		return query, nil
+	}
+
+	query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, graphLimitQtype)
 	if err != nil {
 		return "", err
 	}
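A sketch of how a caller would be expected to drive this new two-phase flow. The orchestration below is assumed (the real wiring lives in the query-range service, which this diff does not show); the constant values and the canned SQL are stand-ins:

package main

import "fmt"

// Assumed values; the real ones live in the constants package.
const (
	firstQueryGraphLimit  = "first_query_graph_limit"
	secondQueryGraphLimit = "second_query_graph_limit"
)

// prepareLogsQuery stands in for PrepareLogsQuery and returns canned SQL so
// the two-step call sequence can be shown end to end.
func prepareLogsQuery(graphLimitQtype string) string {
	if graphLimitQtype == firstQueryGraphLimit {
		return "SELECT method from (SELECT method, count(*) as value ... order by value DESC) LIMIT 10"
	}
	return "SELECT ts, method, value FROM ... AND (method) IN (%s) GROUP BY method,ts"
}

func main() {
	first := prepareLogsQuery(firstQueryGraphLimit)
	fmt.Println("step 1:", first) // run this, collect the top group-by keys

	keys := "'GET','POST'" // pretend these came back from step 1
	second := fmt.Sprintf(prepareLogsQuery(secondQueryGraphLimit), keys)
	fmt.Println("step 2:", second) // the real series query, restricted to those keys
}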
@@ -401,7 +411,7 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
 		query, err = reduceQuery(query, mq.ReduceTo, mq.AggregateOperator)
 	}
 
-	if panelType == v3.PanelTypeList {
+	if panelType == v3.PanelTypeList || panelType == v3.PanelTypeTable {
 		if mq.PageSize > 0 {
 			if mq.Limit > 0 && mq.Offset > mq.Limit {
 				return "", fmt.Errorf("max limit exceeded")
@@ -414,4 +424,5 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
 	}
 
 	return query, err
+
 }
@@ -1,6 +1,7 @@
 package v3
 
 import (
+	"fmt"
 	"testing"
 
 	. "github.com/smartystreets/goconvey/convey"
@@ -59,13 +60,13 @@ var testGetSelectLabelsData = []struct {
 		Name: "select fields for groupBy attribute",
 		AggregateOperator: v3.AggregateOperatorCount,
 		GroupByTags: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
-		SelectLabels: ", attributes_string_value[indexOf(attributes_string_key, 'user_name')] as user_name",
+		SelectLabels: " attributes_string_value[indexOf(attributes_string_key, 'user_name')] as user_name,",
 	},
 	{
 		Name: "select fields for groupBy resource",
 		AggregateOperator: v3.AggregateOperatorCount,
 		GroupByTags: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}},
-		SelectLabels: ", resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name",
+		SelectLabels: " resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name,",
 	},
 	{
 		Name: "select fields for groupBy attribute and resource",
@@ -74,27 +75,26 @@ var testGetSelectLabelsData = []struct {
 			{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
 			{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
 		},
-		SelectLabels: ", resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name, attributes_string_value[indexOf(attributes_string_key, 'host')] as host",
+		SelectLabels: " resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name, attributes_string_value[indexOf(attributes_string_key, 'host')] as host,",
 	},
 	{
 		Name: "select fields for groupBy materialized columns",
 		AggregateOperator: v3.AggregateOperatorCount,
 		GroupByTags: []v3.AttributeKey{{Key: "host", IsColumn: true}},
-		SelectLabels: ", host as host",
+		SelectLabels: " host as host,",
 	},
 	{
 		Name: "trace_id field as an attribute",
 		AggregateOperator: v3.AggregateOperatorCount,
 		GroupByTags: []v3.AttributeKey{{Key: "trace_id", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
-		SelectLabels: ", attributes_string_value[indexOf(attributes_string_key, 'trace_id')] as trace_id",
+		SelectLabels: " attributes_string_value[indexOf(attributes_string_key, 'trace_id')] as trace_id,",
 	},
 }
 
 func TestGetSelectLabels(t *testing.T) {
 	for _, tt := range testGetSelectLabelsData {
 		Convey("testGetSelectLabelsData", t, func() {
-			selectLabels, err := getSelectLabels(tt.AggregateOperator, tt.GroupByTags)
-			So(err, ShouldBeNil)
+			selectLabels := getSelectLabels(tt.AggregateOperator, tt.GroupByTags)
 			So(selectLabels, ShouldEqual, tt.SelectLabels)
 		})
 	}
@@ -238,6 +238,7 @@ var testBuildLogsQueryData = []struct {
 	TableName string
 	AggregateOperator v3.AggregateOperator
 	ExpectedQuery string
+	Type int
 }{
 	{
 		Name: "Test aggregate count on select field",
@@ -251,7 +252,7 @@ var testBuildLogsQueryData = []struct {
 			Expression: "A",
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by ts",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value DESC",
 	},
 	{
 		Name: "Test aggregate count on a attribute",
@@ -266,7 +267,7 @@ var testBuildLogsQueryData = []struct {
 			Expression: "A",
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'user_name') group by ts order by ts",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'user_name') group by ts order by value DESC",
 	},
 	{
 		Name: "Test aggregate count on a with filter",
@@ -284,7 +285,7 @@ var testBuildLogsQueryData = []struct {
 			Expression: "A",
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_float64_value[indexOf(attributes_float64_key, 'bytes')] > 100.000000 AND has(attributes_string_key, 'user_name') group by ts order by ts",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_float64_value[indexOf(attributes_float64_key, 'bytes')] > 100.000000 AND has(attributes_string_key, 'user_name') group by ts order by value DESC",
 	},
 	{
 		Name: "Test aggregate count distinct and order by value",
@@ -300,7 +301,7 @@ var testBuildLogsQueryData = []struct {
 			OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "ASC"}},
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(name))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value ASC,ts",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(name))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value ASC",
 	},
 	{
 		Name: "Test aggregate count distinct on non selected field",
@@ -315,7 +316,7 @@ var testBuildLogsQueryData = []struct {
 			Expression: "A",
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by ts",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value DESC",
 	},
 	{
 		Name: "Test aggregate count distinct with filter and groupBy",
@@ -344,7 +345,7 @@ var testBuildLogsQueryData = []struct {
 			"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND resources_string_value[indexOf(resources_string_key, 'x')] != 'abc' " +
 			"AND indexOf(attributes_string_key, 'method') > 0 " +
 			"group by method,ts " +
-			"order by method ASC,ts",
+			"order by method ASC",
 	},
 	{
 		Name: "Test aggregate count with multiple filter,groupBy and orderBy",
@@ -375,7 +376,7 @@ var testBuildLogsQueryData = []struct {
 			"AND indexOf(attributes_string_key, 'method') > 0 " +
 			"AND indexOf(resources_string_key, 'x') > 0 " +
 			"group by method,x,ts " +
-			"order by method ASC,x ASC,ts",
+			"order by method ASC,x ASC",
 	},
 	{
 		Name: "Test aggregate avg",
@@ -404,7 +405,7 @@ var testBuildLogsQueryData = []struct {
 			"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
 			"AND indexOf(attributes_string_key, 'method') > 0 " +
 			"group by method,ts " +
-			"order by method ASC,ts",
+			"order by method ASC",
 	},
 	{
 		Name: "Test aggregate sum",
@@ -433,7 +434,7 @@ var testBuildLogsQueryData = []struct {
 			"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
 			"AND indexOf(attributes_string_key, 'method') > 0 " +
 			"group by method,ts " +
-			"order by method ASC,ts",
+			"order by method ASC",
 	},
 	{
 		Name: "Test aggregate min",
@@ -462,7 +463,7 @@ var testBuildLogsQueryData = []struct {
 			"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
 			"AND indexOf(attributes_string_key, 'method') > 0 " +
 			"group by method,ts " +
-			"order by method ASC,ts",
+			"order by method ASC",
 	},
 	{
 		Name: "Test aggregate max",
@@ -491,7 +492,7 @@ var testBuildLogsQueryData = []struct {
 			"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
 			"AND indexOf(attributes_string_key, 'method') > 0 " +
 			"group by method,ts " +
-			"order by method ASC,ts",
+			"order by method ASC",
 	},
 	{
 		Name: "Test aggregate PXX",
@@ -516,7 +517,7 @@ var testBuildLogsQueryData = []struct {
 			"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 			"AND indexOf(attributes_string_key, 'method') > 0 " +
 			"group by method,ts " +
-			"order by method ASC,ts",
+			"order by method ASC",
 	},
 	{
 		Name: "Test aggregate RateSum",
@@ -538,7 +539,7 @@ var testBuildLogsQueryData = []struct {
 			", sum(bytes)/60 as value from signoz_logs.distributed_logs " +
 			"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 			"AND indexOf(attributes_string_key, 'method') > 0 " +
-			"group by method,ts order by method ASC,ts",
+			"group by method,ts order by method ASC",
 	},
 	{
 		Name: "Test aggregate rate",
@@ -561,7 +562,7 @@ var testBuildLogsQueryData = []struct {
 			"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 			"AND indexOf(attributes_string_key, 'method') > 0 " +
 			"group by method,ts " +
-			"order by method ASC,ts",
+			"order by method ASC",
 	},
 	{
 		Name: "Test aggregate RateSum without materialized column",
@@ -585,7 +586,7 @@ var testBuildLogsQueryData = []struct {
 			"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 			"AND indexOf(attributes_string_key, 'method') > 0 " +
 			"group by method,ts " +
-			"order by method ASC,ts",
+			"order by method ASC",
 	},
 	{
 		Name: "Test Noop",
@@ -603,7 +604,7 @@ var testBuildLogsQueryData = []struct {
 		ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string," +
 			"CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," +
 			"CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string " +
-			"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by timestamp",
+			"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by timestamp DESC",
 	},
 	{
 		Name: "Test Noop order by custom",
@@ -642,7 +643,7 @@ var testBuildLogsQueryData = []struct {
 		ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string," +
 			"CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," +
 			"CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string " +
-			"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND severity_number != 0 order by timestamp",
+			"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND severity_number != 0 order by timestamp DESC",
 	},
 	{
 		Name: "Test aggregate with having clause",
@@ -664,7 +665,7 @@ var testBuildLogsQueryData = []struct {
 			},
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts having value > 10 order by ts",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts having value > 10 order by value DESC",
 	},
 	{
 		Name: "Test aggregate with having clause and filters",
@@ -690,7 +691,7 @@ var testBuildLogsQueryData = []struct {
 			},
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' group by ts having value > 10 order by ts",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' group by ts having value > 10 order by value DESC",
 	},
 	{
 		Name: "Test top level key",
@@ -716,7 +717,7 @@ var testBuildLogsQueryData = []struct {
 			},
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND body ILIKE '%test%' group by ts having value > 10 order by ts",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND body ILIKE '%test%' group by ts having value > 10 order by value DESC",
 	},
 	{
 		Name: "Test attribute with same name as top level key",
@@ -742,7 +743,7 @@ var testBuildLogsQueryData = []struct {
 			},
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'body')] ILIKE '%test%' group by ts having value > 10 order by ts",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'body')] ILIKE '%test%' group by ts having value > 10 order by value DESC",
 	},
 
 	// Tests for table panel type
@@ -758,7 +759,7 @@ var testBuildLogsQueryData = []struct {
 			Expression: "A",
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000)",
+		ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by value DESC",
 	},
 	{
 		Name: "TABLE: Test count with groupBy",
@@ -775,7 +776,7 @@ var testBuildLogsQueryData = []struct {
 			},
 		},
 		TableName: "logs",
-		ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as name, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND indexOf(attributes_string_key, 'name') > 0 group by name order by name ASC",
+		ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as name, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND indexOf(attributes_string_key, 'name') > 0 group by name order by value DESC",
 	},
 	{
 		Name: "TABLE: Test count with groupBy, orderBy",
@@ -802,7 +803,8 @@ var testBuildLogsQueryData = []struct {
 func TestBuildLogsQuery(t *testing.T) {
 	for _, tt := range testBuildLogsQueryData {
 		Convey("TestBuildLogsQuery", t, func() {
-			query, err := buildLogsQuery(tt.PanelType, tt.Start, tt.End, tt.Step, tt.BuilderQuery)
+			query, err := buildLogsQuery(tt.PanelType, tt.Start, tt.End, tt.Step, tt.BuilderQuery, "")
+			fmt.Println(query)
 			So(err, ShouldBeNil)
 			So(query, ShouldEqual, tt.ExpectedQuery)
 
@@ -844,8 +846,8 @@ var testOrderBy = []struct {
 	Name string
 	PanelType v3.PanelType
 	Items []v3.OrderBy
-	Tags []string
-	Result []string
+	Tags []v3.AttributeKey
+	Result string
 }{
 	{
 		Name: "Test 1",
@@ -860,8 +862,10 @@ var testOrderBy = []struct {
 				Order: "desc",
 			},
 		},
-		Tags: []string{"name"},
-		Result: []string{"name asc", "value desc"},
+		Tags: []v3.AttributeKey{
+			{Key: "name"},
+		},
+		Result: "name asc,value desc",
 	},
 	{
 		Name: "Test 2",
@@ -876,8 +880,34 @@ var testOrderBy = []struct {
 				Order: "asc",
 			},
 		},
-		Tags: []string{"name", "bytes"},
-		Result: []string{"name asc", "bytes asc"},
+		Tags: []v3.AttributeKey{
+			{Key: "name"},
+			{Key: "bytes"},
+		},
+		Result: "name asc,bytes asc",
+	},
+	{
+		Name: "Test Graph item not present in tag",
+		PanelType: v3.PanelTypeGraph,
+		Items: []v3.OrderBy{
+			{
+				ColumnName: "name",
+				Order: "asc",
+			},
+			{
+				ColumnName: "bytes",
+				Order: "asc",
+			},
+			{
+				ColumnName: "method",
+				Order: "asc",
+			},
+		},
+		Tags: []v3.AttributeKey{
+			{Key: "name"},
+			{Key: "bytes"},
+		},
+		Result: "name asc,bytes asc",
 	},
 	{
 		Name: "Test 3",
@@ -896,8 +926,11 @@ var testOrderBy = []struct {
 				Order: "asc",
 			},
 		},
-		Tags: []string{"name", "bytes"},
-		Result: []string{"name asc", "bytes asc", "value asc"},
+		Tags: []v3.AttributeKey{
+			{Key: "name"},
+			{Key: "bytes"},
+		},
+		Result: "name asc,value asc,bytes asc",
 	},
 	{
 		Name: "Test 4",
@@ -923,16 +956,163 @@ var testOrderBy = []struct {
 				DataType: v3.AttributeKeyDataTypeString,
 			},
 		},
-		Tags: []string{"name", "bytes"},
-		Result: []string{"name asc", "bytes asc", "value asc", "attributes_string_value[indexOf(attributes_string_key, 'response_time')] desc"},
+		Tags: []v3.AttributeKey{
+			{Key: "name"},
+			{Key: "bytes"},
+		},
+		Result: "name asc,value asc,bytes asc,attributes_string_value[indexOf(attributes_string_key, 'response_time')] desc",
 	},
 }
 
 func TestOrderBy(t *testing.T) {
 	for _, tt := range testOrderBy {
 		Convey("testOrderBy", t, func() {
-			res := orderBy(tt.PanelType, tt.Items, tt.Tags)
+			res := orderByAttributeKeyTags(tt.PanelType, tt.Items, tt.Tags)
 			So(res, ShouldResemble, tt.Result)
 		})
 	}
 }
 
+// if there is no group by then there is no point of limit in ts and table queries
+// since the above will result in a single ts
+
+// handle only when there is a group by something.
+
+var testPrepLogsQueryData = []struct {
+	Name string
+	PanelType v3.PanelType
+	Start int64
+	End int64
+	Step int64
+	BuilderQuery *v3.BuilderQuery
+	GroupByTags []v3.AttributeKey
+	TableName string
+	AggregateOperator v3.AggregateOperator
+	ExpectedQuery string
+	Type string
+}{
+	{
+		Name: "Test TS with limit- first",
+		PanelType: v3.PanelTypeGraph,
+		Start: 1680066360726210000,
+		End: 1680066458000000000,
+		Step: 60,
+		BuilderQuery: &v3.BuilderQuery{
+			QueryName: "A",
+			AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
+			AggregateOperator: v3.AggregateOperatorCountDistinct,
+			Expression: "A",
+			Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
+				{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
+			},
+			},
+			Limit: 10,
+			GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
+		},
+		TableName: "logs",
+		ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 group by method order by value DESC) LIMIT 10",
+		Type: constants.FirstQueryGraphLimit,
+	},
+	{
+		Name: "Test TS with limit- first - with order by value",
+		PanelType: v3.PanelTypeGraph,
+		Start: 1680066360726210000,
+		End: 1680066458000000000,
+		Step: 60,
+		BuilderQuery: &v3.BuilderQuery{
+			QueryName: "A",
+			AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
+			AggregateOperator: v3.AggregateOperatorCountDistinct,
+			Expression: "A",
+			Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
+				{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
+			},
+			},
+			Limit: 10,
+			GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
+			OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "ASC"}},
+		},
+		TableName: "logs",
+		ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 group by method order by value ASC) LIMIT 10",
+		Type: constants.FirstQueryGraphLimit,
+	},
+	{
+		Name: "Test TS with limit- first - with order by attribute",
+		PanelType: v3.PanelTypeGraph,
+		Start: 1680066360726210000,
+		End: 1680066458000000000,
+		Step: 60,
+		BuilderQuery: &v3.BuilderQuery{
+			QueryName: "A",
+			AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
+			AggregateOperator: v3.AggregateOperatorCountDistinct,
+			Expression: "A",
+			Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
+				{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
+			},
+			},
+			Limit: 10,
+			GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
+			OrderBy: []v3.OrderBy{{ColumnName: "method", Order: "ASC"}},
+		},
+		TableName: "logs",
+		ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 group by method order by method ASC) LIMIT 10",
+		Type: constants.FirstQueryGraphLimit,
+	},
+	{
+		Name: "Test TS with limit- second",
+		PanelType: v3.PanelTypeGraph,
+		Start: 1680066360726210000,
+		End: 1680066458000000000,
+		Step: 60,
+		BuilderQuery: &v3.BuilderQuery{
+			QueryName: "A",
+			AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
+			AggregateOperator: v3.AggregateOperatorCountDistinct,
+			Expression: "A",
+			Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
+				{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
+			},
+			},
+			GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
+			Limit: 2,
+		},
+		TableName: "logs",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 0 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND (method) IN (%s) group by method,ts order by value DESC",
+		Type: constants.SecondQueryGraphLimit,
+	},
+	{
+		Name: "Test TS with limit- second - with order by",
+		PanelType: v3.PanelTypeGraph,
+		Start: 1680066360726210000,
+		End: 1680066458000000000,
+		Step: 60,
+		BuilderQuery: &v3.BuilderQuery{
+			QueryName: "A",
+			AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
+			AggregateOperator: v3.AggregateOperatorCountDistinct,
+			Expression: "A",
+			Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
+				{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
+			},
+			},
+			GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
+			OrderBy: []v3.OrderBy{{ColumnName: "method", Order: "ASC"}},
+			Limit: 2,
+		},
+		TableName: "logs",
+		ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 0 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND (method) IN (%s) group by method,ts order by method ASC",
+		Type: constants.SecondQueryGraphLimit,
+	},
+}
+
+func TestPrepareLogsQuery(t *testing.T) {
+	for _, tt := range testPrepLogsQueryData {
+		Convey("TestBuildLogsQuery", t, func() {
+			query, err := PrepareLogsQuery(tt.Start, tt.End, "", tt.PanelType, tt.BuilderQuery, tt.Type)
+			So(err, ShouldBeNil)
+			So(query, ShouldEqual, tt.ExpectedQuery)
+
+		})
+	}
+}
pkg/query-service/app/metrics/v3/cumulative_table.go (new file, 196 lines)
@@ -0,0 +1,196 @@
+package v3
+
+import (
+	"fmt"
+	"math"
+
+	"go.signoz.io/signoz/pkg/query-service/constants"
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+	"go.signoz.io/signoz/pkg/query-service/utils"
+)
+
+// This logic is little convoluted for a reason.
+// When we work with cumulative metrics, the table view need to show the data for the entire time range.
+// In some cases, we could take the points at the start and end of the time range and divide it by the
+// duration. But, the problem is there is no guarantee that the trend will be linear between the start and end.
+// We can sum the rate of change for some interval X, this interval can be step size of time series.
+// However, the speed of query depends on the number of timestamps, so we bump up the xx the step size.
+// This should be a good balance between speed and accuracy.
+// TODO: find a better way to do this
+func stepForTableCumulative(start, end int64) int64 {
+	// round up to the nearest multiple of 60
+	duration := (end - start + 1) / 1000
+	step := math.Max(math.Floor(float64(duration)/120), 60) // assuming 120 max points
+	if duration > 1800 { // bump for longer duration
+		step = step * 5
+	}
+	return int64(step)
+}
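A worked example of the sizing above, using the time window from the cumulative-table test at the end of this diff; the arithmetic is integer division throughout, which is where the sum(value)/29 in that test's expected query comes from:

package main

import (
	"fmt"
	"math"
)

// copy of stepForTableCumulative from the hunk above
func stepForTableCumulative(start, end int64) int64 {
	duration := (end - start + 1) / 1000
	step := math.Max(math.Floor(float64(duration)/120), 60)
	if duration > 1800 {
		step = step * 5
	}
	return int64(step)
}

func main() {
	// window used by TestPanelTableForCumulative below (millisecond epochs)
	start, end := int64(1689255866000), int64(1689257640000)
	step := stepForTableCumulative(start, end) // duration 1774s -> max(14, 60) = 60
	points := ((end - start + 1) / 1000) / step
	fmt.Println(step, points) // 60 29 -> the divisor in "sum(value)/29"
}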
|
|
||||||
|
func buildMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableName string) (string, error) {
|
||||||
|
|
||||||
|
step := stepForTableCumulative(start, end)
|
||||||
|
|
||||||
|
points := ((end - start + 1) / 1000) / step
|
||||||
|
|
||||||
|
metricQueryGroupBy := mq.GroupBy
|
||||||
|
|
||||||
|
// if the aggregate operator is a histogram quantile, and user has not forgotten
|
||||||
|
// the le tag in the group by then add the le tag to the group by
|
||||||
|
if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 ||
|
||||||
|
mq.AggregateOperator == v3.AggregateOperatorHistQuant75 ||
|
||||||
|
mq.AggregateOperator == v3.AggregateOperatorHistQuant90 ||
|
||||||
|
mq.AggregateOperator == v3.AggregateOperatorHistQuant95 ||
|
||||||
|
mq.AggregateOperator == v3.AggregateOperatorHistQuant99 {
|
||||||
|
found := false
|
||||||
|
for _, tag := range mq.GroupBy {
|
||||||
|
if tag.Key == "le" {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
metricQueryGroupBy = append(
|
||||||
|
metricQueryGroupBy,
|
||||||
|
v3.AttributeKey{
|
||||||
|
Key: "le",
|
||||||
|
DataType: v3.AttributeKeyDataTypeString,
|
||||||
|
Type: v3.AttributeKeyTypeTag,
|
||||||
|
IsColumn: false,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)
|
||||||
|
|
||||||
|
// Select the aggregate value for interval
|
||||||
|
queryTmplCounterInner :=
|
||||||
|
"SELECT %s" +
|
||||||
|
" toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," +
|
||||||
|
" %s as value" +
|
||||||
|
" FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
|
||||||
|
" GLOBAL INNER JOIN" +
|
||||||
|
" (%s) as filtered_time_series" +
|
||||||
|
" USING fingerprint" +
|
||||||
|
" WHERE " + samplesTableTimeFilter +
|
||||||
|
" GROUP BY %s" +
|
||||||
|
" ORDER BY %s ts"
|
||||||
|
|
||||||
|
// Select the aggregate value for interval
|
||||||
|
queryTmpl :=
|
||||||
|
"SELECT %s" +
|
||||||
|
" toStartOfHour(now()) as ts," + // now() has no menaing & used as a placeholder for ts
|
||||||
|
" %s as value" +
|
||||||
|
" FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
|
||||||
|
" GLOBAL INNER JOIN" +
|
||||||
|
" (%s) as filtered_time_series" +
|
||||||
|
" USING fingerprint" +
|
||||||
|
" WHERE " + samplesTableTimeFilter +
|
||||||
|
" GROUP BY %s" +
|
||||||
|
" ORDER BY %s ts"
|
||||||
|
|
||||||
|
// tagsWithoutLe is used to group by all tags except le
|
||||||
|
// This is done because we want to group by le only when we are calculating quantile
|
||||||
|
// Otherwise, we want to group by all tags except le
|
||||||
|
tagsWithoutLe := []string{}
|
||||||
|
for _, tag := range mq.GroupBy {
|
||||||
|
if tag.Key != "le" {
|
||||||
|
			tagsWithoutLe = append(tagsWithoutLe, tag.Key)
		}
	}

	// orderWithoutLe := orderBy(mq.OrderBy, tagsWithoutLe)

	groupByWithoutLe := groupBy(tagsWithoutLe...)
	groupTagsWithoutLe := groupSelect(tagsWithoutLe...)
	orderWithoutLe := orderBy(mq.OrderBy, tagsWithoutLe)

	groupBy := groupByAttributeKeyTags(metricQueryGroupBy...)
	groupTags := groupSelectAttributeKeyTags(metricQueryGroupBy...)
	orderBy := orderByAttributeKeyTags(mq.OrderBy, metricQueryGroupBy)

	if len(orderBy) != 0 {
		orderBy += ","
	}
	if len(orderWithoutLe) != 0 {
		orderWithoutLe += ","
	}

	switch mq.AggregateOperator {
	case v3.AggregateOperatorRate:
		return "", fmt.Errorf("rate is not supported for table view")
	case v3.AggregateOperatorSumRate, v3.AggregateOperatorAvgRate, v3.AggregateOperatorMaxRate, v3.AggregateOperatorMinRate:
		rateGroupBy := "fingerprint, " + groupBy
		rateGroupTags := "fingerprint, " + groupTags
		rateOrderBy := "fingerprint, " + orderBy
		op := "max(value)"
		subQuery := fmt.Sprintf(
			queryTmplCounterInner, rateGroupTags, step, op, filterSubQuery, rateGroupBy, rateOrderBy,
		) // labels will be same so any should be fine
		query := `SELECT %s ts, ` + rateWithoutNegative + ` as value FROM(%s) WHERE isNaN(value) = 0`
		query = fmt.Sprintf(query, groupTags, subQuery)
		query = fmt.Sprintf(`SELECT %s toStartOfHour(now()) as ts, %s(value)/%d as value FROM (%s) GROUP BY %s ORDER BY %s ts`, groupTags, aggregateOperatorToSQLFunc[mq.AggregateOperator], points, query, groupBy, orderBy)
		return query, nil
	case
		v3.AggregateOperatorRateSum,
		v3.AggregateOperatorRateMax,
		v3.AggregateOperatorRateAvg,
		v3.AggregateOperatorRateMin:
		step = ((end - start + 1) / 1000) / 2
		op := fmt.Sprintf("%s(value)", aggregateOperatorToSQLFunc[mq.AggregateOperator])
		subQuery := fmt.Sprintf(queryTmplCounterInner, groupTags, step, op, filterSubQuery, groupBy, orderBy)
		query := `SELECT %s toStartOfHour(now()) as ts, ` + rateWithoutNegative + ` as value FROM(%s) WHERE isNaN(value) = 0`
		query = fmt.Sprintf(query, groupTags, subQuery)
		return query, nil
	case
		v3.AggregateOperatorP05,
		v3.AggregateOperatorP10,
		v3.AggregateOperatorP20,
		v3.AggregateOperatorP25,
		v3.AggregateOperatorP50,
		v3.AggregateOperatorP75,
		v3.AggregateOperatorP90,
		v3.AggregateOperatorP95,
		v3.AggregateOperatorP99:
		op := fmt.Sprintf("quantile(%v)(value)", aggregateOperatorToPercentile[mq.AggregateOperator])
		query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
		return query, nil
	case v3.AggregateOperatorHistQuant50, v3.AggregateOperatorHistQuant75, v3.AggregateOperatorHistQuant90, v3.AggregateOperatorHistQuant95, v3.AggregateOperatorHistQuant99:
		rateGroupBy := "fingerprint, " + groupBy
		rateGroupTags := "fingerprint, " + groupTags
		rateOrderBy := "fingerprint, " + orderBy
		op := "max(value)"
		subQuery := fmt.Sprintf(
			queryTmplCounterInner, rateGroupTags, step, op, filterSubQuery, rateGroupBy, rateOrderBy,
		) // labels will be same so any should be fine
		query := `SELECT %s ts, ` + rateWithoutNegative + ` as value FROM(%s) WHERE isNaN(value) = 0`
		query = fmt.Sprintf(query, groupTags, subQuery)
		query = fmt.Sprintf(`SELECT %s toStartOfHour(now()) as ts, sum(value)/%d as value FROM (%s) GROUP BY %s HAVING isNaN(value) = 0 ORDER BY %s ts`, groupTags, points, query, groupBy, orderBy)
		value := aggregateOperatorToPercentile[mq.AggregateOperator]

		query = fmt.Sprintf(`SELECT %s toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) as value FROM (%s) GROUP BY %s ORDER BY %s ts`, groupTagsWithoutLe, value, query, groupByWithoutLe, orderWithoutLe)
		return query, nil
	case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
		op := fmt.Sprintf("%s(value)", aggregateOperatorToSQLFunc[mq.AggregateOperator])
		query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
		return query, nil
	case v3.AggregateOperatorCount:
		op := "toFloat64(count(*))"
		query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
		return query, nil
	case v3.AggregateOperatorCountDistinct:
		op := "toFloat64(count(distinct(value)))"
		query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
		return query, nil
	case v3.AggregateOperatorNoOp:
		return "", fmt.Errorf("noop is not supported for table view")
	default:
		return "", fmt.Errorf("unsupported aggregate operator")
	}
}
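The `/%d` divisor in the rate and histogram branches is `points`, which, consistent with the `/29` in the expected queries in the test file below, appears to be the number of whole 60-second buckets in the selected window; the table value is therefore the average per-bucket rate. A self-contained sketch of that arithmetic, an assumption inferred from the test expectations rather than from the builder's own definition of `points`:

package main

import "fmt"

func main() {
	// Window from the tests below, in milliseconds.
	start, end := int64(1689255866000), int64(1689257640000)
	seconds := (end - start + 1) / 1000 // 1774
	points := seconds / 60              // 29 sixty-second buckets (integer division)
	fmt.Println(points)                 // matches the "sum(value)/29" in the expected SQL
}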
|
99
pkg/query-service/app/metrics/v3/cumulative_table_test.go
Normal file
99
pkg/query-service/app/metrics/v3/cumulative_table_test.go
Normal file
@ -0,0 +1,99 @@
package v3

import (
	"testing"

	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

func TestPanelTableForCumulative(t *testing.T) {
	cases := []struct {
		name     string
		query    *v3.BuilderQuery
		expected string
	}{
		{
			name: "request rate",
			query: &v3.BuilderQuery{
				QueryName:         "A",
				DataSource:        v3.DataSourceMetrics,
				AggregateOperator: v3.AggregateOperatorSumRate,
				AggregateAttribute: v3.AttributeKey{
					Key: "signoz_latency_count",
				},
				Temporality: v3.Cumulative,
				Filters: &v3.FilterSet{
					Items: []v3.FilterItem{
						{
							Key:      v3.AttributeKey{Key: "service_name"},
							Operator: v3.FilterOperatorIn,
							Value:    []interface{}{"frontend"},
						},
						{
							Key:      v3.AttributeKey{Key: "operation"},
							Operator: v3.FilterOperatorIn,
							Value:    []interface{}{"HTTP GET /dispatch"},
						},
					},
				},
				Expression: "A",
			},
			expected: "SELECT toStartOfHour(now()) as ts, sum(value)/29 as value FROM (SELECT ts, if(runningDifference(ts) <= 0, nan, if(runningDifference(value) < 0, (value) / runningDifference(ts), runningDifference(value) / runningDifference(ts))) as value FROM(SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(value) = 0) GROUP BY ts ORDER BY ts",
		},
		{
			name: "latency p50",
			query: &v3.BuilderQuery{
				QueryName:         "A",
				DataSource:        v3.DataSourceMetrics,
				AggregateOperator: v3.AggregateOperatorHistQuant50,
				AggregateAttribute: v3.AttributeKey{
					Key: "signoz_latency_bucket",
				},
				Temporality: v3.Cumulative,
				Filters: &v3.FilterSet{
					Items: []v3.FilterItem{
						{
							Key:      v3.AttributeKey{Key: "service_name"},
							Operator: v3.FilterOperatorEqual,
							Value:    "frontend",
						},
					},
				},
				Expression: "A",
			},
			expected: "SELECT toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(value)/29 as value FROM (SELECT le, ts, if(runningDifference(ts) <= 0, nan, if(runningDifference(value) < 0, (value) / runningDifference(ts), runningDifference(value) / runningDifference(ts))) as value FROM(SELECT fingerprint, le, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, le,ts ORDER BY fingerprint, le ASC, ts) WHERE isNaN(value) = 0) GROUP BY le,ts HAVING isNaN(value) = 0 ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts",
		},
		{
			name: "latency p99 with group by",
			query: &v3.BuilderQuery{
				QueryName:         "A",
				DataSource:        v3.DataSourceMetrics,
				AggregateOperator: v3.AggregateOperatorHistQuant99,
				AggregateAttribute: v3.AttributeKey{
					Key: "signoz_latency_bucket",
				},
				Temporality: v3.Cumulative,
				GroupBy: []v3.AttributeKey{
					{
						Key: "service_name",
					},
				},
				Expression: "A",
			},
			expected: "SELECT service_name, toStartOfHour(now()) as ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(value)/29 as value FROM (SELECT service_name,le, ts, if(runningDifference(ts) <= 0, nan, if(runningDifference(value) < 0, (value) / runningDifference(ts), runningDifference(value) / runningDifference(ts))) as value FROM(SELECT fingerprint, service_name,le, toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, max(value) as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality IN ['Cumulative', 'Unspecified']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY fingerprint, service_name,le,ts ORDER BY fingerprint, service_name ASC,le ASC, ts) WHERE isNaN(value) = 0) GROUP BY service_name,le,ts HAVING isNaN(value) = 0 ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts",
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			query, err := buildMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query, "distributed_time_series_v2")
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

			if query != c.expected {
				t.Fatalf("expected: %s, got: %s", c.expected, query)
			}
		})
	}
}
pkg/query-service/app/metrics/v3/delta_table.go (new file, 148 lines)
@ -0,0 +1,148 @@
package v3

import (
	"fmt"
	"math"

	"go.signoz.io/signoz/pkg/query-service/constants"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/query-service/utils"
)

func buildDeltaMetricQueryForTable(start, end, _ int64, mq *v3.BuilderQuery, tableName string) (string, error) {

	// round up to the nearest multiple of 60
	step := int64(math.Ceil(float64(end-start+1)/1000/60) * 60)

	metricQueryGroupBy := mq.GroupBy

	// if the aggregate operator is a histogram quantile and the user has
	// forgotten the le tag in the group by, then add the le tag to the group by
	if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 ||
		mq.AggregateOperator == v3.AggregateOperatorHistQuant75 ||
		mq.AggregateOperator == v3.AggregateOperatorHistQuant90 ||
		mq.AggregateOperator == v3.AggregateOperatorHistQuant95 ||
		mq.AggregateOperator == v3.AggregateOperatorHistQuant99 {
		found := false
		for _, tag := range mq.GroupBy {
			if tag.Key == "le" {
				found = true
				break
			}
		}
		if !found {
			metricQueryGroupBy = append(
				metricQueryGroupBy,
				v3.AttributeKey{
					Key:      "le",
					DataType: v3.AttributeKeyDataTypeString,
					Type:     v3.AttributeKeyTypeTag,
					IsColumn: false,
				},
			)
		}
	}

	filterSubQuery, err := buildMetricsTimeSeriesFilterQuery(mq.Filters, metricQueryGroupBy, mq)
	if err != nil {
		return "", err
	}

	samplesTableTimeFilter := fmt.Sprintf("metric_name = %s AND timestamp_ms >= %d AND timestamp_ms <= %d", utils.ClickHouseFormattedValue(mq.AggregateAttribute.Key), start, end)

	queryTmpl :=
		"SELECT %s toStartOfHour(now()) as ts," + // now() has no meaning here; it is used as a placeholder for ts
			" %s as value" +
			" FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME +
			" GLOBAL INNER JOIN" +
			" (%s) as filtered_time_series" +
			" USING fingerprint" +
			" WHERE " + samplesTableTimeFilter +
			" GROUP BY %s" +
			" ORDER BY %s ts"

	// tagsWithoutLe is used to group by all tags except le
	// This is done because we want to group by le only when we are calculating quantile
	// Otherwise, we want to group by all tags except le
	tagsWithoutLe := []string{}
	for _, tag := range mq.GroupBy {
		if tag.Key != "le" {
			tagsWithoutLe = append(tagsWithoutLe, tag.Key)
		}
	}

	groupByWithoutLeTable := groupBy(tagsWithoutLe...)
	groupTagsWithoutLeTable := groupSelect(tagsWithoutLe...)
	orderWithoutLeTable := orderBy(mq.OrderBy, tagsWithoutLe)

	groupBy := groupByAttributeKeyTags(metricQueryGroupBy...)
	groupTags := groupSelectAttributeKeyTags(metricQueryGroupBy...)
	orderBy := orderByAttributeKeyTags(mq.OrderBy, metricQueryGroupBy)

	if len(orderBy) != 0 {
		orderBy += ","
	}
	if len(orderWithoutLeTable) != 0 {
		orderWithoutLeTable += ","
	}

	switch mq.AggregateOperator {
	case v3.AggregateOperatorRate:
		// TODO(srikanthccv): what should be the expected behavior here for metrics?
		return "", fmt.Errorf("rate is not supported for table view")
	case v3.AggregateOperatorSumRate, v3.AggregateOperatorAvgRate, v3.AggregateOperatorMaxRate, v3.AggregateOperatorMinRate:
		op := fmt.Sprintf("%s(value)/%d", aggregateOperatorToSQLFunc[mq.AggregateOperator], step)
		query := fmt.Sprintf(
			queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy,
		)
		return query, nil
	case
		v3.AggregateOperatorRateSum,
		v3.AggregateOperatorRateMax,
		v3.AggregateOperatorRateAvg,
		v3.AggregateOperatorRateMin:
		op := fmt.Sprintf("%s(value)/%d", aggregateOperatorToSQLFunc[mq.AggregateOperator], step)
		query := fmt.Sprintf(
			queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy,
		)
		return query, nil
	case
		v3.AggregateOperatorP05,
		v3.AggregateOperatorP10,
		v3.AggregateOperatorP20,
		v3.AggregateOperatorP25,
		v3.AggregateOperatorP50,
		v3.AggregateOperatorP75,
		v3.AggregateOperatorP90,
		v3.AggregateOperatorP95,
		v3.AggregateOperatorP99:
		op := fmt.Sprintf("quantile(%v)(value)", aggregateOperatorToPercentile[mq.AggregateOperator])
		query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
		return query, nil
	case v3.AggregateOperatorHistQuant50, v3.AggregateOperatorHistQuant75, v3.AggregateOperatorHistQuant90, v3.AggregateOperatorHistQuant95, v3.AggregateOperatorHistQuant99:
		op := fmt.Sprintf("sum(value)/%d", step)
		query := fmt.Sprintf(
			queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy,
		) // labels will be same so any should be fine
		value := aggregateOperatorToPercentile[mq.AggregateOperator]

		query = fmt.Sprintf(`SELECT %s ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) as value FROM (%s) GROUP BY %s ORDER BY %s ts`, groupTagsWithoutLeTable, value, query, groupByWithoutLeTable, orderWithoutLeTable)
		return query, nil
	case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
		op := fmt.Sprintf("%s(value)", aggregateOperatorToSQLFunc[mq.AggregateOperator])
		query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
		return query, nil
	case v3.AggregateOperatorCount:
		op := "toFloat64(count(*))"
		query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
		return query, nil
	case v3.AggregateOperatorCountDistinct:
		op := "toFloat64(count(distinct(value)))"
		query := fmt.Sprintf(queryTmpl, groupTags, op, filterSubQuery, groupBy, orderBy)
		return query, nil
	case v3.AggregateOperatorNoOp:
		return "", fmt.Errorf("noop is not supported for table view")
	default:
		return "", fmt.Errorf("unsupported aggregate operator")
	}
}
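The step computation at the top of this builder rounds the window length up to a whole number of minutes, so `sum(value)/step` in the delta branches yields a per-second rate over whole 60-second buckets. A self-contained sketch of the same rounding, using the timestamps from the tests below:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Same rounding as buildDeltaMetricQueryForTable: window length in
	// seconds, rounded up to the nearest multiple of 60.
	start, end := int64(1689255866000), int64(1689257640000) // ms
	step := int64(math.Ceil(float64(end-start+1)/1000/60) * 60)
	fmt.Println(step) // 1800, matching "sum(value)/1800" in the delta test expectations
}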
|
99
pkg/query-service/app/metrics/v3/delta_table_test.go
Normal file
99
pkg/query-service/app/metrics/v3/delta_table_test.go
Normal file
@ -0,0 +1,99 @@
package v3

import (
	"testing"

	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

func TestPanelTableForDelta(t *testing.T) {
	cases := []struct {
		name     string
		query    *v3.BuilderQuery
		expected string
	}{
		{
			name: "request rate",
			query: &v3.BuilderQuery{
				QueryName:         "A",
				DataSource:        v3.DataSourceMetrics,
				AggregateOperator: v3.AggregateOperatorSumRate,
				AggregateAttribute: v3.AttributeKey{
					Key: "signoz_latency_count",
				},
				Temporality: v3.Delta,
				Filters: &v3.FilterSet{
					Items: []v3.FilterItem{
						{
							Key:      v3.AttributeKey{Key: "service_name"},
							Operator: v3.FilterOperatorIn,
							Value:    []interface{}{"frontend"},
						},
						{
							Key:      v3.AttributeKey{Key: "operation"},
							Operator: v3.FilterOperatorIn,
							Value:    []interface{}{"HTTP GET /dispatch"},
						},
					},
				},
				Expression: "A",
			},
			expected: "SELECT toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch']) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY ts ORDER BY ts",
		},
		{
			name: "latency p50",
			query: &v3.BuilderQuery{
				QueryName:         "A",
				DataSource:        v3.DataSourceMetrics,
				AggregateOperator: v3.AggregateOperatorHistQuant50,
				AggregateAttribute: v3.AttributeKey{
					Key: "signoz_latency_bucket",
				},
				Temporality: v3.Delta,
				Filters: &v3.FilterSet{
					Items: []v3.FilterItem{
						{
							Key:      v3.AttributeKey{Key: "service_name"},
							Operator: v3.FilterOperatorEqual,
							Value:    "frontend",
						},
					},
				},
				Expression: "A",
			},
			expected: "SELECT ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.500) as value FROM (SELECT le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') = 'frontend') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY le,ts ORDER BY le ASC, ts) GROUP BY ts ORDER BY ts",
		},
		{
			name: "latency p99 with group by",
			query: &v3.BuilderQuery{
				QueryName:         "A",
				DataSource:        v3.DataSourceMetrics,
				AggregateOperator: v3.AggregateOperatorHistQuant99,
				AggregateAttribute: v3.AttributeKey{
					Key: "signoz_latency_bucket",
				},
				Temporality: v3.Delta,
				GroupBy: []v3.AttributeKey{
					{
						Key: "service_name",
					},
				},
				Expression: "A",
			},
			expected: "SELECT service_name, ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.990) as value FROM (SELECT service_name,le, toStartOfHour(now()) as ts, sum(value)/1800 as value FROM signoz_metrics.distributed_samples_v2 GLOBAL INNER JOIN (SELECT JSONExtractString(labels, 'service_name') as service_name, JSONExtractString(labels, 'le') as le, fingerprint FROM signoz_metrics.distributed_time_series_v2 WHERE metric_name = 'signoz_latency_bucket' AND temporality = 'Delta' ) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_bucket' AND timestamp_ms >= 1689255866000 AND timestamp_ms <= 1689257640000 GROUP BY service_name,le,ts ORDER BY service_name ASC,le ASC, ts) GROUP BY service_name,ts ORDER BY service_name ASC, ts",
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			query, err := buildDeltaMetricQueryForTable(1689255866000, 1689257640000, 1800, c.query, "distributed_time_series_v2")
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

			if query != c.expected {
				t.Fatalf("expected: %s, got: %s", c.expected, query)
			}
		})
	}
}
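Both new test files pin the builders to exact SQL strings, so any change to the query templates will surface as a diff in these expectations. They can be run in isolation with `go test ./pkg/query-service/app/metrics/v3/ -run 'TestPanelTableFor'`.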
@ -403,15 +403,15 @@ func reduceQuery(query string, reduceTo v3.ReduceToOperator, aggregateOperator v3.AggregateOperator)
 	// chart with just the query value. For the quer
 	switch reduceTo {
 	case v3.ReduceToOperatorLast:
-		query = fmt.Sprintf("SELECT anyLast(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy)
+		query = fmt.Sprintf("SELECT anyLastIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s", selectLabels, query, groupBy)
 	case v3.ReduceToOperatorSum:
-		query = fmt.Sprintf("SELECT sum(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy)
+		query = fmt.Sprintf("SELECT sumIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s", selectLabels, query, groupBy)
 	case v3.ReduceToOperatorAvg:
-		query = fmt.Sprintf("SELECT avg(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy)
+		query = fmt.Sprintf("SELECT avgIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s", selectLabels, query, groupBy)
 	case v3.ReduceToOperatorMax:
-		query = fmt.Sprintf("SELECT max(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy)
+		query = fmt.Sprintf("SELECT maxIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s", selectLabels, query, groupBy)
 	case v3.ReduceToOperatorMin:
-		query = fmt.Sprintf("SELECT min(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy)
+		query = fmt.Sprintf("SELECT minIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s", selectLabels, query, groupBy)
 	default:
 		return "", fmt.Errorf("unsupported reduce operator")
 	}
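The switch from plain aggregates to the `*If` combinators makes the reduce step skip rows whose timestamp is the zero epoch, presumably padding rows that carry no real sample and would otherwise distort last/sum/avg/max/min. A minimal sketch of the string composition for the sum branch; the inner query and empty labels here are invented for illustration, not the builder's real inputs:

package main

import "fmt"

func main() {
	// Stand-ins for the builder's inputs; only the shape matters here.
	selectLabels, groupBy := "", ""
	inner := "SELECT ts, value FROM some_series" // hypothetical inner query

	q := fmt.Sprintf(
		"SELECT sumIf(value, toUnixTimestamp(ts) != 0) as value, "+
			"anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp %s FROM (%s) %s",
		selectLabels, inner, groupBy)
	fmt.Println(q)
}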
@ -422,10 +422,18 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType,
 	var query string
 	var err error
 	if mq.Temporality == v3.Delta {
+		if panelType == v3.PanelTypeTable {
+			query, err = buildDeltaMetricQueryForTable(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
+		} else {
 			query, err = buildDeltaMetricQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
+		}
+	} else {
+		if panelType == v3.PanelTypeTable {
+			query, err = buildMetricQueryForTable(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
 	} else {
 		query, err = buildMetricQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_TIMESERIES_TABLENAME)
 	}
+	}
 	if err != nil {
 		return "", err
 	}
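`PrepareMetricQuery` now dispatches along two axes, temporality (delta vs. cumulative) and panel type (table vs. time series), picking one of the four builders accordingly; the table builders collapse the whole window into a single row per group instead of one row per step.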
@ -235,7 +235,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3,

 	// TODO: add support for logs and traces
 	if builderQuery.DataSource == v3.DataSourceLogs {
-		query, err := logsV3.PrepareLogsQuery(params.Start, params.End, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery)
+		query, err := logsV3.PrepareLogsQuery(params.Start, params.End, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, "")
 		if err != nil {
 			errQueriesByName[queryName] = err.Error()
 			continue
@ -6,6 +6,7 @@ import (

 	"github.com/SigNoz/govaluate"
 	"go.signoz.io/signoz/pkg/query-service/cache"
+	"go.signoz.io/signoz/pkg/query-service/constants"
 	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
 	"go.uber.org/zap"
 )

@ -39,7 +40,7 @@ var SupportedFunctions = []string{
 var EvalFuncs = map[string]govaluate.ExpressionFunction{}

 type prepareTracesQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, keys map[string]v3.AttributeKey) (string, error)
-type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery) (string, error)
+type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, graphLimitQtype string) (string, error)
 type prepareMetricQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery) (string, error)

 type QueryBuilder struct {
@ -152,11 +153,25 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...interface{})
 			}
 			queries[queryName] = queryString
 		case v3.DataSourceLogs:
-			queryString, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query)
+			// for a time-series query with a limit, substitute into the placeholder, since the limit query is already formed
+			if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
+				limitQuery, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, constants.FirstQueryGraphLimit)
+				if err != nil {
+					return nil, err
+				}
+				placeholderQuery, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, constants.SecondQueryGraphLimit)
+				if err != nil {
+					return nil, err
+				}
+				query := fmt.Sprintf(placeholderQuery, limitQuery)
+				queries[queryName] = query
+			} else {
+				queryString, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, "")
 				if err != nil {
 					return nil, err
 				}
 				queries[queryName] = queryString
+			}
 		case v3.DataSourceMetrics:
 			queryString, err := qb.options.BuildMetricQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query)
 			if err != nil {
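For graph panels with a limit and a group-by, the log query is now built in two passes: a first query that selects the limited set of groups, and a second "placeholder" query containing a `%s` slot into which the first is substituted via `fmt.Sprintf`. A minimal sketch of that composition; the table and column names below are invented for illustration:

package main

import "fmt"

func main() {
	// Hypothetical first-pass query: pick the top 5 groups.
	limitQuery := "SELECT host FROM logs GROUP BY host ORDER BY count() DESC LIMIT 5"
	// Hypothetical second-pass query with a %s slot for the group set.
	placeholderQuery := "SELECT host, ts, count() AS value FROM logs WHERE host IN (%s) GROUP BY host, ts"

	final := fmt.Sprintf(placeholderQuery, limitQuery)
	fmt.Println(final)
}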
@ -49,6 +49,7 @@ type ServerOptions struct {
 	DisableRules      bool
 	RuleRepoURL       string
 	PreferDelta       bool
+	PreferSpanMetrics bool
 }

 // Server runs HTTP, Mux and a grpc server

@ -127,6 +128,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		Reader:            reader,
 		SkipConfig:        skipConfig,
 		PerferDelta:       serverOptions.PreferDelta,
+		PreferSpanMetrics: serverOptions.PreferSpanMetrics,
 		AppDao:            dao.DB(),
 		RuleManager:       rm,
 		FeatureFlags:      fm,
@ -87,6 +87,13 @@ var DEFAULT_FEATURE_SET = model.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
+	model.Feature{
+		Name:       model.UseSpanMetrics,
+		Active:     false,
+		Usage:      0,
+		UsageLimit: -1,
+		Route:      "",
+	},
 }

 func GetContextTimeout() time.Duration {
@ -301,3 +308,6 @@ var StaticFieldsLogsV3 = map[string]v3.AttributeKey{
|
|||||||
const SigNozOrderByValue = "#SIGNOZ_VALUE"
|
const SigNozOrderByValue = "#SIGNOZ_VALUE"
|
||||||
|
|
||||||
const TIMESTAMP = "timestamp"
|
const TIMESTAMP = "timestamp"
|
||||||
|
|
||||||
|
const FirstQueryGraphLimit = "first_query_graph_limit"
|
||||||
|
const SecondQueryGraphLimit = "second_query_graph_limit"
|
||||||
|
@ -35,11 +35,13 @@ func main() {
 	var ruleRepoURL string

 	var preferDelta bool
+	var preferSpanMetrics bool

 	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
 	flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
 	flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
-	flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over gauge)")
+	flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)")
+	flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
 	flag.StringVar(&ruleRepoURL, "rules.repo-url", constants.AlertHelpPage, "(host address used to build rule link in alert messages)")
 	flag.Parse()

@ -55,6 +57,7 @@ func main() {
 		PromConfigPath:    promConfigPath,
 		SkipTopLvlOpsPath: skipTopLvlOpsPath,
 		PreferDelta:       preferDelta,
+		PreferSpanMetrics: preferSpanMetrics,
 		PrivateHostPort:   constants.PrivateHostPort,
 		DisableRules:      disableRules,
 		RuleRepoURL:       ruleRepoURL,
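With these flags wired through, the query service can be started with, for example, `query-service -config=./config/prometheus.yml -prefer-delta -prefer-span-metrics`; both boolean flags default to false.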
@ -14,3 +14,4 @@ const CustomMetricsFunction = "CUSTOM_METRICS_FUNCTION"
 const OSS = "OSS"
 const QueryBuilderPanels = "QUERY_BUILDER_PANELS"
 const QueryBuilderAlerts = "QUERY_BUILDER_ALERTS"
+const UseSpanMetrics = "USE_SPAN_METRICS"
@ -143,8 +143,11 @@ func ValidateAndCastValue(v interface{}, dataType v3.AttributeKeyDataType) (inte
|
|||||||
|
|
||||||
// ClickHouseFormattedValue formats the value to be used in clickhouse query
|
// ClickHouseFormattedValue formats the value to be used in clickhouse query
|
||||||
func ClickHouseFormattedValue(v interface{}) string {
|
func ClickHouseFormattedValue(v interface{}) string {
|
||||||
|
// if it's pointer convert it to a value
|
||||||
|
v = getPointerValue(v)
|
||||||
|
|
||||||
switch x := v.(type) {
|
switch x := v.(type) {
|
||||||
case int, int8, int16, int32, int64:
|
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64:
|
||||||
return fmt.Sprintf("%d", x)
|
return fmt.Sprintf("%d", x)
|
||||||
case float32, float64:
|
case float32, float64:
|
||||||
return fmt.Sprintf("%f", x)
|
return fmt.Sprintf("%f", x)
|
||||||
@ -152,6 +155,7 @@ func ClickHouseFormattedValue(v interface{}) string {
|
|||||||
return fmt.Sprintf("'%s'", x)
|
return fmt.Sprintf("'%s'", x)
|
||||||
case bool:
|
case bool:
|
||||||
return fmt.Sprintf("%v", x)
|
return fmt.Sprintf("%v", x)
|
||||||
|
|
||||||
case []interface{}:
|
case []interface{}:
|
||||||
if len(x) == 0 {
|
if len(x) == 0 {
|
||||||
return ""
|
return ""
|
||||||
@ -167,7 +171,7 @@ func ClickHouseFormattedValue(v interface{}) string {
|
|||||||
}
|
}
|
||||||
str += "]"
|
str += "]"
|
||||||
return str
|
return str
|
||||||
case int, int8, int16, int32, int64, float32, float64, bool:
|
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool:
|
||||||
return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
|
return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
|
||||||
default:
|
default:
|
||||||
zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
|
zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
|
||||||
@ -178,3 +182,42 @@ func ClickHouseFormattedValue(v interface{}) string {
|
|||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getPointerValue(v interface{}) interface{} {
|
||||||
|
switch x := v.(type) {
|
||||||
|
case *uint8:
|
||||||
|
return *x
|
||||||
|
case *uint16:
|
||||||
|
return *x
|
||||||
|
case *uint32:
|
||||||
|
return *x
|
||||||
|
case *uint64:
|
||||||
|
return *x
|
||||||
|
case *int:
|
||||||
|
return *x
|
||||||
|
case *int8:
|
||||||
|
return *x
|
||||||
|
case *int16:
|
||||||
|
return *x
|
||||||
|
case *int32:
|
||||||
|
return *x
|
||||||
|
case *int64:
|
||||||
|
return *x
|
||||||
|
case *float32:
|
||||||
|
return *x
|
||||||
|
case *float64:
|
||||||
|
return *x
|
||||||
|
case *string:
|
||||||
|
return *x
|
||||||
|
case *bool:
|
||||||
|
return *x
|
||||||
|
case []interface{}:
|
||||||
|
values := []interface{}{}
|
||||||
|
for _, val := range x {
|
||||||
|
values = append(values, getPointerValue(val))
|
||||||
|
}
|
||||||
|
return values
|
||||||
|
default:
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
}
|
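A short usage sketch of the dereferencing behavior added above: pointers, and slices containing pointers, now format the same as their underlying values. This assumes the function is exported from go.signoz.io/signoz/pkg/query-service/utils, the import path shown in delta_table.go above; the variable names are illustrative:

package main

import (
	"fmt"

	"go.signoz.io/signoz/pkg/query-service/utils"
)

func main() {
	n := 42
	fmt.Println(utils.ClickHouseFormattedValue(n))                    // "42"
	fmt.Println(utils.ClickHouseFormattedValue(&n))                   // "42", after pointer dereference
	fmt.Println(utils.ClickHouseFormattedValue([]interface{}{&n, n})) // "[42,42]"
}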
@ -291,3 +291,86 @@ func TestValidateAndCastValue(t *testing.T) {
 		})
 	}
 }
+
+var one = 1
+var onePointOne = 1.1
+var oneString = "1"
+var trueBool = true
+
+var testClickHouseFormattedValueData = []struct {
+	name  string
+	value interface{}
+	want  interface{}
+}{
+	{
+		name:  "int",
+		value: 1,
+		want:  "1",
+	},
+	{
+		name:  "int64",
+		value: int64(1),
+		want:  "1",
+	},
+	{
+		name:  "float32",
+		value: float32(1.1),
+		want:  "1.100000",
+	},
+	{
+		name:  "string",
+		value: "1",
+		want:  "'1'",
+	},
+	{
+		name:  "bool",
+		value: true,
+		want:  "true",
+	},
+	{
+		name:  "[]interface{}",
+		value: []interface{}{1, 2},
+		want:  "[1,2]",
+	},
+	{
+		name:  "[]interface{}",
+		value: []interface{}{"1", "2"},
+		want:  "['1','2']",
+	},
+	{
+		name:  "pointer int",
+		value: &one,
+		want:  "1",
+	},
+	{
+		name:  "pointer float64",
+		value: &onePointOne,
+		want:  "1.100000",
+	},
+	{
+		name:  "pointer string",
+		value: &oneString,
+		want:  "'1'",
+	},
+	{
+		name:  "pointer bool",
+		value: &trueBool,
+		want:  "true",
+	},
+	{
+		name:  "pointer []interface{}",
+		value: []interface{}{&one, &one},
+		want:  "[1,1]",
+	},
+}

+func TestClickHouseFormattedValue(t *testing.T) {
+	for _, tt := range testClickHouseFormattedValueData {
+		t.Run(tt.name, func(t *testing.T) {
+			got := ClickHouseFormattedValue(tt.value)
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("ClickHouseFormattedValue() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}