{activeStep === 1 && (
@@ -285,6 +318,7 @@ export default function Onboarding(): JSX.Element {
setActiveStep(activeStep - 1);
setSelectedModule(useCases.APM);
resetProgress();
+ history.push(ROUTES.GET_STARTED);
}}
selectedModule={selectedModule}
selectedModuleSteps={selectedModuleSteps}
diff --git a/frontend/src/container/OnboardingContainer/Steps/LogsConnectionStatus/LogsConnectionStatus.tsx b/frontend/src/container/OnboardingContainer/Steps/LogsConnectionStatus/LogsConnectionStatus.tsx
index b7ebbcfde8..3654b21fd7 100644
--- a/frontend/src/container/OnboardingContainer/Steps/LogsConnectionStatus/LogsConnectionStatus.tsx
+++ b/frontend/src/container/OnboardingContainer/Steps/LogsConnectionStatus/LogsConnectionStatus.tsx
@@ -5,6 +5,7 @@ import {
CloseCircleTwoTone,
LoadingOutlined,
} from '@ant-design/icons';
+import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { PANEL_TYPES } from 'constants/queryBuilder';
import Header from 'container/OnboardingContainer/common/Header/Header';
import { useOnboardingContext } from 'container/OnboardingContainer/context/OnboardingContext';
@@ -72,6 +73,9 @@ export default function LogsConnectionStatus(): JSX.Element {
reduceTo: 'sum',
offset: 0,
pageSize: 100,
+ timeAggregation: '',
+ spaceAggregation: '',
+ functions: [],
},
],
queryFormulas: [],
@@ -84,6 +88,7 @@ export default function LogsConnectionStatus(): JSX.Element {
const { data, isFetching, error, isError } = useGetExplorerQueryRange(
requestData,
PANEL_TYPES.LIST,
+ DEFAULT_ENTITY_VERSION,
{
keepPreviousData: true,
refetchInterval: pollingInterval,
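
Reviewer note on the new positional argument: `useGetExplorerQueryRange` — like `useGetQueryRange` and `useGetQueriesRange` later in this diff — now takes the entity version so callers can pin a request to the v3 or v4 metrics query engine. A minimal sketch of the call shape, assuming the hook's import path matches the others used in this PR (`useConnectionProbe` is a hypothetical caller):

```ts
import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { PANEL_TYPES } from 'constants/queryBuilder';
import { useGetExplorerQueryRange } from 'hooks/queryBuilder/useGetExplorerQueryRange';
import { Query } from 'types/api/queryBuilder/queryBuilderData';

// Sketch of the updated call shape: the version sits between the panel type
// and the React Query options, so existing option objects shift from the
// third to the fourth argument.
function useConnectionProbe(requestData: Query, pollingInterval: number) {
	return useGetExplorerQueryRange(
		requestData,
		PANEL_TYPES.LIST,
		DEFAULT_ENTITY_VERSION,
		{ keepPreviousData: true, refetchInterval: pollingInterval },
	);
}
```
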
diff --git a/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts b/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts
index 7bf505f30d..485a33382c 100644
--- a/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts
+++ b/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts
@@ -403,6 +403,38 @@ import APM_javascript_reactjs_macOsARM64_quickStart_runApplication from '../Modu
import APM_javascript_reactjs_macOsARM64_recommendedSteps_setupOtelCollector from '../Modules/APM/Javascript/md-docs/ReactJS/MacOsARM64/Recommended/reactjs-macosarm64-recommended-installOtelCollector.md';
import APM_javascript_reactjs_macOsARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Javascript/md-docs/ReactJS/MacOsARM64/Recommended/reactjs-macosarm64-recommended-instrumentApplication.md';
import APM_javascript_reactjs_macOsARM64_recommendedSteps_runApplication from '../Modules/APM/Javascript/md-docs/ReactJS/MacOsARM64/Recommended/reactjs-macosarm64-recommended-runApplication.md';
+// PHP-Kubernetes
+import APM_php_kubernetes_recommendedSteps_setupOtelCollector from '../Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-installOtelCollector.md';
+import APM_php_kubernetes_recommendedSteps_instrumentApplication from '../Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-instrumentApplication.md';
+import APM_php_kubernetes_recommendedSteps_runApplication from '../Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-runApplication.md';
+// PHP-LinuxAMD64-quickstart
+import APM_php_linuxAMD64_quickStart_instrumentApplication from '../Modules/APM/Php/md-docs/LinuxAMD64/QuickStart/php-linuxamd64-quickStart-instrumentApplication.md';
+import APM_php_linuxAMD64_quickStart_runApplication from '../Modules/APM/Php/md-docs/LinuxAMD64/QuickStart/php-linuxamd64-quickStart-runApplication.md';
+// PHP-LinuxAMD64-recommended
+import APM_php_linuxAMD64_recommendedSteps_setupOtelCollector from '../Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-installOtelCollector.md';
+import APM_php_linuxAMD64_recommendedSteps_instrumentApplication from '../Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-instrumentApplication.md';
+import APM_php_linuxAMD64_recommendedSteps_runApplication from '../Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-runApplication.md';
+// PHP-LinuxARM64-quickstart
+import APM_php_linuxARM64_quickStart_instrumentApplication from '../Modules/APM/Php/md-docs/LinuxARM64/QuickStart/php-linuxarm64-quickStart-instrumentApplication.md';
+import APM_php_linuxARM64_quickStart_runApplication from '../Modules/APM/Php/md-docs/LinuxARM64/QuickStart/php-linuxarm64-quickStart-runApplication.md';
+// PHP-LinuxARM64-recommended
+import APM_php_linuxARM64_recommendedSteps_setupOtelCollector from '../Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-installOtelCollector.md';
+import APM_php_linuxARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-instrumentApplication.md';
+import APM_php_linuxARM64_recommendedSteps_runApplication from '../Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-runApplication.md';
+// PHP-MacOsAMD64-quickstart
+import APM_php_macOsAMD64_quickStart_instrumentApplication from '../Modules/APM/Php/md-docs/MacOsAMD64/QuickStart/php-macosamd64-quickStart-instrumentApplication.md';
+import APM_php_macOsAMD64_quickStart_runApplication from '../Modules/APM/Php/md-docs/MacOsAMD64/QuickStart/php-macosamd64-quickStart-runApplication.md';
+// PHP-MacOsAMD64-recommended
+import APM_php_macOsAMD64_recommendedSteps_setupOtelCollector from '../Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-installOtelCollector.md';
+import APM_php_macOsAMD64_recommendedSteps_instrumentApplication from '../Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-instrumentApplication.md';
+import APM_php_macOsAMD64_recommendedSteps_runApplication from '../Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-runApplication.md';
+// PHP-MacOsARM64-quickstart
+import APM_php_macOsARM64_quickStart_instrumentApplication from '../Modules/APM/Php/md-docs/MacOsARM64/QuickStart/php-macosarm64-quickStart-instrumentApplication.md';
+import APM_php_macOsARM64_quickStart_runApplication from '../Modules/APM/Php/md-docs/MacOsARM64/QuickStart/php-macosarm64-quickStart-runApplication.md';
+// PHP-MacOsARM64-recommended
+import APM_php_macOsARM64_recommendedSteps_setupOtelCollector from '../Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-installOtelCollector.md';
+import APM_php_macOsARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-instrumentApplication.md';
+import APM_php_macOsARM64_recommendedSteps_runApplication from '../Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-runApplication.md';
/// ////// Javascript Done
/// ///// Python Start
// Django
@@ -575,7 +607,6 @@ import APM_python_other_macOsARM64_recommendedSteps_setupOtelCollector from '../
import APM_python_other_macOsARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Python/md-docs/Others/MacOsARM64/Recommended/others-macosarm64-recommended-instrumentApplication.md';
import APM_python_other_macOsARM64_recommendedSteps_runApplication from '../Modules/APM/Python/md-docs/Others/MacOsARM64/Recommended/others-macosarm64-recommended-runApplication.md';
// ----------------------------------------------------------------------------
-/// ////// Go Done
/// ///// ROR Start
// ROR-Kubernetes
import APM_rails_kubernetes_recommendedSteps_setupOtelCollector from '../Modules/APM/RubyOnRails/md-docs/Kubernetes/ror-kubernetes-installOtelCollector.md';
@@ -1546,4 +1577,36 @@ export const ApmDocFilePaths = {
APM_swift_macOsARM64_recommendedSteps_setupOtelCollector,
APM_swift_macOsARM64_recommendedSteps_instrumentApplication,
APM_swift_macOsARM64_recommendedSteps_runApplication,
+
+ APM_php_kubernetes_recommendedSteps_setupOtelCollector,
+ APM_php_kubernetes_recommendedSteps_instrumentApplication,
+ APM_php_kubernetes_recommendedSteps_runApplication,
+
+ APM_php_linuxAMD64_quickStart_instrumentApplication,
+ APM_php_linuxAMD64_quickStart_runApplication,
+
+ APM_php_linuxAMD64_recommendedSteps_setupOtelCollector,
+ APM_php_linuxAMD64_recommendedSteps_instrumentApplication,
+ APM_php_linuxAMD64_recommendedSteps_runApplication,
+
+ APM_php_linuxARM64_quickStart_instrumentApplication,
+ APM_php_linuxARM64_quickStart_runApplication,
+
+ APM_php_linuxARM64_recommendedSteps_setupOtelCollector,
+ APM_php_linuxARM64_recommendedSteps_instrumentApplication,
+ APM_php_linuxARM64_recommendedSteps_runApplication,
+
+ APM_php_macOsAMD64_quickStart_instrumentApplication,
+ APM_php_macOsAMD64_quickStart_runApplication,
+
+ APM_php_macOsAMD64_recommendedSteps_setupOtelCollector,
+ APM_php_macOsAMD64_recommendedSteps_instrumentApplication,
+ APM_php_macOsAMD64_recommendedSteps_runApplication,
+
+ APM_php_macOsARM64_quickStart_instrumentApplication,
+ APM_php_macOsARM64_quickStart_runApplication,
+
+ APM_php_macOsARM64_recommendedSteps_setupOtelCollector,
+ APM_php_macOsARM64_recommendedSteps_instrumentApplication,
+ APM_php_macOsARM64_recommendedSteps_runApplication,
};
diff --git a/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts b/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts
index 7b851feac9..517cc38171 100644
--- a/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts
+++ b/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts
@@ -1,6 +1,15 @@
-import { ModuleProps, ModulesMap } from '../OnboardingContainer';
+import ROUTES from 'constants/routes';
+
+import { ModuleProps } from '../OnboardingContainer';
import { DataSourceType } from '../Steps/DataSource/DataSource';
+export enum ModulesMap {
+ APM = 'APM',
+ LogsManagement = 'LogsManagement',
+ InfrastructureMonitoring = 'InfrastructureMonitoring',
+ AwsMonitoring = 'AwsMonitoring',
+}
+
export const frameworksMap = {
APM: {
java: [
@@ -81,174 +90,179 @@ const supportedLanguages = [
{
name: 'java',
id: 'java',
- imgURL: `Logos/java.png`,
+ imgURL: `/Logos/java.png`,
},
{
name: 'python',
id: 'python',
- imgURL: `Logos/python.png`,
+ imgURL: `/Logos/python.png`,
},
{
name: 'go',
id: 'go',
- imgURL: `Logos/go.png`,
+ imgURL: `/Logos/go.png`,
},
{
name: 'javascript',
id: 'javascript',
- imgURL: `Logos/javascript.png`,
+ imgURL: `/Logos/javascript.png`,
},
{
name: 'rails',
id: 'rails',
- imgURL: `Logos/rails.png`,
+ imgURL: `/Logos/rails.png`,
},
{
name: '.NET',
id: 'dotnet',
- imgURL: `Logos/dotnet.png`,
+ imgURL: `/Logos/dotnet.png`,
},
{
name: 'rust',
id: 'rust',
- imgURL: `Logos/rust.png`,
+ imgURL: `/Logos/rust.png`,
},
{
name: 'elixir',
id: 'elixir',
- imgURL: `Logos/elixir.png`,
+ imgURL: `/Logos/elixir.png`,
},
{
name: 'swift',
id: 'swift',
- imgURL: `Logos/swift.png`,
+ imgURL: `/Logos/swift.png`,
+ },
+ {
+ name: 'php',
+ id: 'php',
+ imgURL: `/Logos/php.png`,
},
];
export const defaultLogsType = {
name: 'Kubernetes Pod Logs',
id: 'kubernetes',
- imgURL: `Logos/kubernetes.svg`,
+ imgURL: `/Logos/kubernetes.svg`,
};
const supportedLogsTypes = [
{
name: 'Kubernetes Pod Logs',
id: 'kubernetes',
- imgURL: `Logos/kubernetes.svg`,
+ imgURL: `/Logos/kubernetes.svg`,
},
{
name: 'Docker Container Logs',
id: 'docker',
- imgURL: `Logos/docker.svg`,
+ imgURL: `/Logos/docker.svg`,
},
{
name: 'SysLogs',
id: 'syslogs',
- imgURL: `Logos/syslogs.svg`,
+ imgURL: `/Logos/syslogs.svg`,
},
{
name: 'Application Logs',
id: 'application_logs',
- imgURL: `Logos/software-window.svg`,
+ imgURL: `/Logos/software-window.svg`,
},
{
name: 'FluentBit',
id: 'fluentBit',
- imgURL: `Logos/fluent-bit.png`,
+ imgURL: `/Logos/fluent-bit.png`,
},
{
name: 'FluentD',
id: 'fluentD',
- imgURL: `Logos/fluentd.png`,
+ imgURL: `/Logos/fluentd.png`,
},
{
name: 'LogStash',
id: 'logStash',
- imgURL: `Logos/logstash.svg`,
+ imgURL: `/Logos/logstash.svg`,
},
{
name: 'Heroku',
id: 'heroku',
- imgURL: `Logos/heroku.png`,
+ imgURL: `/Logos/heroku.png`,
},
{
name: 'Vercel',
id: 'vercel',
- imgURL: `Logos/vercel.png`,
+ imgURL: `/Logos/vercel.png`,
},
{
name: 'HTTP',
id: 'http',
- imgURL: `Logos/http.png`,
+ imgURL: `/Logos/http.png`,
},
{
name: 'Cloudwatch',
id: 'cloudwatch',
- imgURL: `Logos/cloudwatch.png`,
+ imgURL: `/Logos/cloudwatch.png`,
},
];
export const defaultInfraMetricsType = {
name: 'Kubernetes Infra Metrics',
id: 'kubernetesInfraMetrics',
- imgURL: `Logos/kubernetes.svg`,
+ imgURL: `/Logos/kubernetes.svg`,
};
const supportedInfraMetrics = [
{
name: 'Kubernetes Infra Metrics',
id: 'kubernetesInfraMetrics',
- imgURL: `Logos/kubernetes.svg`,
+ imgURL: `/Logos/kubernetes.svg`,
},
{
name: 'HostMetrics',
id: 'hostMetrics',
- imgURL: `Logos/software-window.svg`,
+ imgURL: `/Logos/software-window.svg`,
},
{
name: 'Other Metrics',
id: 'otherMetrics',
- imgURL: `Logos/cmd-terminal.svg`,
+ imgURL: `/Logos/cmd-terminal.svg`,
},
];
export const defaultAwsServices = {
- name: 'EC2 - Application Logs',
+ name: 'EC2 - App/Server Logs',
id: 'awsEc2ApplicationLogs',
- imgURL: `Logos/ec2.svg`,
+ imgURL: `/Logos/ec2.svg`,
};
const supportedAwsServices = [
{
name: 'EC2 - App/Server Logs',
id: 'awsEc2ApplicationLogs',
- imgURL: `Logos/ec2.svg`,
+ imgURL: `/Logos/ec2.svg`,
},
{
name: 'EC2 - Infra Metrics',
id: 'awsEc2InfrastructureMetrics',
- imgURL: `Logos/ec2.svg`,
+ imgURL: `/Logos/ec2.svg`,
},
{
name: 'ECS - EC2',
id: 'awsEcsEc2',
- imgURL: `Logos/ecs.svg`,
+ imgURL: `/Logos/ecs.svg`,
},
{
name: 'ECS - Fargate',
id: 'awsEcsFargate',
- imgURL: `Logos/ecs.svg`,
+ imgURL: `/Logos/ecs.svg`,
},
{
name: 'ECS - External',
id: 'awsEcsExternal',
- imgURL: `Logos/ecs.svg`,
+ imgURL: `/Logos/ecs.svg`,
},
{
name: 'EKS',
id: 'awsEks',
- imgURL: `Logos/eks.svg`,
+ imgURL: `/Logos/eks.svg`,
},
];
@@ -284,7 +298,8 @@ export const getSupportedFrameworks = ({
(moduleID === ModulesMap.APM && dataSourceName === '.NET') ||
(moduleID === ModulesMap.APM && dataSourceName === 'rust') ||
(moduleID === ModulesMap.APM && dataSourceName === 'elixir') ||
- (moduleID === ModulesMap.APM && dataSourceName === 'swift')
+ (moduleID === ModulesMap.APM && dataSourceName === 'swift') ||
+ (moduleID === ModulesMap.APM && dataSourceName === 'php')
) {
return [];
}
@@ -313,10 +328,19 @@ export const hasFrameworks = ({
(moduleID === ModulesMap.APM && dataSourceName === '.NET') ||
(moduleID === ModulesMap.APM && dataSourceName === 'rust') ||
(moduleID === ModulesMap.APM && dataSourceName === 'elixir') ||
- (moduleID === ModulesMap.APM && dataSourceName === 'swift')
+ (moduleID === ModulesMap.APM && dataSourceName === 'swift') ||
+ (moduleID === ModulesMap.APM && dataSourceName === 'php')
) {
return false;
}
return true;
};
+
+export const moduleRouteMap = {
+ [ModulesMap.APM]: ROUTES.GET_STARTED_APPLICATION_MONITORING,
+ [ModulesMap.LogsManagement]: ROUTES.GET_STARTED_LOGS_MANAGEMENT,
+ [ModulesMap.InfrastructureMonitoring]:
+ ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING,
+ [ModulesMap.AwsMonitoring]: ROUTES.GET_STARTED_AWS_MONITORING,
+};
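
The new `moduleRouteMap` pairs with the now-exported `ModulesMap` enum so each onboarding module can deep-link to its own route. A sketch of the intended lookup; the fallback mirrors the `history.push(ROUTES.GET_STARTED)` added at the top of this diff (the helper itself is illustrative, not part of the change):

```ts
import ROUTES from 'constants/routes';

import {
	moduleRouteMap,
	ModulesMap,
} from 'container/OnboardingContainer/utils/dataSourceUtils';

// Illustrative helper: resolve the URL for a selected onboarding module,
// falling back to the generic onboarding entry point.
function routeForModule(moduleId: ModulesMap): string {
	return moduleRouteMap[moduleId] ?? ROUTES.GET_STARTED;
}

routeForModule(ModulesMap.APM); // => ROUTES.GET_STARTED_APPLICATION_MONITORING
```
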
diff --git a/frontend/src/container/OptionsMenu/constants.ts b/frontend/src/container/OptionsMenu/constants.ts
index b1e5463686..2db02f85b8 100644
--- a/frontend/src/container/OptionsMenu/constants.ts
+++ b/frontend/src/container/OptionsMenu/constants.ts
@@ -1,3 +1,5 @@
+import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';
+
import { OptionsQuery } from './types';
export const URL_OPTIONS = 'options';
@@ -7,3 +9,46 @@ export const defaultOptionsQuery: OptionsQuery = {
maxLines: 2,
format: 'list',
};
+
+export const defaultTraceSelectedColumns = [
+ {
+ key: 'serviceName',
+ dataType: DataTypes.String,
+ type: 'tag',
+ isColumn: true,
+ isJSON: false,
+ id: 'serviceName--string--tag--true',
+ },
+ {
+ key: 'name',
+ dataType: DataTypes.String,
+ type: 'tag',
+ isColumn: true,
+ isJSON: false,
+ id: 'name--string--tag--true',
+ },
+ {
+ key: 'durationNano',
+ dataType: DataTypes.Float64,
+ type: 'tag',
+ isColumn: true,
+ isJSON: false,
+ id: 'durationNano--float64--tag--true',
+ },
+ {
+ key: 'httpMethod',
+ dataType: DataTypes.String,
+ type: 'tag',
+ isColumn: true,
+ isJSON: false,
+ id: 'httpMethod--string--tag--true',
+ },
+ {
+ key: 'responseStatusCode',
+ dataType: DataTypes.String,
+ type: 'tag',
+ isColumn: true,
+ isJSON: false,
+ id: 'responseStatusCode--string--tag--true',
+ },
+];
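
Each default trace column encodes its `id` as `key--dataType--type--isColumn`, the same `--` convention the GroupBy filter below starts relying on directly. A sketch of how one entry lines up with that convention (`makeTraceColumn` is hypothetical, and the `idDivider` literal here is an assumption matching `constants/queryBuilder`):

```ts
import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';

const idDivider = '--'; // assumption: same divider as constants/queryBuilder

// Illustrative builder for one default trace column entry.
function makeTraceColumn(key: string, dataType: DataTypes) {
	return {
		key,
		dataType,
		type: 'tag',
		isColumn: true,
		isJSON: false,
		id: [key, dataType, 'tag', 'true'].join(idDivider),
	};
}

makeTraceColumn('durationNano', DataTypes.Float64).id;
// => 'durationNano--float64--tag--true'
```
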
diff --git a/frontend/src/container/OptionsMenu/useOptionsMenu.ts b/frontend/src/container/OptionsMenu/useOptionsMenu.ts
index be2ae00b37..97fbbbb006 100644
--- a/frontend/src/container/OptionsMenu/useOptionsMenu.ts
+++ b/frontend/src/container/OptionsMenu/useOptionsMenu.ts
@@ -16,7 +16,11 @@ import {
} from 'types/api/queryBuilder/queryAutocompleteResponse';
import { DataSource } from 'types/common/queryBuilder';
-import { defaultOptionsQuery, URL_OPTIONS } from './constants';
+import {
+ defaultOptionsQuery,
+ defaultTraceSelectedColumns,
+ URL_OPTIONS,
+} from './constants';
import { InitialOptions, OptionsMenuConfig, OptionsQuery } from './types';
import { getOptionsFromKeys } from './utils';
@@ -124,20 +128,29 @@ const useOptionsMenu = ({
{ queryKey: [debouncedSearchText, isFocused], enabled: isFocused },
);
- const searchedAttributeKeys = useMemo(
- () => searchedAttributesData?.payload?.attributeKeys || [],
- [searchedAttributesData?.payload?.attributeKeys],
- );
+ const searchedAttributeKeys = useMemo(() => {
+ if (searchedAttributesData?.payload?.attributeKeys?.length) {
+ return searchedAttributesData.payload.attributeKeys;
+ }
+ if (dataSource === DataSource.TRACES) {
+ return defaultTraceSelectedColumns;
+ }
+
+ return [];
+ }, [dataSource, searchedAttributesData?.payload?.attributeKeys]);
const initialOptionsQuery: OptionsQuery = useMemo(
() => ({
...defaultOptionsQuery,
...initialOptions,
+ // eslint-disable-next-line no-nested-ternary
selectColumns: initialOptions?.selectColumns
? initialSelectedColumns
+ : dataSource === DataSource.TRACES
+ ? defaultTraceSelectedColumns
: defaultOptionsQuery.selectColumns,
}),
- [initialOptions, initialSelectedColumns],
+ [dataSource, initialOptions, initialSelectedColumns],
);
const selectedColumnKeys = useMemo(
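
The two memos above apply the same precedence: freshly fetched attribute keys win; otherwise traces fall back to `defaultTraceSelectedColumns`, and every other data source keeps the old empty default. Condensed into one illustrative helper (the helper name is not part of the diff):

```ts
import { defaultTraceSelectedColumns } from 'container/OptionsMenu/constants';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { DataSource } from 'types/common/queryBuilder';

// Hypothetical helper mirroring the memo logic above.
function resolveAttributeKeys(
	fetched: BaseAutocompleteData[] | undefined,
	dataSource: DataSource,
): BaseAutocompleteData[] {
	if (fetched && fetched.length > 0) {
		return fetched;
	}
	// Traces get sensible default columns instead of an empty list.
	return dataSource === DataSource.TRACES
		? (defaultTraceSelectedColumns as BaseAutocompleteData[])
		: [];
}
```
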
diff --git a/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx b/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx
index 7395102d4c..3e9276f596 100644
--- a/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx
+++ b/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx
@@ -279,9 +279,6 @@ function PendingInvitesContainer(): JSX.Element {
-
- {t('invite_link_share_manually')}
-
}
type="primary"
diff --git a/frontend/src/container/PipelinePage/PipelineListsView/Preview/components/PreviewIntervalSelector/components/LogsCountInInterval/index.tsx b/frontend/src/container/PipelinePage/PipelineListsView/Preview/components/PreviewIntervalSelector/components/LogsCountInInterval/index.tsx
index 63ee3ff3c0..b388bc0e7a 100644
--- a/frontend/src/container/PipelinePage/PipelineListsView/Preview/components/PreviewIntervalSelector/components/LogsCountInInterval/index.tsx
+++ b/frontend/src/container/PipelinePage/PipelineListsView/Preview/components/PreviewIntervalSelector/components/LogsCountInInterval/index.tsx
@@ -1,5 +1,6 @@
import './styles.scss';
+import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import {
initialFilters,
initialQueriesMap,
@@ -26,12 +27,15 @@ function LogsCountInInterval({
return q;
}, [filter]);
- const result = useGetQueryRange({
- graphType: PANEL_TYPES.TABLE,
- query,
- selectedTime: 'GLOBAL_TIME',
- globalSelectedInterval: timeInterval,
- });
+ const result = useGetQueryRange(
+ {
+ graphType: PANEL_TYPES.TABLE,
+ query,
+ selectedTime: 'GLOBAL_TIME',
+ globalSelectedInterval: timeInterval,
+ },
+ DEFAULT_ENTITY_VERSION,
+ );
if (!result.isFetched) {
return null;
diff --git a/frontend/src/container/PipelinePage/PipelineListsView/Preview/hooks/useSampleLogs.ts b/frontend/src/container/PipelinePage/PipelineListsView/Preview/hooks/useSampleLogs.ts
index 3789856771..457e3bd976 100644
--- a/frontend/src/container/PipelinePage/PipelineListsView/Preview/hooks/useSampleLogs.ts
+++ b/frontend/src/container/PipelinePage/PipelineListsView/Preview/hooks/useSampleLogs.ts
@@ -1,3 +1,4 @@
+import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import {
initialFilters,
initialQueriesMap,
@@ -42,12 +43,15 @@ const useSampleLogs = ({
return q;
}, [count, filter]);
- const response = useGetQueryRange({
- graphType: PANEL_TYPES.LIST,
- query,
- selectedTime: 'GLOBAL_TIME',
- globalSelectedInterval: timeInterval,
- });
+ const response = useGetQueryRange(
+ {
+ graphType: PANEL_TYPES.LIST,
+ query,
+ selectedTime: 'GLOBAL_TIME',
+ globalSelectedInterval: timeInterval,
+ },
+ DEFAULT_ENTITY_VERSION,
+ );
const { isFetching: isLoading, data } = response;
diff --git a/frontend/src/container/QueryBuilder/QueryBuilder.interfaces.ts b/frontend/src/container/QueryBuilder/QueryBuilder.interfaces.ts
index ef18d8ce39..5ebfd47da7 100644
--- a/frontend/src/container/QueryBuilder/QueryBuilder.interfaces.ts
+++ b/frontend/src/container/QueryBuilder/QueryBuilder.interfaces.ts
@@ -27,4 +27,6 @@ export type QueryBuilderProps = {
filterConfigs?: Partial;
queryComponents?: { renderOrderBy?: (props: OrderByFilterProps) => ReactNode };
isListViewPanel?: boolean;
+ showFunctions?: boolean;
+ version: string;
};
diff --git a/frontend/src/container/QueryBuilder/QueryBuilder.styles.scss b/frontend/src/container/QueryBuilder/QueryBuilder.styles.scss
index b23521ad68..dbb7a962ef 100644
--- a/frontend/src/container/QueryBuilder/QueryBuilder.styles.scss
+++ b/frontend/src/container/QueryBuilder/QueryBuilder.styles.scss
@@ -152,7 +152,7 @@
}
::-webkit-scrollbar {
- height: 1rem;
+ height: 0.2rem;
width: 0.2rem;
}
}
diff --git a/frontend/src/container/QueryBuilder/QueryBuilder.tsx b/frontend/src/container/QueryBuilder/QueryBuilder.tsx
index 0bdc321c1e..f95049a2d1 100644
--- a/frontend/src/container/QueryBuilder/QueryBuilder.tsx
+++ b/frontend/src/container/QueryBuilder/QueryBuilder.tsx
@@ -25,6 +25,8 @@ export const QueryBuilder = memo(function QueryBuilder({
filterConfigs = {},
queryComponents,
isListViewPanel = false,
+ showFunctions = false,
+ version,
}: QueryBuilderProps): JSX.Element {
const {
currentQuery,
@@ -46,7 +48,7 @@ export const QueryBuilder = memo(function QueryBuilder({
useEffect(() => {
if (currentDataSource !== initialDataSource || newPanelType !== panelType) {
- if (panelType === PANEL_TYPES.BAR) {
+ if (newPanelType === PANEL_TYPES.BAR) {
handleSetConfig(PANEL_TYPES.BAR, DataSource.METRICS);
return;
}
@@ -170,6 +172,8 @@ export const QueryBuilder = memo(function QueryBuilder({
: listViewLogFilterConfigs
}
queryComponents={queryComponents}
+ showFunctions={showFunctions}
+ version={version}
isListViewPanel
/>
)}
@@ -188,6 +192,8 @@ export const QueryBuilder = memo(function QueryBuilder({
query={query}
filterConfigs={filterConfigs}
queryComponents={queryComponents}
+ showFunctions={showFunctions}
+ version={version}
/>
))}
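
The one-character fix in the effect above is behavioral: the guard previously read the stale `panelType` from context, so switching *into* a bar chart never forced the metrics data source. Reduced to its essentials (names as in the diff, remainder of the effect unchanged):

```tsx
useEffect(() => {
	if (currentDataSource !== initialDataSource || newPanelType !== panelType) {
		// Compare the incoming panel type, not the stale context value,
		// so entering a BAR panel reliably switches to metrics.
		if (newPanelType === PANEL_TYPES.BAR) {
			handleSetConfig(PANEL_TYPES.BAR, DataSource.METRICS);
			return;
		}
		// ...rest of the effect unchanged
	}
	// eslint-disable-next-line react-hooks/exhaustive-deps
}, [currentDataSource, initialDataSource, newPanelType, panelType]);
```
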
diff --git a/frontend/src/container/QueryBuilder/components/Formula/Formula.tsx b/frontend/src/container/QueryBuilder/components/Formula/Formula.tsx
index 5f25edb29b..b6f9ebf894 100644
--- a/frontend/src/container/QueryBuilder/components/Formula/Formula.tsx
+++ b/frontend/src/container/QueryBuilder/components/Formula/Formula.tsx
@@ -39,6 +39,7 @@ export function Formula({
query,
filterConfigs,
formula,
+ entityVersion: '',
});
const [isCollapse, setIsCollapsed] = useState(false);
@@ -146,6 +147,7 @@ export function Formula({
void;
+ onCloneQuery?: (type: string, query: IBuilderQuery) => void;
onToggleVisibility: () => void;
onCollapseEntity: () => void;
+ onQueryFunctionsUpdates?: (functions: QueryFunctionProps[]) => void;
showDeleteButton: boolean;
isListViewPanel?: boolean;
}
export default function QBEntityOptions({
+ query,
+ isMetricsDataSource,
isCollapsed,
+ showFunctions,
entityType,
entityData,
onDelete,
+ onCloneQuery,
onToggleVisibility,
onCollapseEntity,
showDeleteButton,
- isListViewPanel = false,
+ onQueryFunctionsUpdates,
+ isListViewPanel,
}: QBEntityOptionsProps): JSX.Element {
+ const handleCloneEntity = (): void => {
+ if (isFunction(onCloneQuery)) {
+ onCloneQuery(entityType, entityData);
+ }
+ };
+
return (
@@ -46,6 +78,15 @@ export default function QBEntityOptions({
>
{entityData.disabled ? : }
+
+ {entityType === 'query' && (
+
+
+
+
+
+ )}
+
{entityData.queryName}
+
+ {showFunctions &&
+ isMetricsDataSource &&
+ query &&
+ onQueryFunctionsUpdates && (
+
+ )}
@@ -77,4 +128,9 @@ export default function QBEntityOptions({
QBEntityOptions.defaultProps = {
isListViewPanel: false,
+ query: undefined,
+ isMetricsDataSource: false,
+ onQueryFunctionsUpdates: undefined,
+ showFunctions: false,
+ onCloneQuery: noop,
};
diff --git a/frontend/src/container/QueryBuilder/components/Query/Query.interfaces.ts b/frontend/src/container/QueryBuilder/components/Query/Query.interfaces.ts
index fba34ef6cd..05713395ff 100644
--- a/frontend/src/container/QueryBuilder/components/Query/Query.interfaces.ts
+++ b/frontend/src/container/QueryBuilder/components/Query/Query.interfaces.ts
@@ -7,4 +7,6 @@ export type QueryProps = {
query: IBuilderQuery;
queryVariant: 'static' | 'dropdown';
isListViewPanel?: boolean;
+ showFunctions?: boolean;
+ version: string;
} & Pick;
diff --git a/frontend/src/container/QueryBuilder/components/Query/Query.tsx b/frontend/src/container/QueryBuilder/components/Query/Query.tsx
index 0c90142cb2..1bb761fde7 100644
--- a/frontend/src/container/QueryBuilder/components/Query/Query.tsx
+++ b/frontend/src/container/QueryBuilder/components/Query/Query.tsx
@@ -2,8 +2,9 @@
import './Query.styles.scss';
import { Col, Input, Row } from 'antd';
+import { ENTITY_VERSION_V4 } from 'constants/app';
// ** Constants
-import { PANEL_TYPES } from 'constants/queryBuilder';
+import { ATTRIBUTE_TYPES, PANEL_TYPES } from 'constants/queryBuilder';
import ROUTES from 'constants/routes';
// ** Components
import {
@@ -38,9 +39,11 @@ import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
import { transformToUpperCase } from 'utils/transformToUpperCase';
import QBEntityOptions from '../QBEntityOptions/QBEntityOptions';
+import SpaceAggregationOptions from '../SpaceAggregationOptions/SpaceAggregationOptions';
// ** Types
import { QueryProps } from './Query.interfaces';
+// eslint-disable-next-line sonarjs/cognitive-complexity
export const Query = memo(function Query({
index,
queryVariant,
@@ -48,14 +51,17 @@ export const Query = memo(function Query({
filterConfigs,
queryComponents,
isListViewPanel = false,
+ showFunctions = false,
+ version,
}: QueryProps): JSX.Element {
- const { panelType, currentQuery } = useQueryBuilder();
+ const { panelType, currentQuery, cloneQuery } = useQueryBuilder();
const { pathname } = useLocation();
const [isCollapse, setIsCollapsed] = useState(false);
const {
operators,
+ spaceAggregationOptions,
isMetricsDataSource,
isTracePanelType,
listOfAdditionalFilters,
@@ -63,8 +69,16 @@ export const Query = memo(function Query({
handleChangeQueryData,
handleChangeDataSource,
handleChangeOperator,
+ handleSpaceAggregationChange,
handleDeleteQuery,
- } = useQueryOperations({ index, query, filterConfigs, isListViewPanel });
+ handleQueryFunctionsUpdates,
+ } = useQueryOperations({
+ index,
+ query,
+ filterConfigs,
+ isListViewPanel,
+ entityVersion: version,
+ });
const handleChangeAggregateEvery = useCallback(
(value: IBuilderQuery['stepInterval']) => {
@@ -192,13 +206,17 @@ export const Query = memo(function Query({
-
+
-
+
@@ -225,7 +243,11 @@ export const Query = memo(function Query({
-
+
@@ -257,7 +279,11 @@ export const Query = memo(function Query({
-
+
@@ -279,23 +305,36 @@ export const Query = memo(function Query({
}, [
panelType,
query,
- filterConfigs?.limit?.isHidden,
- filterConfigs?.having?.isHidden,
handleChangeLimit,
+ version,
handleChangeHavingFilter,
renderOrderByFilter,
renderAggregateEveryFilter,
+ filterConfigs?.limit?.isHidden,
+ filterConfigs?.having?.isHidden,
]);
+ const disableOperatorSelector =
+ !query?.aggregateAttribute.key || query?.aggregateAttribute.key === '';
+
+ const isVersionV4 = version && version === ENTITY_VERSION_V4;
+
return (
1}
isListViewPanel={isListViewPanel}
/>
@@ -322,23 +361,42 @@ export const Query = memo(function Query({
{isMetricsDataSource && (
-
-
-
+ {version && version === 'v3' && (
+
+
+
+ )}
+
+
+ {version &&
+ version === ENTITY_VERSION_V4 &&
+ operators &&
+ Array.isArray(operators) &&
+ operators.length > 0 && (
+
+
+
+ )}
)}
-
+
+
{isMetricsDataSource && (
@@ -379,16 +437,40 @@ export const Query = memo(function Query({
)}
{!isListViewPanel && (
-
+
-
+ {isVersionV4 && isMetricsDataSource ? (
+
+ ) : (
+
+ )}
+
{panelType === PANEL_TYPES.VALUE ? (
-
+
+ {isVersionV4 && isMetricsDataSource && (
+
+
+
+ )}
+
+
+
+
) : (
)}
+
+ {isVersionV4 && isMetricsDataSource && panelType === PANEL_TYPES.TABLE && (
+
+
+
+
+
+
+
+
+
+
+
+ )}
)}
diff --git a/frontend/src/container/QueryBuilder/components/QueryFunctions/Function.tsx b/frontend/src/container/QueryBuilder/components/QueryFunctions/Function.tsx
new file mode 100644
index 0000000000..1374c2bb1a
--- /dev/null
+++ b/frontend/src/container/QueryBuilder/components/QueryFunctions/Function.tsx
@@ -0,0 +1,89 @@
+/* eslint-disable react/jsx-props-no-spreading */
+import { Button, Flex, Input, Select } from 'antd';
+import cx from 'classnames';
+import {
+ queryFunctionOptions,
+ queryFunctionsTypesConfig,
+} from 'constants/queryFunctionOptions';
+import { useIsDarkMode } from 'hooks/useDarkMode';
+import { debounce, isNil } from 'lodash-es';
+import { X } from 'lucide-react';
+import { QueryFunctionProps } from 'types/api/queryBuilder/queryBuilderData';
+
+interface FunctionProps {
+ funcData: QueryFunctionProps;
+ index: any;
+ handleUpdateFunctionArgs: any;
+ handleUpdateFunctionName: any;
+ handleDeleteFunction: any;
+}
+
+export default function Function({
+ funcData,
+ index,
+ handleUpdateFunctionArgs,
+ handleUpdateFunctionName,
+ handleDeleteFunction,
+}: FunctionProps): JSX.Element {
+ const isDarkMode = useIsDarkMode();
+ const { showInput } = queryFunctionsTypesConfig[funcData.name];
+
+ let functionValue;
+
+ const hasValue = !isNil(
+ funcData.args && funcData.args.length > 0 && funcData.args[0],
+ );
+
+ if (hasValue) {
+ // eslint-disable-next-line prefer-destructuring
+ functionValue = funcData.args[0];
+ }
+
+ const debouncedHandleUpdateFunctionArgs = debounce(
+ handleUpdateFunctionArgs,
+ 500,
+ );
+
+ return (
+
+ {
+ handleUpdateFunctionName(funcData, index, value);
+ }}
+ dropdownStyle={{
+ minWidth: 200,
+ borderRadius: '4px',
+ border: isDarkMode
+ ? '1px solid var(--bg-slate-400)'
+ : '1px solid var(--bg-vanilla-300)',
+ boxShadow: `4px 10px 16px 2px rgba(0, 0, 0, 0.20)`,
+ }}
+ placement="bottomRight"
+ options={queryFunctionOptions}
+ />
+
+ {showInput && (
+ {
+ debouncedHandleUpdateFunctionArgs(funcData, index, event.target.value);
+ }}
+ />
+ )}
+
+ {
+ handleDeleteFunction(funcData, index);
+ }}
+ >
+
+
+
+ );
+}
diff --git a/frontend/src/container/QueryBuilder/components/QueryFunctions/QueryFunctions.styles.scss b/frontend/src/container/QueryBuilder/components/QueryFunctions/QueryFunctions.styles.scss
new file mode 100644
index 0000000000..8eb6bf2ffd
--- /dev/null
+++ b/frontend/src/container/QueryBuilder/components/QueryFunctions/QueryFunctions.styles.scss
@@ -0,0 +1,151 @@
+.query-functions-container {
+ display: flex;
+ margin: 0 12px;
+ justify-content: center;
+ align-items: center;
+
+ .function-btn,
+ .add-function-btn {
+ display: flex;
+ gap: 8px;
+
+ cursor: pointer;
+ border-radius: 3px !important;
+ }
+
+ .function-btn {
+ border-top-right-radius: 0px !important;
+ border-bottom-right-radius: 0px !important;
+
+ .function-icon {
+ height: 18px;
+ width: 18px;
+ }
+ }
+
+ .add-function-btn {
+ border-top-left-radius: 0px !important;
+ border-bottom-left-radius: 0px !important;
+
+ background-color: var(--bg-slate-500) !important;
+ opacity: 0.8;
+
+ &:disabled {
+ opacity: 0.4;
+ }
+ }
+
+ &.hasFunctions {
+ .function-btn {
+ border-top-right-radius: 3px !important;
+ border-bottom-right-radius: 3px !important;
+ margin-right: 8px;
+ }
+
+ .add-function-btn {
+ border-top-left-radius: 3px !important;
+ border-bottom-left-radius: 3px !important;
+ margin-left: 8px;
+ }
+ }
+}
+
+.query-functions-list {
+ display: flex;
+ gap: 8px;
+
+ .query-function {
+ position: relative;
+
+ &::before {
+ content: '';
+ height: 1px;
+ width: 8px;
+ position: absolute;
+ left: -8px;
+ top: 16px;
+ z-index: 0;
+ color: var(--bg-sakura-500);
+ background-color: var(--bg-sakura-500);
+ }
+
+ &::after {
+ content: '';
+ height: 1px;
+ width: 8px;
+ position: absolute;
+ right: -8px;
+ top: 16px;
+ z-index: 0;
+ color: var(--bg-sakura-500);
+ background-color: var(--bg-sakura-500);
+ }
+
+ .query-function-name-selector {
+ border-top-left-radius: 3px;
+ border-bottom-left-radius: 3px;
+
+ .ant-select-selector {
+ border: none;
+ background: var(--bg-ink-200);
+ }
+
+ &.showInput {
+ .ant-select-selector {
+ border-top-right-radius: 0;
+ border-bottom-right-radius: 0;
+ }
+ }
+ }
+
+ .query-function-value {
+ width: 55px;
+ border-left: 0;
+ background: var(--bg-ink-200);
+ border-radius: 0;
+ border: 1px solid transparent;
+
+ &:focus {
+ border-color: transparent !important;
+ }
+ }
+
+ .query-function-delete-btn {
+ border-top-right-radius: 3px;
+ border-bottom-right-radius: 3px;
+
+ border: none !important;
+
+ border-top-left-radius: 0px !important;
+ border-bottom-left-radius: 0px !important;
+ min-width: 24px !important;
+ }
+ }
+}
+
+.lightMode {
+ .query-functions-container {
+ .add-function-btn {
+ background-color: var(--bg-vanilla-100) !important;
+ }
+ }
+
+ .query-functions-list {
+ .query-function {
+ border: 1px solid var(--bg-vanilla-300);
+ .query-function-name-selector {
+ .ant-select-selector {
+ background: var(--bg-vanilla-100);
+ }
+ }
+
+ .query-function-value {
+ background: var(--bg-vanilla-100);
+
+ &:focus {
+ border-color: transparent !important;
+ }
+ }
+ }
+ }
+}
diff --git a/frontend/src/container/QueryBuilder/components/QueryFunctions/QueryFunctions.tsx b/frontend/src/container/QueryBuilder/components/QueryFunctions/QueryFunctions.tsx
new file mode 100644
index 0000000000..68c2701982
--- /dev/null
+++ b/frontend/src/container/QueryBuilder/components/QueryFunctions/QueryFunctions.tsx
@@ -0,0 +1,181 @@
+import './QueryFunctions.styles.scss';
+
+import { Button, Tooltip } from 'antd';
+import cx from 'classnames';
+import { useIsDarkMode } from 'hooks/useDarkMode';
+import { cloneDeep, pullAt } from 'lodash-es';
+import { Plus } from 'lucide-react';
+import { useState } from 'react';
+import { QueryFunctionProps } from 'types/api/queryBuilder/queryBuilderData';
+import { QueryFunctionsTypes } from 'types/common/queryBuilder';
+
+import Function from './Function';
+
+const defaultFunctionStruct: QueryFunctionProps = {
+ name: QueryFunctionsTypes.CUTOFF_MIN,
+ args: [],
+};
+
+interface QueryFunctionsProps {
+ queryFunctions: QueryFunctionProps[];
+ onChange: (functions: QueryFunctionProps[]) => void;
+}
+
+// SVG component
+function FunctionIcon({
+ fillColor = 'white',
+ className,
+}: {
+ fillColor: string;
+ className: string;
+}): JSX.Element {
+ return (
+
+
+
+
+
+
+ );
+}
+
+export default function QueryFunctions({
+ queryFunctions,
+ onChange,
+}: QueryFunctionsProps): JSX.Element {
+ const [functions, setFunctions] = useState<QueryFunctionProps[]>(
+ queryFunctions,
+ );
+
+ const isDarkMode = useIsDarkMode();
+
+ const handleAddNewFunction = (): void => {
+ const updatedFunctionsArr = [
+ ...functions,
+ {
+ ...defaultFunctionStruct,
+ },
+ ];
+
+ setFunctions(updatedFunctionsArr);
+
+ onChange(updatedFunctionsArr);
+ };
+
+ const handleDeleteFunction = (
+ queryFunction: QueryFunctionProps,
+ index: number,
+ ): void => {
+ const clonedFunctions = cloneDeep(functions);
+ pullAt(clonedFunctions, index);
+
+ setFunctions(clonedFunctions);
+ onChange(clonedFunctions);
+ };
+
+ const handleUpdateFunctionName = (
+ func: QueryFunctionProps,
+ index: number,
+ value: string,
+ ): void => {
+ const updateFunctions = cloneDeep(functions);
+
+ if (updateFunctions && updateFunctions.length > 0 && updateFunctions[index]) {
+ updateFunctions[index].name = value;
+ setFunctions(updateFunctions);
+ onChange(updateFunctions);
+ }
+ };
+
+ const handleUpdateFunctionArgs = (
+ func: QueryFunctionProps,
+ index: number,
+ value: string,
+ ): void => {
+ const updateFunctions = cloneDeep(functions);
+
+ if (updateFunctions && updateFunctions.length > 0 && updateFunctions[index]) {
+ updateFunctions[index].args = [value];
+ setFunctions(updateFunctions);
+ onChange(updateFunctions);
+ }
+ };
+
+ return (
+ 0 ? 'hasFunctions' : '',
+ )}
+ >
+
+
+
+
+
+ {functions.map((func, index) => (
+
+ ))}
+
+ <Tooltip
+ title={
+ functions.length >= 3
+ ? 'Functions are in early access. You can add a maximum of 3 functions as of now.'
+ : ''
+ }
+ placement="right"
+ >
+ <Button
+ className="add-function-btn"
+ disabled={functions.length >= 3}
+ onClick={handleAddNewFunction}
+ >
+
+
+
+
+ );
+}
diff --git a/frontend/src/container/QueryBuilder/components/SpaceAggregationOptions/SpaceAggregationOptions.tsx b/frontend/src/container/QueryBuilder/components/SpaceAggregationOptions/SpaceAggregationOptions.tsx
new file mode 100644
index 0000000000..9ed6f93c45
--- /dev/null
+++ b/frontend/src/container/QueryBuilder/components/SpaceAggregationOptions/SpaceAggregationOptions.tsx
@@ -0,0 +1,67 @@
+import { Select } from 'antd';
+import { ATTRIBUTE_TYPES, PANEL_TYPES } from 'constants/queryBuilder';
+import { useEffect, useState } from 'react';
+import { MetricAggregateOperator } from 'types/common/queryBuilder';
+
+interface SpaceAggregationOptionsProps {
+ panelType: PANEL_TYPES | null;
+ selectedValue: string | undefined;
+ aggregatorAttributeType: ATTRIBUTE_TYPES | null;
+ disabled: boolean;
+ onSelect: (value: string) => void;
+ operators: any[];
+}
+
+export default function SpaceAggregationOptions({
+ panelType,
+ selectedValue,
+ aggregatorAttributeType = ATTRIBUTE_TYPES.GAUGE,
+ disabled,
+ onSelect,
+ operators,
+}: SpaceAggregationOptionsProps): JSX.Element {
+ const placeHolderText = panelType === PANEL_TYPES.VALUE ? 'Sum' : 'Sum By';
+ const [defaultValue, setDefaultValue] = useState(
+ selectedValue || placeHolderText,
+ );
+
+ useEffect(() => {
+ if (!selectedValue) {
+ if (
+ aggregatorAttributeType === ATTRIBUTE_TYPES.HISTOGRAM ||
+ aggregatorAttributeType === ATTRIBUTE_TYPES.EXPONENTIAL_HISTOGRAM
+ ) {
+ setDefaultValue(MetricAggregateOperator.P90);
+ onSelect(MetricAggregateOperator.P90);
+ } else if (aggregatorAttributeType === ATTRIBUTE_TYPES.SUM) {
+ setDefaultValue(MetricAggregateOperator.SUM);
+ onSelect(MetricAggregateOperator.SUM);
+ } else if (aggregatorAttributeType === ATTRIBUTE_TYPES.GAUGE) {
+ setDefaultValue(MetricAggregateOperator.AVG);
+ onSelect(MetricAggregateOperator.AVG);
+ }
+ }
+
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [aggregatorAttributeType]);
+
+ return (
+
+
+ {operators.map((operator) => (
+
+ {operator.label} {panelType !== PANEL_TYPES.VALUE ? ' By' : ''}
+
+ ))}
+
+
+ );
+}
diff --git a/frontend/src/container/QueryBuilder/filters/AggregatorFilter/AggregatorFilter.tsx b/frontend/src/container/QueryBuilder/filters/AggregatorFilter/AggregatorFilter.tsx
index f27336e96a..492329f69b 100644
--- a/frontend/src/container/QueryBuilder/filters/AggregatorFilter/AggregatorFilter.tsx
+++ b/frontend/src/container/QueryBuilder/filters/AggregatorFilter/AggregatorFilter.tsx
@@ -111,7 +111,7 @@ export const AggregatorFilter = memo(function AggregatorFilter({
debouncedValue,
query.aggregateOperator,
query.dataSource,
- ])?.payload.attributeKeys || [],
+ ])?.payload?.attributeKeys || [],
[debouncedValue, query.aggregateOperator, query.dataSource, queryClient],
);
diff --git a/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx b/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx
index 386786f70c..e7b00756f5 100644
--- a/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx
+++ b/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx
@@ -1,11 +1,7 @@
import { Select, Spin } from 'antd';
import { getAggregateKeys } from 'api/queryBuilder/getAttributeKeys';
// ** Constants
-import {
- idDivider,
- QueryBuilderKeys,
- selectValueDivider,
-} from 'constants/queryBuilder';
+import { idDivider, QueryBuilderKeys } from 'constants/queryBuilder';
import { DEBOUNCE_DELAY } from 'constants/queryBuilderFilterConfig';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import useDebounce from 'hooks/useDebounce';
@@ -83,11 +79,7 @@ export const GroupByFilter = memo(function GroupByFilter({
dataType={item.dataType || ''}
/>
),
- value: `${transformStringWithPrefix({
- str: item.key,
- prefix: item.type || '',
- condition: !item.isColumn,
- })}${selectValueDivider}${item.id}`,
+ value: `${item.id}`,
})) || [];
setOptionsData(options);
@@ -135,7 +127,8 @@ export const GroupByFilter = memo(function GroupByFilter({
const keys = await getAttributeKeys();
const groupByValues: BaseAutocompleteData[] = values.map((item) => {
- const [currentValue, id] = item.value.split(selectValueDivider);
+ const id = item.value;
+ const currentValue = item.value.split(idDivider)[0];
if (id && id.includes(idDivider)) {
const attribute = keys.find((item) => item.id === id);
@@ -174,11 +167,7 @@ export const GroupByFilter = memo(function GroupByFilter({
condition: !item.isColumn,
}),
)}`,
- value: `${transformStringWithPrefix({
- str: item.key,
- prefix: item.type || '',
- condition: !item.isColumn,
- })}${selectValueDivider}${item.id}`,
+ value: `${item.id}`,
}),
);
diff --git a/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.interfaces.ts b/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.interfaces.ts
index d4c9ebbf88..1bdb838aa6 100644
--- a/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.interfaces.ts
+++ b/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.interfaces.ts
@@ -1,6 +1,7 @@
import { Having, IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
export type HavingFilterProps = {
+ entityVersion: string;
query: IBuilderQuery;
onChange: (having: Having[]) => void;
};
diff --git a/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.tsx b/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.tsx
index 4f638496f4..7d11d018cc 100644
--- a/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.tsx
+++ b/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.tsx
@@ -1,4 +1,5 @@
import { Select } from 'antd';
+import { ENTITY_VERSION_V4 } from 'constants/app';
// ** Constants
import { HAVING_OPERATORS, initialHavingValues } from 'constants/queryBuilder';
import { HavingFilterTag } from 'container/QueryBuilder/components';
@@ -22,6 +23,7 @@ import { getHavingObject, isValidHavingValue } from '../utils';
import { HavingFilterProps } from './HavingFilter.interfaces';
export function HavingFilter({
+ entityVersion,
query,
onChange,
}: HavingFilterProps): JSX.Element {
@@ -48,10 +50,18 @@ export function HavingFilter({
[query],
);
- const columnName = useMemo(
- () => `${query.aggregateOperator.toUpperCase()}(${aggregatorAttribute})`,
- [query, aggregatorAttribute],
- );
+ const columnName = useMemo(() => {
+ if (
+ query &&
+ query.dataSource === DataSource.METRICS &&
+ query.spaceAggregation &&
+ entityVersion === ENTITY_VERSION_V4
+ ) {
+ return `${query.spaceAggregation.toUpperCase()}(${aggregatorAttribute})`;
+ }
+
+ return `${query.aggregateOperator.toUpperCase()}(${aggregatorAttribute})`;
+ }, [query, aggregatorAttribute, entityVersion]);
const aggregatorOptions: SelectOption[] = useMemo(
() => [{ label: columnName, value: columnName }],
@@ -211,7 +221,7 @@ export function HavingFilter({
disabled={isMetricsDataSource && !query.aggregateAttribute.key}
style={{ width: '100%' }}
notFoundContent={currentFormValue.value.length === 0 ? undefined : null}
- placeholder="Count(operation) > 5"
+ placeholder="GroupBy(operation) > 5"
onDeselect={handleDeselect}
onChange={handleChange}
onSelect={handleSelect}
diff --git a/frontend/src/container/QueryBuilder/filters/HavingFilter/__tests__/utils.test.tsx b/frontend/src/container/QueryBuilder/filters/HavingFilter/__tests__/utils.test.tsx
index 5732b22712..24cf6b1730 100644
--- a/frontend/src/container/QueryBuilder/filters/HavingFilter/__tests__/utils.test.tsx
+++ b/frontend/src/container/QueryBuilder/filters/HavingFilter/__tests__/utils.test.tsx
@@ -1,5 +1,6 @@
import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
+import { DEFAULT_ENTITY_VERSION } from 'constants/app';
// Constants
import {
HAVING_OPERATORS,
@@ -31,6 +32,7 @@ describe('Having filter behaviour', () => {
,
);
@@ -49,6 +51,7 @@ describe('Having filter behaviour', () => {
,
);
@@ -62,7 +65,11 @@ describe('Having filter behaviour', () => {
test('Is having filter is enable', () => {
const mockFn = jest.fn();
const { unmount } = render(
- ,
+ ,
);
const input = screen.getByRole('combobox');
@@ -80,7 +87,11 @@ describe('Having filter behaviour', () => {
const optionTestTitle = 'havingOption';
const { unmount } = render(
- ,
+ ,
);
// get input
diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/OptionRenderer.tsx b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/OptionRenderer.tsx
index 32bf8fbd40..a7dcef96c3 100644
--- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/OptionRenderer.tsx
+++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/OptionRenderer.tsx
@@ -1,9 +1,8 @@
-import {
- SelectOptionContainer,
- TagContainer,
- TagLabel,
- TagValue,
-} from './style';
+import './QueryBuilderSearch.styles.scss';
+
+import { Tooltip } from 'antd';
+
+import { TagContainer, TagLabel, TagValue } from './style';
import { getOptionType } from './utils';
function OptionRenderer({
@@ -16,21 +15,25 @@ function OptionRenderer({
return (
{optionType ? (
-
- {value}
-
-
- Type:
- {optionType}
-
-
- Data type:
- {dataType}
-
+
+
+
{value}
+
+
+ Type:
+ {optionType}
+
+
+ Data type:
+ {dataType}
+
+
-
+
) : (
-
{label}
+
+ {label}
+
)}
);
diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/QueryBuilderSearch.styles.scss b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/QueryBuilderSearch.styles.scss
index 8fd979fa8e..a6f5fcaf37 100644
--- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/QueryBuilderSearch.styles.scss
+++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/QueryBuilderSearch.styles.scss
@@ -1,3 +1,16 @@
+.selectOptionContainer {
+ display: flex;
+ gap: 8px;
+ justify-content: space-between;
+ align-items: center;
+ overflow-x: auto;
+
+ &::-webkit-scrollbar {
+ width: 0.2rem;
+ height: 0.2rem;
+ }
+}
+
.lightMode {
.query-builder-search {
.ant-select-dropdown {
diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/index.tsx b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/index.tsx
index 46d535737a..85c7ea2c64 100644
--- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/index.tsx
+++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/index.tsx
@@ -59,6 +59,7 @@ function QueryBuilderSearch({
updateTag,
handleClearTag,
handleKeyDown,
+ handleOnBlur,
handleSearch,
handleSelect,
tags,
@@ -260,6 +261,7 @@ function QueryBuilderSearch({
notFoundContent={isFetching ?
: null}
suffixIcon={suffixIcon}
showAction={['focus']}
+ onBlur={handleOnBlur}
>
{options.map((option) => (
diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/style.ts b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/style.ts
index fd6d5f209e..5e010ff34a 100644
--- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/style.ts
+++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/style.ts
@@ -16,19 +16,12 @@ export const StyledCheckOutlined = styled(CheckOutlined)`
float: right;
`;
-export const SelectOptionContainer = styled.div`
- display: flex;
- gap: 8px;
- justify-content: space-between;
- align-items: center;
- overflow-x: auto;
-`;
-
export const TagContainer = styled(Tag)`
&&& {
+ display: inline-block;
border-radius: 3px;
- padding: 0.3rem 0.3rem;
- font-weight: 400;
+ padding: 0.1rem 0.2rem;
+ font-weight: 300;
font-size: 0.6rem;
}
`;
diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts
index c549a6fd62..ec7eba3973 100644
--- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts
+++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts
@@ -5,7 +5,7 @@ import { parse } from 'papaparse';
import { orderByValueDelimiter } from '../OrderByFilter/utils';
// eslint-disable-next-line no-useless-escape
-export const tagRegexp = /^\s*(.*?)\s*(IN|NOT_IN|LIKE|NOT_LIKE|REGEX|NOT_REGEX|=|!=|EXISTS|NOT_EXISTS|CONTAINS|NOT_CONTAINS|>=|>|<=|<|HAS|NHAS)\s*(.*)$/g;
+export const tagRegexp = /^\s*(.*?)\s*(\bIN\b|\bNOT_IN\b|\bLIKE\b|\bNOT_LIKE\b|\bREGEX\b|\bNOT_REGEX\b|=|!=|\bEXISTS\b|\bNOT_EXISTS\b|\bCONTAINS\b|\bNOT_CONTAINS\b|>=|>|<=|<|\bHAS\b|\bNHAS\b)\s*(.*)$/gi;
export function isInNInOperator(value: string): boolean {
return value === OPERATORS.IN || value === OPERATORS.NIN;
@@ -25,8 +25,8 @@ export function getTagToken(tag: string): ITagToken {
const [, matchTagKey, matchTagOperator, matchTagValue] = match;
return {
tagKey: matchTagKey,
- tagOperator: matchTagOperator,
- tagValue: isInNInOperator(matchTagOperator)
+ tagOperator: matchTagOperator.toUpperCase(),
+ tagValue: isInNInOperator(matchTagOperator.toUpperCase())
? parse(matchTagValue).data.flat()
: matchTagValue,
} as ITagToken;
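
The regexp change above does two things at once: the `\b` guards keep operator names from matching inside attribute keys, which the new `i` flag would otherwise allow, and `toUpperCase()` normalizes whatever case the user typed. Expected behavior, assuming `getTagToken` as defined above:

```ts
import { getTagToken } from 'container/QueryBuilder/filters/QueryBuilderSearch/utils';

// Lowercase operators now parse, and come back normalized to uppercase.
getTagToken('service.name in production,staging');
// => { tagKey: 'service.name', tagOperator: 'IN', tagValue: ['production', 'staging'] }

// Without \b, the case-insensitive flag would let the "in" inside "links"
// match the IN operator; with it, the = operator is found instead.
getTagToken('links = 5');
// => { tagKey: 'links', tagOperator: '=', tagValue: '5' }
```
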
diff --git a/frontend/src/container/QueryTable/QueryTable.tsx b/frontend/src/container/QueryTable/QueryTable.tsx
index 34b146f6da..2fa1d05f65 100644
--- a/frontend/src/container/QueryTable/QueryTable.tsx
+++ b/frontend/src/container/QueryTable/QueryTable.tsx
@@ -22,7 +22,8 @@ export function QueryTable({
...props
}: QueryTableProps): JSX.Element {
const { isDownloadEnabled = false, fileName = '' } = downloadOption || {};
- const { servicename } = useParams();
+ const { servicename: encodedServiceName } = useParams();
+ const servicename = decodeURIComponent(encodedServiceName);
const { loading } = props;
const { columns: newColumns, dataSource: newDataSource } = useMemo(() => {
if (columns && dataSource) {
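
The `decodeURIComponent` guard matters because route params arrive percent-encoded; a service named `checkout service` would otherwise flow into column builders and download filenames as `checkout%20service`. In miniature:

```ts
// What useParams() yields for a route like /services/checkout%20service
const encodedServiceName = 'checkout%20service';
const servicename = decodeURIComponent(encodedServiceName); // 'checkout service'
```
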
diff --git a/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.styles.scss b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.styles.scss
new file mode 100644
index 0000000000..51b4c266b2
--- /dev/null
+++ b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.styles.scss
@@ -0,0 +1,33 @@
+.resourceAttributesFilter-container {
+ display: flex;
+ align-items: center;
+ justify-content: stretch;
+ flex-wrap: wrap;
+ gap: 8px;
+ margin-bottom: 16px;
+
+ .resource-attributes-selector {
+ flex: 1;
+ border-radius: 3px;
+
+ background-color: var(--bg-ink-400);
+ border: 1px solid #454c58;
+ }
+
+ .environment-selector {
+ min-width: 200px;
+ }
+
+ .ant-form-item {
+ margin-bottom: 0;
+ }
+}
+
+.lightMode {
+ .resourceAttributesFilter-container {
+ .resource-attributes-selector {
+ border: 1px solid #d9d9d9;
+ background: var(--bg-vanilla-100);
+ }
+ }
+}
diff --git a/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx
index a61a0ce0ee..4211291742 100644
--- a/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx
+++ b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx
@@ -1,10 +1,17 @@
+import './ResourceAttributesFilter.styles.scss';
+
import { CloseCircleFilled } from '@ant-design/icons';
import { Button, Select, Spin } from 'antd';
import useResourceAttribute, {
isResourceEmpty,
} from 'hooks/useResourceAttribute';
-import { convertMetricKeyToTrace } from 'hooks/useResourceAttribute/utils';
-import { ReactNode, useMemo } from 'react';
+import {
+ convertMetricKeyToTrace,
+ getEnvironmentTagKeys,
+ getEnvironmentTagValues,
+} from 'hooks/useResourceAttribute/utils';
+import { ReactNode, useEffect, useMemo, useState } from 'react';
+import { SelectOption } from 'types/common/select';
import { popupContainer } from 'utils/selectPopupContainer';
import { v4 as uuid } from 'uuid';
@@ -22,60 +29,129 @@ function ResourceAttributesFilter({
handleClearAll,
handleFocus,
handleChange,
+ handleEnvironmentChange,
selectedQuery,
optionsData,
loading,
} = useResourceAttribute();
- const isEmpty = useMemo(
- () => isResourceEmpty(queries, staging, selectedQuery),
- [queries, selectedQuery, staging],
+ const [environments, setEnvironments] = useState<
+ SelectOption<string, string>[]
+ >([]);
+
+ const [selectedEnvironments, setSelectedEnvironments] = useState<string[]>([]);
+
+ const queriesExcludingEnvironment = useMemo(
+ () =>
+ queries.filter(
+ (query) => query.tagKey !== 'resource_deployment_environment',
+ ),
+ [queries],
);
- return (
-
-
- {queries.map((query) => (
-
- ))}
- {staging.map((query, idx) => (
-
- {idx === 0 ? convertMetricKeyToTrace(query) : query}
-
- ))}
-
-
- Loading...
-
- ) : (
-
- No resource attributes available to filter. Please refer docs to send
- attributes.
-
- )
- }
- />
+ const isEmpty = useMemo(
+ () => isResourceEmpty(queriesExcludingEnvironment, staging, selectedQuery),
+ [queriesExcludingEnvironment, selectedQuery, staging],
+ );
- {queries.length || staging.length || selectedQuery.length ? (
- } type="text" />
- ) : null}
-
+ useEffect(() => {
+ const resourceDeploymentEnvironmentQuery = queries.filter(
+ (query) => query.tagKey === 'resource_deployment_environment',
+ );
+
+ if (resourceDeploymentEnvironmentQuery?.length > 0) {
+ setSelectedEnvironments(resourceDeploymentEnvironmentQuery[0].tagValue);
+ } else {
+ setSelectedEnvironments([]);
+ }
+ }, [queries]);
+
+ useEffect(() => {
+ getEnvironmentTagKeys().then((tagKeys) => {
+ if (tagKeys && Array.isArray(tagKeys) && tagKeys.length > 0) {
+ getEnvironmentTagValues().then((tagValues) => {
+ setEnvironments(tagValues);
+ });
+ }
+ });
+ }, []);
+
+ return (
+
+
+
+ {environments.map((opt) => (
+
+ {opt.label}
+
+ ))}
+
+
+
+
+
+
+ {queriesExcludingEnvironment.map((query) => (
+
+ ))}
+ {staging.map((query, idx) => (
+
+ {idx === 0 ? convertMetricKeyToTrace(query) : query}
+
+ ))}
+
+
+ Loading...
+
+ ) : (
+
+ No resource attributes available to filter. Please refer to the docs to
+ send attributes.
+
+ )
+ }
+ />
+
+ {queries.length || staging.length || selectedQuery.length ? (
+ }
+ type="text"
+ />
+ ) : null}
+
+
+
);
}
diff --git a/frontend/src/container/ResourceAttributesFilter/components/QueryChip/QueryChip.tsx b/frontend/src/container/ResourceAttributesFilter/components/QueryChip/QueryChip.tsx
index 363e6d5143..b2babd78b5 100644
--- a/frontend/src/container/ResourceAttributesFilter/components/QueryChip/QueryChip.tsx
+++ b/frontend/src/container/ResourceAttributesFilter/components/QueryChip/QueryChip.tsx
@@ -12,7 +12,10 @@ function QueryChip({ queryData, onClose }: IQueryChipProps): JSX.Element {
{convertMetricKeyToTrace(queryData.tagKey)}
{queryData.operator}
-
+
{queryData.tagValue.join(', ')}
diff --git a/frontend/src/container/ResourceAttributesFilter/styles.ts b/frontend/src/container/ResourceAttributesFilter/styles.ts
index c1dcd863f2..390190d2e4 100644
--- a/frontend/src/container/ResourceAttributesFilter/styles.ts
+++ b/frontend/src/container/ResourceAttributesFilter/styles.ts
@@ -7,9 +7,9 @@ export const SearchContainer = styled.div`
display: flex;
align-items: center;
gap: 0.2rem;
- padding: 0.2rem;
- margin: 1rem 0;
- border: 1px solid #ccc5;
+ padding: 0 0.2rem;
+ box-sizing: border-box;
+ border-radius: 3px;
`;
export const QueryChipContainer = styled.span`
diff --git a/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricTable.tsx b/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricTable.tsx
index 5213513dc8..f592f6a540 100644
--- a/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricTable.tsx
+++ b/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricTable.tsx
@@ -1,6 +1,7 @@
import { WarningFilled } from '@ant-design/icons';
import { Flex, Typography } from 'antd';
import { ResizeTable } from 'components/ResizeTable';
+import { ENTITY_VERSION_V4 } from 'constants/app';
import { MAX_RPS_LIMIT } from 'constants/global';
import ResourceAttributesFilter from 'container/ResourceAttributesFilter';
import { useGetQueriesRange } from 'hooks/queryBuilder/useGetQueriesRange';
@@ -35,7 +36,7 @@ function ServiceMetricTable({
const { data: licenseData, isFetching } = useLicense();
const isCloudUserVal = isCloudUser();
- const queries = useGetQueriesRange(queryRangeRequestData, {
+ const queries = useGetQueriesRange(queryRangeRequestData, ENTITY_VERSION_V4, {
queryKey: [
`GetMetricsQueryRange-${queryRangeRequestData[0].selectedTime}-${globalSelectedInterval}`,
maxTime,
diff --git a/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricsQuery.ts b/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricsQuery.ts
index 46f94acd87..352e144ade 100644
--- a/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricsQuery.ts
+++ b/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricsQuery.ts
@@ -166,11 +166,17 @@ export const serviceMetricsQuery = (
operationPrSecondAdditionalItems,
];
- const aggregateOperators = [
- MetricAggregateOperator.HIST_QUANTILE_99,
- MetricAggregateOperator.SUM_RATE,
- MetricAggregateOperator.SUM_RATE,
- MetricAggregateOperator.SUM_RATE,
+ const timeAggregateOperators = [
+ MetricAggregateOperator.EMPTY,
+ MetricAggregateOperator.RATE,
+ MetricAggregateOperator.RATE,
+ MetricAggregateOperator.RATE,
+ ];
+ const spaceAggregateOperators = [
+ MetricAggregateOperator.P99,
+ MetricAggregateOperator.SUM,
+ MetricAggregateOperator.SUM,
+ MetricAggregateOperator.SUM,
];
const disabled = [false, true, true, false];
@@ -201,7 +207,8 @@ export const serviceMetricsQuery = (
additionalItems,
disabled,
legends,
- aggregateOperators,
+ timeAggregateOperators,
+ spaceAggregateOperators,
expressions,
legendFormulas,
groupBy,
diff --git a/frontend/src/container/SideNav/menuItems.tsx b/frontend/src/container/SideNav/menuItems.tsx
index 9b897d2a9a..ed6f10b10a 100644
--- a/frontend/src/container/SideNav/menuItems.tsx
+++ b/frontend/src/container/SideNav/menuItems.tsx
@@ -16,6 +16,7 @@ import {
ScrollText,
Settings,
Slack,
+ // Unplug,
UserPlus,
} from 'lucide-react';
@@ -89,6 +90,11 @@ const menuItems: SidebarItem[] = [
label: 'Alerts',
icon: ,
},
+ // {
+ // key: ROUTES.INTEGRATIONS_INSTALLED,
+ // label: 'Integrations',
+ // icon: ,
+ // },
{
key: ROUTES.ALL_ERROR,
label: 'Exceptions',
@@ -121,6 +127,7 @@ export const NEW_ROUTES_MENU_ITEM_KEY_MAP: Record = {
[ROUTES.TRACES_EXPLORER]: ROUTES.TRACE,
[ROUTES.TRACE_EXPLORER]: ROUTES.TRACE,
[ROUTES.LOGS_BASE]: ROUTES.LOGS_EXPLORER,
+ [ROUTES.INTEGRATIONS_BASE]: ROUTES.INTEGRATIONS_INSTALLED,
};
export default menuItems;
diff --git a/frontend/src/container/TimeSeriesView/index.tsx b/frontend/src/container/TimeSeriesView/index.tsx
index 4acffd7e61..2dd009746d 100644
--- a/frontend/src/container/TimeSeriesView/index.tsx
+++ b/frontend/src/container/TimeSeriesView/index.tsx
@@ -1,3 +1,4 @@
+import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
import { useGetQueryRange } from 'hooks/queryBuilder/useGetQueryRange';
@@ -49,6 +50,7 @@ function TimeSeriesViewContainer({
dataSource,
},
},
+ DEFAULT_ENTITY_VERSION,
{
queryKey: [
REACT_QUERY_KEY.GET_QUERY_RANGE,
diff --git a/frontend/src/container/TopNav/DateTimeSelection/config.ts b/frontend/src/container/TopNav/DateTimeSelection/config.ts
index bc77afe7d6..3618686c95 100644
--- a/frontend/src/container/TopNav/DateTimeSelection/config.ts
+++ b/frontend/src/container/TopNav/DateTimeSelection/config.ts
@@ -81,6 +81,10 @@ export const routesToSkip = [
ROUTES.ALL_CHANNELS,
ROUTES.USAGE_EXPLORER,
ROUTES.GET_STARTED,
+ ROUTES.GET_STARTED_APPLICATION_MONITORING,
+ ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING,
+ ROUTES.GET_STARTED_LOGS_MANAGEMENT,
+ ROUTES.GET_STARTED_AWS_MONITORING,
ROUTES.VERSION,
ROUTES.ALL_DASHBOARD,
ROUTES.ORG_SETTINGS,
diff --git a/frontend/src/container/TopNav/DateTimeSelectionV2/DateTimeSelectionV2.styles.scss b/frontend/src/container/TopNav/DateTimeSelectionV2/DateTimeSelectionV2.styles.scss
index 73688ad109..bd4cc3cdb1 100644
--- a/frontend/src/container/TopNav/DateTimeSelectionV2/DateTimeSelectionV2.styles.scss
+++ b/frontend/src/container/TopNav/DateTimeSelectionV2/DateTimeSelectionV2.styles.scss
@@ -58,8 +58,6 @@
.date-time-root {
.ant-popover-inner {
- width: 532px;
- min-height: 334px;
border-radius: 4px !important;
border: 1px solid var(--bg-slate-400);
box-shadow: 4px 10px 16px 2px rgba(0, 0, 0, 0.2) !important;
@@ -129,12 +127,20 @@
}
.relative-date-time {
- width: 307px;
display: flex;
flex-direction: column;
gap: 35px;
padding: 13px 14px;
+ &.date-picker {
+ width: 480px;
+ height: 430px;
+ }
+
+ &.relative-times {
+ width: 320px;
+ }
+
.relative-date-time-section {
display: flex;
gap: 6px;
diff --git a/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts b/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts
index becd3fed7b..6231505580 100644
--- a/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts
+++ b/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts
@@ -53,7 +53,7 @@ export const Options: Option[] = [
{ value: '1day', label: 'Last 1 day' },
{ value: '3days', label: 'Last 3 days' },
{ value: '1week', label: 'Last 1 week' },
- { value: 'custom', label: 'Custom...' },
+ { value: 'custom', label: 'Custom' },
];
export interface Option {
@@ -117,6 +117,10 @@ export const routesToSkip = [
ROUTES.ALL_CHANNELS,
ROUTES.USAGE_EXPLORER,
ROUTES.GET_STARTED,
+ ROUTES.GET_STARTED_APPLICATION_MONITORING,
+ ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING,
+ ROUTES.GET_STARTED_LOGS_MANAGEMENT,
+ ROUTES.GET_STARTED_AWS_MONITORING,
ROUTES.VERSION,
ROUTES.ALL_DASHBOARD,
ROUTES.ORG_SETTINGS,
@@ -135,6 +139,9 @@ export const routesToSkip = [
ROUTES.TRACES_EXPLORER,
ROUTES.TRACES_SAVE_VIEWS,
ROUTES.SHORTCUTS,
+ ROUTES.INTEGRATIONS_BASE,
+ ROUTES.INTEGRATIONS_INSTALLED,
+ ROUTES.INTEGRATIONS_MARKETPLACE,
];
export const routesToDisable = [ROUTES.LOGS_EXPLORER, ROUTES.LIVE_LOGS];
diff --git a/frontend/src/container/TopNav/DateTimeSelectionV2/index.tsx b/frontend/src/container/TopNav/DateTimeSelectionV2/index.tsx
index 6e5c0c5b48..3ef5125ad7 100644
--- a/frontend/src/container/TopNav/DateTimeSelectionV2/index.tsx
+++ b/frontend/src/container/TopNav/DateTimeSelectionV2/index.tsx
@@ -44,7 +44,6 @@ import { DateTimeRangeType } from '../CustomDateTimeModal';
import {
getDefaultOption,
getOptions,
- LexicalContext,
LocalStorageTimeRange,
Time,
TimeRange,
@@ -319,22 +318,12 @@ function DateTimeSelection({
onLastRefreshHandler();
};
- const onCustomDateHandler = (
- dateTimeRange: DateTimeRangeType,
- lexicalContext?: LexicalContext,
- ): void => {
+ const onCustomDateHandler = (dateTimeRange: DateTimeRangeType): void => {
if (dateTimeRange !== null) {
const [startTimeMoment, endTimeMoment] = dateTimeRange;
if (startTimeMoment && endTimeMoment) {
- let startTime = startTimeMoment;
- let endTime = endTimeMoment;
- if (
- lexicalContext &&
- lexicalContext === LexicalContext.CUSTOM_DATE_PICKER
- ) {
- startTime = startTime.startOf('day');
- endTime = endTime.endOf('day');
- }
+ const startTime = startTimeMoment;
+ const endTime = endTimeMoment;
setCustomDTPickerVisible(false);
updateTimeInterval('custom', [
startTime.toDate().getTime(),
diff --git a/frontend/src/container/TraceDetail/SelectedSpanDetails/config.ts b/frontend/src/container/TraceDetail/SelectedSpanDetails/config.ts
index 46d0a5fc90..145b79f2ff 100644
--- a/frontend/src/container/TraceDetail/SelectedSpanDetails/config.ts
+++ b/frontend/src/container/TraceDetail/SelectedSpanDetails/config.ts
@@ -49,13 +49,16 @@ export const getTraceToLogsQuery = (
limit: null,
aggregateAttribute: initialAutocompleteData,
aggregateOperator: LogsAggregatorOperator.NOOP,
+ timeAggregation: '',
+ spaceAggregation: '',
+ functions: [],
expression: 'A',
groupBy: [],
having: [],
legend: '',
orderBy: [],
queryName: 'A',
- reduceTo: 'min',
+ reduceTo: 'avg',
stepInterval: getStep({
start: minTime,
end: maxTime,
diff --git a/frontend/src/container/TracesExplorer/ListView/index.tsx b/frontend/src/container/TracesExplorer/ListView/index.tsx
index 4f18bb3a27..f78be8c4de 100644
--- a/frontend/src/container/TracesExplorer/ListView/index.tsx
+++ b/frontend/src/container/TracesExplorer/ListView/index.tsx
@@ -1,4 +1,5 @@
import { ResizeTable } from 'components/ResizeTable';
+import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { LOCALSTORAGE } from 'constants/localStorage';
import { QueryParams } from 'constants/query';
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
@@ -62,6 +63,7 @@ function ListView(): JSX.Element {
selectColumns: options?.selectColumns,
},
},
+ DEFAULT_ENTITY_VERSION,
{
queryKey: [
REACT_QUERY_KEY.GET_QUERY_RANGE,
diff --git a/frontend/src/container/TracesExplorer/QuerySection/index.tsx b/frontend/src/container/TracesExplorer/QuerySection/index.tsx
index 279f57b5df..0bd9515720 100644
--- a/frontend/src/container/TracesExplorer/QuerySection/index.tsx
+++ b/frontend/src/container/TracesExplorer/QuerySection/index.tsx
@@ -53,6 +53,7 @@ function QuerySection(): JSX.Element {
}}
filterConfigs={filterConfigs}
queryComponents={queryComponents}
+ version="v3" // setting this to v3 as we this is rendered in logs explorer
actions={
diff --git a/frontend/src/container/TracesExplorer/TableView/index.tsx b/frontend/src/container/TracesExplorer/TableView/index.tsx
index 11f27f6201..c6544f8eee 100644
--- a/frontend/src/container/TracesExplorer/TableView/index.tsx
+++ b/frontend/src/container/TracesExplorer/TableView/index.tsx
@@ -1,4 +1,5 @@
import { Space } from 'antd';
+import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
import { QueryTable } from 'container/QueryTable';
@@ -27,6 +28,7 @@ function TableView(): JSX.Element {
dataSource: 'traces',
},
},
+ DEFAULT_ENTITY_VERSION,
{
queryKey: [
REACT_QUERY_KEY.GET_QUERY_RANGE,
diff --git a/frontend/src/container/TracesExplorer/TracesView/index.tsx b/frontend/src/container/TracesExplorer/TracesView/index.tsx
index 21fa41431c..2093881e01 100644
--- a/frontend/src/container/TracesExplorer/TracesView/index.tsx
+++ b/frontend/src/container/TracesExplorer/TracesView/index.tsx
@@ -1,5 +1,6 @@
import { Typography } from 'antd';
import { ResizeTable } from 'components/ResizeTable';
+import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { QueryParams } from 'constants/query';
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
@@ -41,6 +42,7 @@ function TracesView(): JSX.Element {
pagination: paginationQueryData,
},
},
+ DEFAULT_ENTITY_VERSION,
{
queryKey: [
REACT_QUERY_KEY.GET_QUERY_RANGE,
diff --git a/frontend/src/container/TracesTableComponent/TracesTableComponent.styles.scss b/frontend/src/container/TracesTableComponent/TracesTableComponent.styles.scss
index e1ff9ba437..0787972eec 100644
--- a/frontend/src/container/TracesTableComponent/TracesTableComponent.styles.scss
+++ b/frontend/src/container/TracesTableComponent/TracesTableComponent.styles.scss
@@ -1,65 +1,73 @@
.traces-table {
- position: relative;
- display: flex;
- flex-direction: column;
- height: 100%;
+ position: relative;
+ display: flex;
+ flex-direction: column;
+ height: 100%;
- .resize-table {
- height: calc(90% - 5px);
- overflow: scroll;
-
- .ant-table-wrapper .ant-table-tbody >tr >td {
- border: none;
- background-color: transparent;
- color: var(--bg-vanilla-100);
- font-size: 14px;
- font-style: normal;
- font-weight: 400;
- line-height: 18px;
- padding: 10px 8px;
- font-family: Inter;
- cursor: pointer;
- }
+ .resize-table {
+ height: calc(100% - 40px);
+ overflow: scroll;
+ overflow-x: hidden;
- .ant-table-wrapper .ant-table-thead > tr > th {
- font-family: Inter;
- color: var(--bg-vanilla-100);
- background-color: transparent;
- border: none;
- border-bottom: 0.5px solid var(--bg-slate-400);
- font-size: 14px;
- font-style: normal;
- font-weight: 600;
- line-height: 22px;
- letter-spacing: 0.5px;
- padding: 8px;
- }
+ &::-webkit-scrollbar {
+ width: 0.2rem;
+ height: 0.2rem;
+ }
- .ant-table-wrapper .ant-table-thead > tr > th::before {
- display: none;
- }
- }
+ .ant-table-wrapper .ant-table-tbody > tr > td {
+ border: none;
+ background-color: transparent;
+ color: var(--bg-vanilla-100);
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 18px;
+ padding: 10px 8px;
+ font-family: Inter;
+ cursor: pointer;
+ }
- .controller {
- position: absolute;
- bottom: 5px;
- right: 10px;
- }
+ .ant-table-wrapper .ant-table-thead > tr > th {
+ font-family: Inter;
+ color: var(--bg-vanilla-100);
+ background-color: transparent;
+ border: none;
+ border-bottom: 0.5px solid var(--bg-slate-400);
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 600;
+ line-height: 22px;
+ letter-spacing: 0.5px;
+ padding: 8px;
+ }
+
+ .ant-table-wrapper .ant-table-thead > tr > th::before {
+ display: none;
+ }
+ }
+
+ .controller {
+ display: flex;
+ align-items: center;
+ height: 40px;
+ justify-content: end;
+ padding: 0 8px;
+ }
}
.lightMode {
- .traces-table {
- .resize-table {
- .ant-table-wrapper .ant-table-tbody >tr >td {
- background-color: var(--bg-vanilla-100);
- color: var(--bg-ink-500);
- border-color: rgba(0, 0, 0, 0.06);
- }
- .ant-table-wrapper .ant-table-thead > tr > th {
- background-color: var(--bg-vanilla-300);
- color: var(--bg-ink-500);
- border-color: rgba(0, 0, 0, 0.06);
- }
- }
- }
-}
\ No newline at end of file
+ .traces-table {
+ .resize-table {
+ .ant-table-wrapper .ant-table-tbody > tr > td {
+ background-color: var(--bg-vanilla-100);
+ color: var(--bg-ink-500);
+ border-color: rgba(0, 0, 0, 0.06);
+ }
+ .ant-table-wrapper .ant-table-thead > tr > th {
+ background-color: var(--bg-vanilla-300);
+ color: var(--bg-ink-500);
+ border-color: rgba(0, 0, 0, 0.06);
+ }
+ }
+ }
+}
diff --git a/frontend/src/container/TracesTableComponent/TracesTableComponent.tsx b/frontend/src/container/TracesTableComponent/TracesTableComponent.tsx
index a59303780c..7d2e8e7498 100644
--- a/frontend/src/container/TracesTableComponent/TracesTableComponent.tsx
+++ b/frontend/src/container/TracesTableComponent/TracesTableComponent.tsx
@@ -30,6 +30,7 @@ import { GlobalReducer } from 'types/reducer/globalTime';
function TracesTableComponent({
selectedTracesFields,
query,
+ version,
selectedTime,
}: TracesTableComponentProps): JSX.Element {
const { selectedTime: globalSelectedTime, maxTime, minTime } = useSelector<
@@ -59,6 +60,7 @@ function TracesTableComponent({
},
variables: getDashboardVariables(selectedDashboard?.data.variables),
},
+ version,
{
queryKey: [
REACT_QUERY_KEY.GET_QUERY_RANGE,
@@ -160,6 +162,7 @@ function TracesTableComponent({
export type TracesTableComponentProps = {
selectedTracesFields: Widgets['selectedTracesFields'];
query: Query;
+ version: string;
selectedTime?: timePreferance;
};
diff --git a/frontend/src/hooks/Integrations/useGetAllIntegrations.ts b/frontend/src/hooks/Integrations/useGetAllIntegrations.ts
new file mode 100644
index 0000000000..c32bbd19e7
--- /dev/null
+++ b/frontend/src/hooks/Integrations/useGetAllIntegrations.ts
@@ -0,0 +1,13 @@
+import { getAllIntegrations } from 'api/Integrations/getAllIntegrations';
+import { AxiosError, AxiosResponse } from 'axios';
+import { useQuery, UseQueryResult } from 'react-query';
+import { AllIntegrationsProps } from 'types/api/integrations/types';
+
+export const useGetAllIntegrations = (): UseQueryResult<
+ AxiosResponse<AllIntegrationsProps>,
+ AxiosError
+> =>
+ useQuery<AxiosResponse<AllIntegrationsProps>, AxiosError>({
+ queryKey: ['Integrations'],
+ queryFn: () => getAllIntegrations(),
+ });
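
A consumer of the new hook stays free of fetch plumbing. A hypothetical usage (component name and rendering are illustrative, not from this diff):

    import { Spin } from 'antd';
    import { useGetAllIntegrations } from 'hooks/Integrations/useGetAllIntegrations';

    function IntegrationsList(): JSX.Element {
        const { data: response, isLoading, isError } = useGetAllIntegrations();

        if (isLoading) return <Spin />;
        if (isError || !response) return <div>Failed to load integrations</div>;

        // `response` is the raw AxiosResponse; unwrapping further depends on
        // AllIntegrationsProps, so the payload is dumped verbatim here
        return <pre>{JSON.stringify(response.data, null, 2)}</pre>;
    }

    export default IntegrationsList;
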
diff --git a/frontend/src/hooks/Integrations/useGetIntegration.ts b/frontend/src/hooks/Integrations/useGetIntegration.ts
new file mode 100644
index 0000000000..05cad6c40d
--- /dev/null
+++ b/frontend/src/hooks/Integrations/useGetIntegration.ts
@@ -0,0 +1,18 @@
+import { getIntegration } from 'api/Integrations/getIntegration';
+import { AxiosError, AxiosResponse } from 'axios';
+import { useQuery, UseQueryResult } from 'react-query';
+import {
+ GetIntegrationPayloadProps,
+ GetIntegrationProps,
+} from 'types/api/integrations/types';
+
+export const useGetIntegration = ({
+ integrationId,
+}: GetIntegrationPayloadProps): UseQueryResult<
+ AxiosResponse<GetIntegrationProps>,
+ AxiosError
+> =>
+ useQuery<AxiosResponse<GetIntegrationProps>, AxiosError>({
+ queryKey: ['Integration', integrationId],
+ queryFn: () => getIntegration({ integrationId }),
+ });
diff --git a/frontend/src/hooks/Integrations/useGetIntegrationStatus.ts b/frontend/src/hooks/Integrations/useGetIntegrationStatus.ts
new file mode 100644
index 0000000000..56849d2515
--- /dev/null
+++ b/frontend/src/hooks/Integrations/useGetIntegrationStatus.ts
@@ -0,0 +1,19 @@
+import { getIntegrationStatus } from 'api/Integrations/getIntegrationStatus';
+import { AxiosError, AxiosResponse } from 'axios';
+import { useQuery, UseQueryResult } from 'react-query';
+import {
+ GetIntegrationPayloadProps,
+ GetIntegrationStatusProps,
+} from 'types/api/integrations/types';
+
+export const useGetIntegrationStatus = ({
+ integrationId,
+}: GetIntegrationPayloadProps): UseQueryResult<
+ AxiosResponse<GetIntegrationStatusProps>,
+ AxiosError
+> =>
+ useQuery<AxiosResponse<GetIntegrationStatusProps>, AxiosError>({
+ queryKey: ['integration-connection-status', integrationId],
+ queryFn: () => getIntegrationStatus({ integrationId }),
+ refetchInterval: 5000,
+ });
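
The `refetchInterval: 5000` makes react-query re-poll the status endpoint every five seconds for as long as a consumer is mounted, so connection state updates without any manual timer. A hedged sketch of a consumer (the payload shape inside `.data` is not asserted here):

    import { useGetIntegrationStatus } from 'hooks/Integrations/useGetIntegrationStatus';

    // hypothetical helper; the contents of `.data` depend on GetIntegrationStatusProps
    function useIntegrationStatusSnapshot(integrationId: string): string {
        const { data, isError } = useGetIntegrationStatus({ integrationId });
        if (isError) return 'error';
        // refreshed automatically every 5s while this hook is mounted
        return data ? JSON.stringify(data.data) : 'pending';
    }
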
diff --git a/frontend/src/hooks/hotkeys/useKeyboardHotkeys.tsx b/frontend/src/hooks/hotkeys/useKeyboardHotkeys.tsx
index ec1b861664..68e1bc7ae4 100644
--- a/frontend/src/hooks/hotkeys/useKeyboardHotkeys.tsx
+++ b/frontend/src/hooks/hotkeys/useKeyboardHotkeys.tsx
@@ -72,6 +72,9 @@ function KeyboardHotkeysProvider({
shortcutKey = shortcutKey + isAltKey + isShiftKey + isMetaKey;
if (shortcuts.current[shortcutKey]) {
+ event.preventDefault();
+ event.stopImmediatePropagation();
+
shortcuts.current[shortcutKey]();
}
};
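
The two added calls change a matched shortcut from "run the handler and let the event continue" to "consume the event entirely": the browser default and any later keydown listeners no longer fire. Reduced to its core:

    // sketch of the dispatch path; `shortcuts` mirrors the ref in the provider
    const shortcuts: Record<string, () => void> = {};

    function onKeyDown(event: KeyboardEvent, shortcutKey: string): void {
        const handler = shortcuts[shortcutKey];
        if (handler) {
            event.preventDefault(); // suppress the browser's own binding
            event.stopImmediatePropagation(); // and any sibling keydown listeners
            handler();
        }
        // unregistered keys fall through untouched
    }
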
diff --git a/frontend/src/hooks/logs/useCopyLogLink.ts b/frontend/src/hooks/logs/useCopyLogLink.ts
index 35b4293f51..b663aa750c 100644
--- a/frontend/src/hooks/logs/useCopyLogLink.ts
+++ b/frontend/src/hooks/logs/useCopyLogLink.ts
@@ -11,11 +11,8 @@ import {
useMemo,
useState,
} from 'react';
-import { useSelector } from 'react-redux';
import { useLocation } from 'react-router-dom';
import { useCopyToClipboard } from 'react-use';
-import { AppState } from 'store/reducers';
-import { GlobalReducer } from 'types/reducer/globalTime';
import { HIGHLIGHTED_DELAY } from './configs';
import { LogTimeRange, UseCopyLogLink } from './types';
@@ -25,9 +22,6 @@ export const useCopyLogLink = (logId?: string): UseCopyLogLink => {
const { pathname } = useLocation();
const [, setCopy] = useCopyToClipboard();
const { notifications } = useNotifications();
- const { maxTime, minTime } = useSelector(
- (state) => state.globalTime,
- );
const { queryData: timeRange } = useUrlQueryData(
QueryParams.timeRange,
@@ -70,8 +64,8 @@ export const useCopyLogLink = (logId?: string): UseCopyLogLink => {
urlQuery.delete(QueryParams.timeRange);
urlQuery.set(QueryParams.activeLogId, `"${logId}"`);
urlQuery.set(QueryParams.timeRange, range);
- urlQuery.set(QueryParams.startTime, minTime.toString());
- urlQuery.set(QueryParams.endTime, maxTime.toString());
+ urlQuery.set(QueryParams.startTime, timeRange?.start.toString() || '');
+ urlQuery.set(QueryParams.endTime, timeRange?.end.toString() || '');
const link = `${window.location.origin}${pathname}?${urlQuery.toString()}`;
@@ -80,16 +74,7 @@ export const useCopyLogLink = (logId?: string): UseCopyLogLink => {
message: 'Copied to clipboard',
});
},
- [
- logId,
- timeRange,
- urlQuery,
- minTime,
- maxTime,
- pathname,
- setCopy,
- notifications,
- ],
+ [logId, timeRange, urlQuery, pathname, setCopy, notifications],
);
useEffect(() => {
diff --git a/frontend/src/hooks/queryBuilder/useAutoComplete.ts b/frontend/src/hooks/queryBuilder/useAutoComplete.ts
index dad262757a..6ac51eb45d 100644
--- a/frontend/src/hooks/queryBuilder/useAutoComplete.ts
+++ b/frontend/src/hooks/queryBuilder/useAutoComplete.ts
@@ -1,3 +1,4 @@
+import { OPERATORS } from 'constants/queryBuilder';
import {
getRemovePrefixFromKey,
getTagToken,
@@ -10,7 +11,7 @@ import { KeyboardEvent, useCallback, useState } from 'react';
import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
import { useFetchKeysAndValues } from './useFetchKeysAndValues';
-import { useOptions } from './useOptions';
+import { useOptions, WHERE_CLAUSE_CUSTOM_SUFFIX } from './useOptions';
import { useSetCurrentKeyAndOperator } from './useSetCurrentKeyAndOperator';
import { useTag } from './useTag';
import { useTagValidation } from './useTagValidation';
@@ -98,6 +99,23 @@ export const useAutoComplete = (
[handleAddTag, handleClearTag, isMulti, isValidTag, searchValue, tags],
);
+ const handleOnBlur = (event: React.FocusEvent): void => {
+ event.preventDefault();
+ if (searchValue) {
+ if (
+ key &&
+ !operator &&
+ whereClauseConfig?.customKey === 'body' &&
+ whereClauseConfig.customOp === OPERATORS.CONTAINS
+ ) {
+ const value = `${searchValue}${WHERE_CLAUSE_CUSTOM_SUFFIX}`;
+ handleAddTag(value);
+ return;
+ }
+ handleAddTag(searchValue);
+ }
+ };
+
const options = useOptions(
key,
keys,
@@ -117,6 +135,7 @@ export const useAutoComplete = (
handleClearTag,
handleSelect,
handleKeyDown,
+ handleOnBlur,
options,
tags,
searchValue,
@@ -133,6 +152,7 @@ interface IAutoComplete {
handleClearTag: (value: string) => void;
handleSelect: (value: string) => void;
handleKeyDown: (event: React.KeyboardEvent) => void;
+ handleOnBlur: (event: React.FocusEvent) => void;
options: Option[];
tags: string[];
searchValue: string;
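
The effect of `handleOnBlur` is that free text left in the search box is committed as a tag when focus leaves the input; under a `body`/CONTAINS where-clause config the raw text gets the custom suffix so it later parses as `body CONTAINS <text>`. A reduced sketch of the decision (the helper name is hypothetical):

    import { OPERATORS } from 'constants/queryBuilder';
    import { WHERE_CLAUSE_CUSTOM_SUFFIX } from 'hooks/queryBuilder/useOptions';

    // reduced decision logic from handleOnBlur; handleAddTag is the useTag callback
    function commitOnBlur(
        searchValue: string,
        key: string,
        operator: string,
        config: { customKey?: string; customOp?: string } | undefined,
        handleAddTag: (value: string) => void,
    ): void {
        if (!searchValue) return;
        if (
            key &&
            !operator &&
            config?.customKey === 'body' &&
            config?.customOp === OPERATORS.CONTAINS
        ) {
            // free text is committed with the custom suffix so it round-trips
            // through the same path as an explicit body CONTAINS selection
            handleAddTag(`${searchValue}${WHERE_CLAUSE_CUSTOM_SUFFIX}`);
            return;
        }
        handleAddTag(searchValue);
    }
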
diff --git a/frontend/src/hooks/queryBuilder/useCreateAlerts.tsx b/frontend/src/hooks/queryBuilder/useCreateAlerts.tsx
index 6cf7da047c..71089875a9 100644
--- a/frontend/src/hooks/queryBuilder/useCreateAlerts.tsx
+++ b/frontend/src/hooks/queryBuilder/useCreateAlerts.tsx
@@ -1,5 +1,6 @@
import { getQueryRangeFormat } from 'api/dashboard/queryRangeFormat';
import { SOMETHING_WENT_WRONG } from 'constants/api';
+import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { QueryParams } from 'constants/query';
import ROUTES from 'constants/routes';
import { useNotifications } from 'hooks/useNotifications';
@@ -45,7 +46,9 @@ const useCreateAlerts = (widget?: Widgets): VoidFunction => {
history.push(
`${ROUTES.ALERTS_NEW}?${QueryParams.compositeQuery}=${encodeURIComponent(
JSON.stringify(updatedQuery),
- )}&${QueryParams.panelTypes}=${widget.panelTypes}`,
+ )}&${QueryParams.panelTypes}=${widget.panelTypes}&version=${
+ selectedDashboard?.data.version || DEFAULT_ENTITY_VERSION
+ }`,
);
},
onError: () => {
@@ -59,6 +62,7 @@ const useCreateAlerts = (widget?: Widgets): VoidFunction => {
notifications,
queryRangeMutation,
selectedDashboard?.data.variables,
+ selectedDashboard?.data.version,
widget,
]);
};
diff --git a/frontend/src/hooks/queryBuilder/useGetExplorerQueryRange.ts b/frontend/src/hooks/queryBuilder/useGetExplorerQueryRange.ts
index bd85d8f799..cdcfb3e0c7 100644
--- a/frontend/src/hooks/queryBuilder/useGetExplorerQueryRange.ts
+++ b/frontend/src/hooks/queryBuilder/useGetExplorerQueryRange.ts
@@ -15,6 +15,7 @@ import { useQueryBuilder } from './useQueryBuilder';
export const useGetExplorerQueryRange = (
requestData: Query | null,
panelType: PANEL_TYPES | null,
+ version: string,
options?: UseQueryOptions, Error>,
params?: Record,
isDependentOnQB = true,
@@ -47,6 +48,7 @@ export const useGetExplorerQueryRange = (
query: requestData || initialQueriesMap.metrics,
params,
},
+ version,
{
...options,
retry: false,
diff --git a/frontend/src/hooks/queryBuilder/useGetQueriesRange.ts b/frontend/src/hooks/queryBuilder/useGetQueriesRange.ts
index d7ec04d336..3c92bf8d21 100644
--- a/frontend/src/hooks/queryBuilder/useGetQueriesRange.ts
+++ b/frontend/src/hooks/queryBuilder/useGetQueriesRange.ts
@@ -15,6 +15,7 @@ import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
export const useGetQueriesRange = (
requestData: GetQueryResultsProps[],
+ version: string,
options: UseQueryOptions, Error>,
): UseQueryResult, Error>[] => {
const queryKey = useMemo(() => {
@@ -26,7 +27,7 @@ export const useGetQueriesRange = (
const queryData = requestData.map((request, index) => ({
queryFn: async (): Promise> =>
- GetMetricQueryRange(request),
+ GetMetricQueryRange(request, version),
...options,
queryKey: [...queryKey, index] as QueryKey,
}));
diff --git a/frontend/src/hooks/queryBuilder/useGetQueryRange.ts b/frontend/src/hooks/queryBuilder/useGetQueryRange.ts
index e832be1c4c..334ee7f628 100644
--- a/frontend/src/hooks/queryBuilder/useGetQueryRange.ts
+++ b/frontend/src/hooks/queryBuilder/useGetQueryRange.ts
@@ -11,10 +11,15 @@ import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
type UseGetQueryRange = (
requestData: GetQueryResultsProps,
+ version: string,
options?: UseQueryOptions, Error>,
) => UseQueryResult, Error>;
-export const useGetQueryRange: UseGetQueryRange = (requestData, options) => {
+export const useGetQueryRange: UseGetQueryRange = (
+ requestData,
+ version,
+ options,
+) => {
const newRequestData: GetQueryResultsProps = useMemo(
() => ({
...requestData,
@@ -39,7 +44,8 @@ export const useGetQueryRange: UseGetQueryRange = (requestData, options) => {
}, [options?.queryKey, newRequestData]);
return useQuery, Error>({
- queryFn: async ({ signal }) => GetMetricQueryRange(newRequestData, signal),
+ queryFn: async ({ signal }) =>
+ GetMetricQueryRange(newRequestData, version, signal),
...options,
queryKey,
});
diff --git a/frontend/src/hooks/queryBuilder/useGetWidgetQueryRange.ts b/frontend/src/hooks/queryBuilder/useGetWidgetQueryRange.ts
index f3ecd21101..5e5ec70e39 100644
--- a/frontend/src/hooks/queryBuilder/useGetWidgetQueryRange.ts
+++ b/frontend/src/hooks/queryBuilder/useGetWidgetQueryRange.ts
@@ -18,6 +18,7 @@ export const useGetWidgetQueryRange = (
graphType,
selectedTime,
}: Pick,
+ version: string,
options?: UseQueryOptions, Error>,
): UseQueryResult, Error> => {
const { selectedTime: globalSelectedInterval } = useSelector<
@@ -37,6 +38,7 @@ export const useGetWidgetQueryRange = (
query: stagedQuery || initialQueriesMap.metrics,
variables: getDashboardVariables(selectedDashboard?.data.variables),
},
+ version,
{
enabled: !!stagedQuery,
retry: false,
diff --git a/frontend/src/hooks/queryBuilder/useOperators.ts b/frontend/src/hooks/queryBuilder/useOperators.ts
index 63f4a9222a..11cc797667 100644
--- a/frontend/src/hooks/queryBuilder/useOperators.ts
+++ b/frontend/src/hooks/queryBuilder/useOperators.ts
@@ -1,4 +1,7 @@
-import { QUERY_BUILDER_OPERATORS_BY_TYPES } from 'constants/queryBuilder';
+import {
+ OPERATORS,
+ QUERY_BUILDER_OPERATORS_BY_TYPES,
+} from 'constants/queryBuilder';
import { getRemovePrefixFromKey } from 'container/QueryBuilder/filters/QueryBuilderSearch/utils';
import { useMemo } from 'react';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
@@ -16,9 +19,14 @@ export const useOperators = (
): IOperators =>
useMemo(() => {
const currentKey = keys?.find((el) => el.key === getRemovePrefixFromKey(key));
+ const strippedKey = key.split(' ')[0];
+
+ // eslint-disable-next-line no-nested-ternary
return currentKey?.dataType
? QUERY_BUILDER_OPERATORS_BY_TYPES[
currentKey.dataType as keyof typeof QUERY_BUILDER_OPERATORS_BY_TYPES
]
+ : strippedKey.endsWith('[*]') && strippedKey.startsWith('body.')
+ ? [OPERATORS.HAS, OPERATORS.NHAS]
: QUERY_BUILDER_OPERATORS_BY_TYPES.universal;
}, [keys, key]);
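
The new branch covers keys that address JSON arrays inside log bodies, such as `body.requests[*]`: they carry no concrete data type, so only the containment operators apply. Reduced sketch (the literal strings stand in for OPERATORS.HAS / OPERATORS.NHAS):

    // reduced operator selection for JSON-array paths in log bodies
    function operatorsForBodyArrayKey(key: string): string[] | null {
        const strippedKey = key.split(' ')[0]; // key may carry a trailing type hint
        if (strippedKey.startsWith('body.') && strippedKey.endsWith('[*]')) {
            return ['has', 'nhas'];
        }
        return null; // the real hook falls back to QUERY_BUILDER_OPERATORS_BY_TYPES
    }

    // operatorsForBodyArrayKey('body.requests[*]') -> ['has', 'nhas']
    // operatorsForBodyArrayKey('service.name')     -> null
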
diff --git a/frontend/src/hooks/queryBuilder/useQueryBuilderOperations.ts b/frontend/src/hooks/queryBuilder/useQueryBuilderOperations.ts
index 799640da4e..7766122a45 100644
--- a/frontend/src/hooks/queryBuilder/useQueryBuilderOperations.ts
+++ b/frontend/src/hooks/queryBuilder/useQueryBuilderOperations.ts
@@ -1,16 +1,24 @@
+import { ENTITY_VERSION_V4 } from 'constants/app';
import { LEGEND } from 'constants/global';
import {
+ ATTRIBUTE_TYPES,
initialAutocompleteData,
initialQueryBuilderFormValuesMap,
mapOfFormulaToFilters,
mapOfQueryFilters,
PANEL_TYPES,
} from 'constants/queryBuilder';
+import {
+ metricsGaugeSpaceAggregateOperatorOptions,
+ metricsHistogramSpaceAggregateOperatorOptions,
+ metricsSumSpaceAggregateOperatorOptions,
+} from 'constants/queryBuilderOperators';
import {
listViewInitialLogQuery,
listViewInitialTraceQuery,
} from 'container/NewDashboard/ComponentsSlider/constants';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
+import { getMetricsOperatorsByAttributeType } from 'lib/newQueryBuilder/getMetricsOperatorsByAttributeType';
import { getOperatorsBySourceAndPanelType } from 'lib/newQueryBuilder/getOperatorsBySourceAndPanelType';
import { findDataTypeOfOperator } from 'lib/query/findDataTypeOfOperator';
import { useCallback, useEffect, useState } from 'react';
@@ -18,13 +26,14 @@ import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteRe
import {
IBuilderFormula,
IBuilderQuery,
+ QueryFunctionProps,
} from 'types/api/queryBuilder/queryBuilderData';
import {
HandleChangeFormulaData,
HandleChangeQueryData,
UseQueryOperations,
} from 'types/common/operations.types';
-import { DataSource } from 'types/common/queryBuilder';
+import { DataSource, MetricAggregateOperator } from 'types/common/queryBuilder';
import { SelectOption } from 'types/common/select';
import { getFormatedLegend } from 'utils/getFormatedLegend';
@@ -34,6 +43,7 @@ export const useQueryOperations: UseQueryOperations = ({
filterConfigs,
formula,
isListViewPanel = false,
+ entityVersion,
}) => {
const {
handleSetQueryData,
@@ -46,6 +56,9 @@ export const useQueryOperations: UseQueryOperations = ({
} = useQueryBuilder();
const [operators, setOperators] = useState[]>([]);
+ const [spaceAggregationOptions, setSpaceAggregationOptions] = useState<
+ SelectOption[]
+ >([]);
const { dataSource, aggregateOperator } = query;
@@ -104,6 +117,7 @@ export const useQueryOperations: UseQueryOperations = ({
const newQuery: IBuilderQuery = {
...query,
aggregateOperator: value,
+ timeAggregation: value,
having: [],
limit: null,
...(shouldResetAggregateAttribute
@@ -116,6 +130,52 @@ export const useQueryOperations: UseQueryOperations = ({
[index, query, handleSetQueryData],
);
+ const handleSpaceAggregationChange = useCallback(
+ (value: string): void => {
+ const newQuery: IBuilderQuery = {
+ ...query,
+ spaceAggregation: value,
+ };
+
+ handleSetQueryData(index, newQuery);
+ },
+ [index, query, handleSetQueryData],
+ );
+
+ const handleMetricAggregateAtributeTypes = useCallback(
+ (aggregateAttribute: BaseAutocompleteData): any => {
+ const newOperators = getMetricsOperatorsByAttributeType({
+ dataSource: DataSource.METRICS,
+ panelType: panelType || PANEL_TYPES.TIME_SERIES,
+ aggregateAttributeType:
+ (aggregateAttribute.type as ATTRIBUTE_TYPES) || ATTRIBUTE_TYPES.GAUGE,
+ });
+
+ switch (aggregateAttribute.type) {
+ case ATTRIBUTE_TYPES.SUM:
+ setSpaceAggregationOptions(metricsSumSpaceAggregateOperatorOptions);
+ break;
+ case ATTRIBUTE_TYPES.GAUGE:
+ setSpaceAggregationOptions(metricsGaugeSpaceAggregateOperatorOptions);
+ break;
+
+ case ATTRIBUTE_TYPES.HISTOGRAM:
+ setSpaceAggregationOptions(metricsHistogramSpaceAggregateOperatorOptions);
+ break;
+
+ case ATTRIBUTE_TYPES.EXPONENTIAL_HISTOGRAM:
+ setSpaceAggregationOptions(metricsHistogramSpaceAggregateOperatorOptions);
+ break;
+ default:
+ setSpaceAggregationOptions(metricsGaugeSpaceAggregateOperatorOptions);
+ break;
+ }
+
+ setOperators(newOperators);
+ },
+ [panelType],
+ );
+
const handleChangeAggregatorAttribute = useCallback(
(value: BaseAutocompleteData): void => {
const newQuery: IBuilderQuery = {
@@ -124,9 +184,34 @@ export const useQueryOperations: UseQueryOperations = ({
having: [],
};
+ if (
+ newQuery.dataSource === DataSource.METRICS &&
+ entityVersion === ENTITY_VERSION_V4
+ ) {
+ handleMetricAggregateAtributeTypes(newQuery.aggregateAttribute);
+
+ if (newQuery.aggregateAttribute.type === ATTRIBUTE_TYPES.SUM) {
+ newQuery.aggregateOperator = MetricAggregateOperator.RATE;
+ newQuery.timeAggregation = MetricAggregateOperator.RATE;
+ } else if (newQuery.aggregateAttribute.type === ATTRIBUTE_TYPES.GAUGE) {
+ newQuery.aggregateOperator = MetricAggregateOperator.AVG;
+ newQuery.timeAggregation = MetricAggregateOperator.AVG;
+ } else {
+ newQuery.timeAggregation = '';
+ }
+
+ newQuery.spaceAggregation = '';
+ }
+
handleSetQueryData(index, newQuery);
},
- [index, query, handleSetQueryData],
+ [
+ query,
+ entityVersion,
+ handleSetQueryData,
+ index,
+ handleMetricAggregateAtributeTypes,
+ ],
);
const handleChangeDataSource = useCallback(
@@ -203,6 +288,21 @@ export const useQueryOperations: UseQueryOperations = ({
[formula, handleSetFormulaData, index],
);
+ const handleQueryFunctionsUpdates = useCallback(
+ (functions: QueryFunctionProps[]): void => {
+ const newQuery: IBuilderQuery = {
+ ...query,
+ };
+
+ if (newQuery.dataSource === DataSource.METRICS) {
+ newQuery.functions = functions;
+ }
+
+ handleSetQueryData(index, newQuery);
+ },
+ [query, handleSetQueryData, index],
+ );
+
const isMetricsDataSource = query.dataSource === DataSource.METRICS;
const isTracePanelType = panelType === PANEL_TYPES.TRACE;
@@ -210,15 +310,26 @@ export const useQueryOperations: UseQueryOperations = ({
useEffect(() => {
if (initialDataSource && dataSource !== initialDataSource) return;
- const initialOperators = getOperatorsBySourceAndPanelType({
- dataSource,
- panelType: panelType || PANEL_TYPES.TIME_SERIES,
- });
+ if (
+ dataSource === DataSource.METRICS &&
+ query &&
+ query.aggregateAttribute &&
+ entityVersion === ENTITY_VERSION_V4
+ ) {
+ handleMetricAggregateAtributeTypes(query.aggregateAttribute);
+ } else {
+ const initialOperators = getOperatorsBySourceAndPanelType({
+ dataSource,
+ panelType: panelType || PANEL_TYPES.TIME_SERIES,
+ });
- if (JSON.stringify(operators) === JSON.stringify(initialOperators)) return;
+ if (JSON.stringify(operators) === JSON.stringify(initialOperators)) return;
- setOperators(initialOperators);
- }, [dataSource, initialDataSource, panelType, operators]);
+ setOperators(initialOperators);
+ }
+
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [dataSource, initialDataSource, panelType, operators, entityVersion]);
useEffect(() => {
const additionalFilters = getNewListOfAdditionalFilters(dataSource, true);
@@ -236,13 +347,16 @@ export const useQueryOperations: UseQueryOperations = ({
isTracePanelType,
isMetricsDataSource,
operators,
+ spaceAggregationOptions,
listOfAdditionalFilters,
handleChangeOperator,
+ handleSpaceAggregationChange,
handleChangeAggregatorAttribute,
handleChangeDataSource,
handleDeleteQuery,
handleChangeQueryData,
listOfAdditionalFormulaFilters,
handleChangeFormulaData,
+ handleQueryFunctionsUpdates,
};
};
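
For v4 metrics the defaults boil down to a small mapping from metric type to initial time aggregation, with the space aggregation left for the user to pick. A sketch of that mapping, using the enums this diff already imports:

    import { ATTRIBUTE_TYPES } from 'constants/queryBuilder';
    import { MetricAggregateOperator } from 'types/common/queryBuilder';

    // default time aggregation by metric type, as applied in
    // handleChangeAggregatorAttribute when entityVersion is ENTITY_VERSION_V4
    function defaultTimeAggregation(type: ATTRIBUTE_TYPES): string {
        switch (type) {
            case ATTRIBUTE_TYPES.SUM:
                return MetricAggregateOperator.RATE;
            case ATTRIBUTE_TYPES.GAUGE:
                return MetricAggregateOperator.AVG;
            default:
                return ''; // histograms pick a space aggregation (e.g. P99) instead
        }
    }
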
diff --git a/frontend/src/hooks/queryBuilder/useTag.ts b/frontend/src/hooks/queryBuilder/useTag.ts
index 268a01e0c6..419aaaedc9 100644
--- a/frontend/src/hooks/queryBuilder/useTag.ts
+++ b/frontend/src/hooks/queryBuilder/useTag.ts
@@ -74,7 +74,14 @@ export const useTag = (
const handleAddTag = useCallback(
(value: string): void => {
const { tagKey } = getTagToken(value);
- const [key, id] = tagKey.split('-');
+ const parts = tagKey.split('-');
+ // this is done to ensure that `hello-world` also gets converted to `body CONTAINS hello-world`
+ let id = parts[parts.length - 1];
+ let key = parts.slice(0, -1).join('-');
+ if (parts.length === 1) {
+ id = '';
+ [key] = parts;
+ }
if (id === 'custom') {
const customValue = whereClauseConfig
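
The parsing change is self-contained: only the final `-` segment can be an id now, so hyphenated values survive. A reduced sketch with worked inputs (assuming the custom where-clause suffix ends in `-custom`, as the `id === 'custom'` check implies):

    // reduced form of the new tagKey parsing in handleAddTag
    function splitTagKey(tagKey: string): { key: string; id: string } {
        const parts = tagKey.split('-');
        if (parts.length === 1) {
            return { key: parts[0], id: '' };
        }
        return { key: parts.slice(0, -1).join('-'), id: parts[parts.length - 1] };
    }

    // splitTagKey('hello-custom')       -> { key: 'hello', id: 'custom' }
    // splitTagKey('hello-world-custom') -> { key: 'hello-world', id: 'custom' }
    //   (the old `const [key, id] = tagKey.split('-')` returned id 'world' here,
    //    so `hello-world` never reached the `body CONTAINS hello-world` branch)
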
diff --git a/frontend/src/hooks/useLogsData.ts b/frontend/src/hooks/useLogsData.ts
index 6105c03cbf..bad1e53200 100644
--- a/frontend/src/hooks/useLogsData.ts
+++ b/frontend/src/hooks/useLogsData.ts
@@ -1,3 +1,4 @@
+import { DEFAULT_ENTITY_VERSION } from 'constants/app';
import { QueryParams } from 'constants/query';
import {
initialQueryBuilderFormValues,
@@ -126,6 +127,7 @@ export const useLogsData = ({
const { data, isFetching } = useGetExplorerQueryRange(
requestData,
panelType,
+ DEFAULT_ENTITY_VERSION,
{
keepPreviousData: true,
enabled: !isLimit && !!requestData,
diff --git a/frontend/src/hooks/useResourceAttribute/ResourceProvider.tsx b/frontend/src/hooks/useResourceAttribute/ResourceProvider.tsx
index e027c70b8f..8a3de793e8 100644
--- a/frontend/src/hooks/useResourceAttribute/ResourceProvider.tsx
+++ b/frontend/src/hooks/useResourceAttribute/ResourceProvider.tsx
@@ -52,6 +52,7 @@ function ResourceProvider({ children }: Props): JSX.Element {
? `?resourceAttribute=${encode(JSON.stringify(queries))}`
: '',
});
+
setQueries(queries);
},
[pathname],
@@ -62,12 +63,14 @@ function ResourceProvider({ children }: Props): JSX.Element {
onSelectTagKey: () => {
handleLoading(true);
GetTagKeys()
- .then((tagKeys) =>
+ .then((tagKeys) => {
+ const options = mappingWithRoutesAndKeys(pathname, tagKeys);
+
setOptionsData({
- options: mappingWithRoutesAndKeys(pathname, tagKeys),
+ options,
mode: undefined,
- }),
- )
+ });
+ })
.finally(() => {
handleLoading(false);
});
@@ -96,6 +99,7 @@ function ResourceProvider({ children }: Props): JSX.Element {
}
const generatedQuery = createQuery([...staging, selectedQuery]);
+
if (generatedQuery) {
dispatchQueries([...queries, generatedQuery]);
}
@@ -127,6 +131,29 @@ function ResourceProvider({ children }: Props): JSX.Element {
[optionsData.mode, send],
);
+ const handleEnvironmentChange = useCallback(
+ (environments: string[]): void => {
+ const staging = ['resource_deployment_environment', 'IN'];
+
+ const queriesCopy = queries.filter(
+ (query) => query.tagKey !== 'resource_deployment_environment',
+ );
+
+ if (environments && Array.isArray(environments) && environments.length > 0) {
+ const generatedQuery = createQuery([...staging, environments]);
+
+ if (generatedQuery) {
+ dispatchQueries([...queriesCopy, generatedQuery]);
+ }
+ } else {
+ dispatchQueries([...queriesCopy]);
+ }
+
+ send('RESET');
+ },
+ [dispatchQueries, queries, send],
+ );
+
const handleClose = useCallback(
(id: string): void => {
dispatchQueries(queries.filter((queryData) => queryData.id !== id));
@@ -159,12 +186,14 @@ function ResourceProvider({ children }: Props): JSX.Element {
handleFocus,
loading,
handleChange,
+ handleEnvironmentChange,
selectedQuery,
optionsData,
}),
[
handleBlur,
handleChange,
+ handleEnvironmentChange,
handleClearAll,
handleClose,
handleFocus,
diff --git a/frontend/src/hooks/useResourceAttribute/types.ts b/frontend/src/hooks/useResourceAttribute/types.ts
index 422a0555ba..cce06c5cd1 100644
--- a/frontend/src/hooks/useResourceAttribute/types.ts
+++ b/frontend/src/hooks/useResourceAttribute/types.ts
@@ -28,4 +28,5 @@ export interface IResourceAttributeProps {
handleChange: (value: string) => void;
selectedQuery: string[];
optionsData: OptionsData;
+ handleEnvironmentChange: (environments: string[]) => void;
}
diff --git a/frontend/src/hooks/useResourceAttribute/utils.ts b/frontend/src/hooks/useResourceAttribute/utils.ts
index 8926621e32..52dc85c1c4 100644
--- a/frontend/src/hooks/useResourceAttribute/utils.ts
+++ b/frontend/src/hooks/useResourceAttribute/utils.ts
@@ -109,12 +109,43 @@ export const GetTagKeys = async (): Promise => {
if (!payload || !payload?.data) {
return [];
}
+ return payload.data
+ .filter((tagKey: string) => tagKey !== 'resource_deployment_environment')
+ .map((tagKey: string) => ({
+ label: convertMetricKeyToTrace(tagKey),
+ value: tagKey,
+ }));
+};
+
+export const getEnvironmentTagKeys = async (): Promise => {
+ const { payload } = await getResourceAttributesTagKeys({
+ metricName: 'signoz_calls_total',
+ match: 'resource_deployment_environment',
+ });
+ if (!payload || !payload?.data) {
+ return [];
+ }
return payload.data.map((tagKey: string) => ({
label: convertMetricKeyToTrace(tagKey),
value: tagKey,
}));
};
+export const getEnvironmentTagValues = async (): Promise => {
+ const { payload } = await getResourceAttributesTagValues({
+ tagKey: 'resource_deployment_environment',
+ metricName: 'signoz_calls_total',
+ });
+
+ if (!payload || !payload?.data) {
+ return [];
+ }
+ return payload.data.map((tagValue: string) => ({
+ label: tagValue,
+ value: tagValue,
+ }));
+};
+
export const GetTagValues = async (tagKey: string): Promise => {
const { payload } = await getResourceAttributesTagValues({
tagKey,
@@ -132,6 +163,23 @@ export const GetTagValues = async (tagKey: string): Promise => {
export const createQuery = (
selectedItems: Array = [],
+): IResourceAttribute | null => {
+ if (selectedItems.length === 3) {
+ return {
+ id: uuid().slice(0, 8),
+ tagKey: selectedItems[0] as string,
+ operator: selectedItems[1] as string,
+ tagValue: selectedItems[2] as string[],
+ };
+ }
+ return null;
+};
+
+export const updateQuery = (
+ queryKey: string,
+ selectedItems: Array = [],
): IResourceAttribute | null => {
if (selectedItems.length === 3) {
return {
diff --git a/frontend/src/index.tsx b/frontend/src/index.tsx
index b95631c107..570db8c1da 100644
--- a/frontend/src/index.tsx
+++ b/frontend/src/index.tsx
@@ -3,6 +3,7 @@ import 'styles.scss';
import * as Sentry from '@sentry/react';
import AppRoutes from 'AppRoutes';
+import { AxiosError } from 'axios';
import { ThemeProvider } from 'hooks/useDarkMode';
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
import { createRoot } from 'react-dom/client';
@@ -16,6 +17,17 @@ const queryClient = new QueryClient({
defaultOptions: {
queries: {
refetchOnWindowFocus: false,
+ retry(failureCount, error): boolean {
+ if (
+ // when manually throwing errors, make sure to set error.response.status
+ error instanceof AxiosError &&
+ error.response?.status &&
+ (error.response?.status >= 400 && error.response?.status <= 499)
+ ) {
+ return false;
+ }
+ return failureCount < 2;
+ },
},
},
});
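
The retry predicate therefore skips retries for 4xx client errors, where a repeat request cannot succeed, and allows react-query's usual retries for everything else. Spelled out:

    import { AxiosError } from 'axios';

    // mirrors the defaultOptions.queries.retry predicate above
    function shouldRetry(failureCount: number, error: unknown): boolean {
        if (
            error instanceof AxiosError &&
            error.response?.status &&
            error.response.status >= 400 &&
            error.response.status <= 499
        ) {
            return false; // client errors will not succeed on retry
        }
        return failureCount < 2; // at most 2 retries (3 attempts total)
    }

    // shouldRetry(0, axios404Error)           -> false
    // shouldRetry(1, new Error('network'))    -> true
    // shouldRetry(2, new Error('network'))    -> false
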
diff --git a/frontend/src/lib/dashboard/getQueryResults.ts b/frontend/src/lib/dashboard/getQueryResults.ts
index ac012ce3a7..64b749e45c 100644
--- a/frontend/src/lib/dashboard/getQueryResults.ts
+++ b/frontend/src/lib/dashboard/getQueryResults.ts
@@ -18,11 +18,16 @@ import { prepareQueryRangePayload } from './prepareQueryRangePayload';
export async function GetMetricQueryRange(
props: GetQueryResultsProps,
+ version: string,
signal?: AbortSignal,
): Promise> {
const { legendMap, queryPayload } = prepareQueryRangePayload(props);
- const response = await getMetricsQueryRange(queryPayload, signal);
+ const response = await getMetricsQueryRange(
+ queryPayload,
+ version || 'v3',
+ signal,
+ );
if (response.statusCode >= 400) {
throw new Error(
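
Callers now thread an explicit entity version, and the `version || 'v3'` fallback keeps legacy call sites on the v3 endpoint. A hypothetical v4 call (the GetQueryResultsProps import path is assumed):

    import { ENTITY_VERSION_V4 } from 'constants/app';
    import {
        GetMetricQueryRange,
        GetQueryResultsProps,
    } from 'lib/dashboard/getQueryResults';

    async function fetchQueryRangeV4(props: GetQueryResultsProps): Promise<void> {
        // passing '' (or nothing at legacy call sites) falls back to 'v3'
        const response = await GetMetricQueryRange(props, ENTITY_VERSION_V4);
        console.log(response.payload);
    }
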
diff --git a/frontend/src/lib/logql/parser.ts b/frontend/src/lib/logql/parser.ts
index 46fe02930b..a067a586ea 100644
--- a/frontend/src/lib/logql/parser.ts
+++ b/frontend/src/lib/logql/parser.ts
@@ -144,8 +144,6 @@ export const parseQuery = (queryString) => {
];
}
}
-
- // console.log(parsedRaw);
return parsedRaw;
};
diff --git a/frontend/src/lib/newQueryBuilder/getMetricsOperatorsByAttributeType.ts b/frontend/src/lib/newQueryBuilder/getMetricsOperatorsByAttributeType.ts
new file mode 100644
index 0000000000..79e110e084
--- /dev/null
+++ b/frontend/src/lib/newQueryBuilder/getMetricsOperatorsByAttributeType.ts
@@ -0,0 +1,31 @@
+import {
+ ATTRIBUTE_TYPES,
+ metricsOperatorsByType,
+ PANEL_TYPES,
+} from 'constants/queryBuilder';
+import { metricsEmptyTimeAggregateOperatorOptions } from 'constants/queryBuilderOperators';
+import { DataSource } from 'types/common/queryBuilder';
+import { SelectOption } from 'types/common/select';
+
+type GetQueryOperatorsParams = {
+ dataSource: DataSource;
+ panelType: PANEL_TYPES;
+ aggregateAttributeType: ATTRIBUTE_TYPES;
+};
+
+export const getMetricsOperatorsByAttributeType = ({
+ dataSource,
+ aggregateAttributeType,
+}: GetQueryOperatorsParams): SelectOption[] => {
+ if (dataSource === DataSource.METRICS && aggregateAttributeType) {
+ if (aggregateAttributeType === ATTRIBUTE_TYPES.SUM) {
+ return metricsOperatorsByType.Sum;
+ }
+
+ if (aggregateAttributeType === ATTRIBUTE_TYPES.GAUGE) {
+ return metricsOperatorsByType.Gauge;
+ }
+ }
+
+ return metricsEmptyTimeAggregateOperatorOptions;
+};
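
Usage from the query-builder hook above: Sum and Gauge get their dedicated operator sets, while histogram-like types fall through to the empty time-aggregate list because their aggregation happens in space (P50/P90/...):

    import { ATTRIBUTE_TYPES, PANEL_TYPES } from 'constants/queryBuilder';
    import { getMetricsOperatorsByAttributeType } from 'lib/newQueryBuilder/getMetricsOperatorsByAttributeType';
    import { DataSource } from 'types/common/queryBuilder';

    const sumOperators = getMetricsOperatorsByAttributeType({
        dataSource: DataSource.METRICS,
        panelType: PANEL_TYPES.TIME_SERIES,
        aggregateAttributeType: ATTRIBUTE_TYPES.SUM,
    }); // -> metricsOperatorsByType.Sum

    const histogramOperators = getMetricsOperatorsByAttributeType({
        dataSource: DataSource.METRICS,
        panelType: PANEL_TYPES.TIME_SERIES,
        aggregateAttributeType: ATTRIBUTE_TYPES.HISTOGRAM,
    }); // -> metricsEmptyTimeAggregateOperatorOptions
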
diff --git a/frontend/src/lib/uPlotLib/getUplotChartOptions.ts b/frontend/src/lib/uPlotLib/getUplotChartOptions.ts
index dae5bbdfd5..0b281506f6 100644
--- a/frontend/src/lib/uPlotLib/getUplotChartOptions.ts
+++ b/frontend/src/lib/uPlotLib/getUplotChartOptions.ts
@@ -6,11 +6,13 @@ import './uPlotLib.styles.scss';
import { PANEL_TYPES } from 'constants/queryBuilder';
import { FullViewProps } from 'container/GridCardLayout/GridCard/FullView/types';
+import { saveLegendEntriesToLocalStorage } from 'container/GridCardLayout/GridCard/FullView/utils';
import { ThresholdProps } from 'container/NewWidget/RightContainer/Threshold/types';
import { Dimensions } from 'hooks/useDimensions';
import { convertValue } from 'lib/getConvertedValue';
import _noop from 'lodash-es/noop';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
+import { Query } from 'types/api/queryBuilder/queryBuilderData';
import uPlot from 'uplot';
import onClickPlugin, { OnClickPluginOpts } from './plugins/onClickPlugin';
@@ -39,6 +41,7 @@ export interface GetUPlotChartOptions {
maxTimeScale?: number;
softMin: number | null;
softMax: number | null;
+ currentQuery?: Query;
}
export const getUPlotChartOptions = ({
@@ -58,6 +61,7 @@ export const getUPlotChartOptions = ({
softMax,
softMin,
panelType,
+ currentQuery,
}: GetUPlotChartOptions): uPlot.Options => {
const timeScaleProps = getXAxisScale(minTimeScale, maxTimeScale);
@@ -203,6 +207,11 @@ export const getUPlotChartOptions = ({
newGraphVisibilityStates.fill(false);
newGraphVisibilityStates[index + 1] = true;
}
+ saveLegendEntriesToLocalStorage({
+ options: self,
+ graphVisibilityState: newGraphVisibilityStates,
+ name: id || '',
+ });
return newGraphVisibilityStates;
});
}
@@ -217,6 +226,7 @@ export const getUPlotChartOptions = ({
widgetMetaData: apiResponse?.data.result,
graphsVisibilityStates,
panelType,
+ currentQuery,
}),
axes: getAxes(isDarkMode, yAxisUnit),
};
diff --git a/frontend/src/lib/uPlotLib/plugins/onClickPlugin.ts b/frontend/src/lib/uPlotLib/plugins/onClickPlugin.ts
index 56a6f1e333..7dfbbe9b47 100644
--- a/frontend/src/lib/uPlotLib/plugins/onClickPlugin.ts
+++ b/frontend/src/lib/uPlotLib/plugins/onClickPlugin.ts
@@ -18,8 +18,9 @@ function onClickPlugin(opts: OnClickPluginOpts): uPlot.Plugin {
const mouseY = event.offsetY + 40;
// Convert pixel positions to data values
- const xValue = u.posToVal(mouseX, 'x');
- const yValue = u.posToVal(mouseY, 'y');
+ // do not use mouseX and mouseY here as they would offset the timestamp as well
+ const xValue = u.posToVal(event.offsetX, 'x');
+ const yValue = u.posToVal(event.offsetY, 'y');
opts.onClick(xValue, yValue, mouseX, mouseY);
};
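
`posToVal` maps a pixel offset inside the plotting area to a data value, so feeding it the padded `mouseX`/`mouseY` (which add 40px purely to anchor the popover) shifted every click by 40px worth of time and value. The two coordinate roles, separated:

    import uPlot from 'uplot';

    // sketch: uPlot click handling with anchor and data coordinates kept apart
    function handleClick(
        u: uPlot,
        event: MouseEvent,
        onClick: (x: number, y: number, px: number, py: number) => void,
    ): void {
        const anchorX = event.offsetX + 40; // used only to place the popover
        const anchorY = event.offsetY + 40;
        const xValue = u.posToVal(event.offsetX, 'x'); // raw offsets for data lookup
        const yValue = u.posToVal(event.offsetY, 'y');
        onClick(xValue, yValue, anchorX, anchorY);
    }
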
diff --git a/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts b/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts
index 713bf7958d..b06e5bff63 100644
--- a/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts
+++ b/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts
@@ -3,6 +3,7 @@ import { themeColors } from 'constants/theme';
import dayjs from 'dayjs';
import customParseFormat from 'dayjs/plugin/customParseFormat';
import getLabelName from 'lib/getLabelName';
+import { get } from 'lodash-es';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { placement } from '../placement';
@@ -18,6 +19,7 @@ interface UplotTooltipDataProps {
value: number;
tooltipValue: string;
textContent: string;
+ queryName: string;
}
const generateTooltipContent = (
@@ -26,6 +28,7 @@ const generateTooltipContent = (
idx: number,
yAxisUnit?: string,
series?: uPlot.Options['series'],
+ isBillingUsageGraphs?: boolean,
// eslint-disable-next-line sonarjs/cognitive-complexity
): HTMLElement => {
const container = document.createElement('div');
@@ -35,6 +38,7 @@ const generateTooltipContent = (
let tooltipTitle = '';
const formattedData: Record = {};
+ const duplicatedLegendLabels: Record = {};
function sortTooltipContentBasedOnValue(
tooltipDataObj: Record,
@@ -47,18 +51,61 @@ const generateTooltipContent = (
if (Array.isArray(series) && series.length > 0) {
series.forEach((item, index) => {
if (index === 0) {
- tooltipTitle = dayjs(data[0][idx] * 1000).format('MMM DD YYYY HH:mm:ss');
+ if (isBillingUsageGraphs) {
+ tooltipTitle = dayjs(data[0][idx] * 1000).format('MMM DD YYYY');
+ } else {
+ tooltipTitle = dayjs(data[0][idx] * 1000).format('MMM DD YYYY HH:mm:ss');
+ }
} else if (item.show) {
- const { metric = {}, queryName = '', legend = '' } =
- seriesList[index - 1] || {};
+ const {
+ metric = {},
+ queryName = '',
+ legend = '',
+ quantity = [],
+ unit = '',
+ } = seriesList[index - 1] || {};
const value = data[index][idx];
+ const dataIngested = quantity[idx];
const label = getLabelName(metric, queryName || '', legend || '');
- const color = generateColor(label, themeColors.chartcolors);
+ let color = generateColor(label, themeColors.chartcolors);
+
+ // in case of billing graph pick colors from the series options
+ if (isBillingUsageGraphs) {
+ let clr;
+ series.forEach((item) => {
+ if (item.label === label) {
+ clr = get(item, '_fill');
+ }
+ });
+ color = clr ?? color;
+ }
+
+ let tooltipItemLabel = label;
if (Number.isFinite(value)) {
const tooltipValue = getToolTipValue(value, yAxisUnit);
+ const dataIngestedFormated = getToolTipValue(dataIngested);
+ if (
+ duplicatedLegendLabels[label] ||
+ Object.prototype.hasOwnProperty.call(formattedData, label)
+ ) {
+ duplicatedLegendLabels[label] = true;
+ const tempDataObj = formattedData[label];
+
+ if (tempDataObj) {
+ const newLabel = `${tempDataObj.queryName}: ${tempDataObj.label}`;
+
+ tempDataObj.textContent = `${newLabel} : ${tempDataObj.tooltipValue}`;
+
+ formattedData[newLabel] = tempDataObj;
+
+ delete formattedData[label];
+ }
+
+ tooltipItemLabel = `${queryName}: ${label}`;
+ }
const dataObj = {
show: item.show || false,
@@ -69,11 +116,15 @@ const generateTooltipContent = (
focus: item?._focus || false,
value,
tooltipValue,
- textContent: `${label} : ${tooltipValue}`,
+ queryName,
+ textContent: isBillingUsageGraphs
+ ? `${tooltipItemLabel} : $${tooltipValue} - ${dataIngestedFormated} ${unit}`
+ : `${tooltipItemLabel} : ${tooltipValue}`,
};
tooltipCount += 1;
- formattedData[label] = dataObj;
+
+ formattedData[tooltipItemLabel] = dataObj;
}
}
});
@@ -143,6 +194,7 @@ const generateTooltipContent = (
const tooltipPlugin = (
apiResponse: MetricRangePayloadProps | undefined,
yAxisUnit?: string,
+ isBillingUsageGraphs?: boolean,
): any => {
let over: HTMLElement;
let bound: HTMLElement;
@@ -203,6 +255,7 @@ const tooltipPlugin = (
idx,
yAxisUnit,
u.series,
+ isBillingUsageGraphs,
);
overlay.appendChild(content);
placement(overlay, anchor, 'right', 'start', { bound });
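
Previously two series that resolved to the same legend label collapsed into one tooltip row, because the second `formattedData[label]` write overwrote the first; the new bookkeeping renames both rows with their query name. The core of the disambiguation:

    // reduced: disambiguate duplicate legend labels with the query name
    function tooltipLabel(
        label: string,
        queryName: string,
        seen: Record<string, boolean>,
    ): string {
        if (seen[label]) {
            // e.g. 'B: error_rate' instead of silently replacing the first row;
            // the real code also retro-fits that first row with its query name
            return `${queryName}: ${label}`;
        }
        seen[label] = true;
        return label;
    }
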
diff --git a/frontend/src/lib/uPlotLib/utils/getSeriesData.ts b/frontend/src/lib/uPlotLib/utils/getSeriesData.ts
index cf60a632cb..574b8dc1de 100644
--- a/frontend/src/lib/uPlotLib/utils/getSeriesData.ts
+++ b/frontend/src/lib/uPlotLib/utils/getSeriesData.ts
@@ -3,6 +3,7 @@ import { PANEL_TYPES } from 'constants/queryBuilder';
import { themeColors } from 'constants/theme';
import getLabelName from 'lib/getLabelName';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
+import { Query } from 'types/api/queryBuilder/queryBuilderData';
import { QueryData } from 'types/api/widgets/getQuery';
import { drawStyles, lineInterpolations } from './constants';
@@ -31,6 +32,7 @@ const getSeries = ({
widgetMetaData,
graphsVisibilityStates,
panelType,
+ currentQuery,
}: GetSeriesProps): uPlot.Options['series'] => {
const configurations: uPlot.Series[] = [
{ label: 'Timestamp', stroke: 'purple' },
@@ -40,13 +42,15 @@ const getSeries = ({
const newGraphVisibilityStates = graphsVisibilityStates?.slice(1);
for (let i = 0; i < seriesList?.length; i += 1) {
- const { metric = {}, queryName = '', legend = '' } = widgetMetaData[i] || {};
+ const { metric = {}, queryName = '', legend: lgd } = widgetMetaData[i] || {};
- const label = getLabelName(
- metric,
- queryName || '', // query
- legend || '',
- );
+ const newLegend =
+ currentQuery?.builder.queryData.find((item) => item.queryName === queryName)
+ ?.legend || '';
+
+ const legend = newLegend || lgd || '';
+
+ const label = getLabelName(metric, queryName || '', legend);
const color = generateColor(label, themeColors.chartcolors);
@@ -87,6 +91,7 @@ export type GetSeriesProps = {
widgetMetaData: QueryData[];
graphsVisibilityStates?: boolean[];
panelType?: PANEL_TYPES;
+ currentQuery?: Query;
};
export default getSeries;
diff --git a/frontend/src/mocks-server/handlers.ts b/frontend/src/mocks-server/handlers.ts
index 25564363e4..af25738aa7 100644
--- a/frontend/src/mocks-server/handlers.ts
+++ b/frontend/src/mocks-server/handlers.ts
@@ -11,6 +11,10 @@ export const handlers = [
res(ctx.status(200), ctx.json(queryRangeSuccessResponse)),
),
+ rest.post('http://localhost/api/v4/query_range', (req, res, ctx) =>
+ res(ctx.status(200), ctx.json(queryRangeSuccessResponse)),
+ ),
+
rest.post('http://localhost/api/v1/services', (req, res, ctx) =>
res(ctx.status(200), ctx.json(serviceSuccessResponse)),
),
diff --git a/frontend/src/pages/AlertList/index.tsx b/frontend/src/pages/AlertList/index.tsx
index 336c399a2f..33f3ada0f9 100644
--- a/frontend/src/pages/AlertList/index.tsx
+++ b/frontend/src/pages/AlertList/index.tsx
@@ -12,6 +12,11 @@ function AllAlertList(): JSX.Element {
children: ,
},
// {
+ // label: 'Planned Downtime',
+ // key: 'Planned Downtime',
+ // // children: ,
+ // },
+ // {
// label: 'Map Alert Channels',
// key = 'Map Alert Channels',
// children: ,
diff --git a/frontend/src/pages/Billing/BillingPage.styles.scss b/frontend/src/pages/Billing/BillingPage.styles.scss
index ced1d4d055..bb6bd3b529 100644
--- a/frontend/src/pages/Billing/BillingPage.styles.scss
+++ b/frontend/src/pages/Billing/BillingPage.styles.scss
@@ -2,4 +2,6 @@
display: flex;
width: 100%;
color: #fff;
+ justify-content: center;
+ align-items: center;
}
diff --git a/frontend/src/pages/ChannelsEdit/index.tsx b/frontend/src/pages/ChannelsEdit/index.tsx
index 8a578c06e0..9925c84849 100644
--- a/frontend/src/pages/ChannelsEdit/index.tsx
+++ b/frontend/src/pages/ChannelsEdit/index.tsx
@@ -81,6 +81,15 @@ function ChannelsEdit(): JSX.Element {
};
}
+ if (value && 'email_configs' in value) {
+ const emailConfig = value.email_configs[0];
+ channel = emailConfig;
+ return {
+ type: ChannelType.Email,
+ channel,
+ };
+ }
+
if (value && 'webhook_configs' in value) {
const webhookConfig = value.webhook_configs[0];
channel = webhookConfig;
diff --git a/frontend/src/pages/Integrations/Header.tsx b/frontend/src/pages/Integrations/Header.tsx
new file mode 100644
index 0000000000..f6b8592762
--- /dev/null
+++ b/frontend/src/pages/Integrations/Header.tsx
@@ -0,0 +1,37 @@
+import './Integrations.styles.scss';
+
+import { Color } from '@signozhq/design-tokens';
+import { Input, Typography } from 'antd';
+import { Search } from 'lucide-react';
+import { Dispatch, SetStateAction } from 'react';
+
+interface HeaderProps {
+ searchTerm: string;
+ setSearchTerm: Dispatch<SetStateAction<string>>;
+}
+
+function Header(props: HeaderProps): JSX.Element {
+ const { searchTerm, setSearchTerm } = props;
+
+ const handleSearch = (e: React.ChangeEvent<HTMLInputElement>): void => {
+ setSearchTerm(e.target.value);
+ };
+ return (
+
+ Integrations
+
+ Manage Integrations for this workspace
+
+
+ }
+ value={searchTerm}
+ onChange={handleSearch}
+ className="integrations-search-input"
+ />
+
+ );
+}
+
+export default Header;
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx
new file mode 100644
index 0000000000..ec81d51db6
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx
@@ -0,0 +1,80 @@
+import './IntegrationDetailPage.styles.scss';
+
+import { Button, Tabs, TabsProps, Typography } from 'antd';
+import ConfigureIcon from 'assets/Integrations/ConfigureIcon';
+import { CableCar, Group } from 'lucide-react';
+import { IntegrationDetailedProps } from 'types/api/integrations/types';
+
+import Configure from './IntegrationDetailContentTabs/Configure';
+import DataCollected from './IntegrationDetailContentTabs/DataCollected';
+import Overview from './IntegrationDetailContentTabs/Overview';
+
+interface IntegrationDetailContentProps {
+ activeDetailTab: string;
+ integrationData: IntegrationDetailedProps;
+}
+
+function IntegrationDetailContent(
+ props: IntegrationDetailContentProps,
+): JSX.Element {
+ const { activeDetailTab, integrationData } = props;
+ const items: TabsProps['items'] = [
+ {
+ key: 'overview',
+ label: (
+ }
+ >
+ Overview
+
+ ),
+ children: (
+
+ ),
+ },
+ {
+ key: 'configuration',
+ label: (
+ }
+ >
+ Configure
+
+ ),
+ children: ,
+ },
+ {
+ key: 'dataCollected',
+ label: (
+ }
+ >
+ Data Collected
+
+ ),
+ children: (
+
+ ),
+ },
+ ];
+ return (
+
+
+
+ );
+}
+
+export default IntegrationDetailContent;
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx
new file mode 100644
index 0000000000..92a5e0c823
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx
@@ -0,0 +1,48 @@
+import './IntegrationDetailContentTabs.styles.scss';
+
+import { Button, Typography } from 'antd';
+import cx from 'classnames';
+import { MarkdownRenderer } from 'components/MarkdownRenderer/MarkdownRenderer';
+import { useState } from 'react';
+
+interface ConfigurationProps {
+ configuration: Array<{ title: string; instructions: string }>;
+}
+
+function Configure(props: ConfigurationProps): JSX.Element {
+ // TODO: Markdown renderer support once instructions are ready
+ const { configuration } = props;
+ const [selectedConfigStep, setSelectedConfigStep] = useState(0);
+
+ const handleMenuClick = (index: number): void => {
+ setSelectedConfigStep(index);
+ };
+ return (
+
+
+ {configuration.map((config, index) => (
+ handleMenuClick(index)}
+ >
+
+ {config.title}
+
+
+ ))}
+
+
+
+
+
+ );
+}
+
+export default Configure;
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx
new file mode 100644
index 0000000000..1c605ec863
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx
@@ -0,0 +1,85 @@
+import './IntegrationDetailContentTabs.styles.scss';
+
+import { Table, Typography } from 'antd';
+import { BarChart2, ScrollText } from 'lucide-react';
+
+interface DataCollectedProps {
+ logsData: Array<{ name: string; path: string; type: string }>;
+ metricsData: Array<{ name: string; type: string; unit: string }>;
+}
+
+function DataCollected(props: DataCollectedProps): JSX.Element {
+ const { logsData, metricsData } = props;
+ const logsColumns = [
+ {
+ title: 'Name',
+ dataIndex: 'name',
+ key: 'name',
+ },
+ {
+ title: 'Path',
+ dataIndex: 'path',
+ key: 'path',
+ },
+ {
+ title: 'Type',
+ dataIndex: 'type',
+ key: 'type',
+ },
+ ];
+
+ const metricsColumns = [
+ {
+ title: 'Name',
+ dataIndex: 'name',
+ key: 'name',
+ },
+ {
+ title: 'Type',
+ dataIndex: 'type',
+ key: 'type',
+ },
+ {
+ title: 'Unit',
+ dataIndex: 'unit',
+ key: 'unit',
+ },
+ ];
+
+ return (
+
+
+
+
+ Logs
+
+
+ index % 2 === 0 ? 'table-row-dark' : ''
+ }
+ dataSource={logsData}
+ pagination={{ pageSize: 20 }}
+ className="logs-section-table"
+ />
+
+
+
+
+ Metrics
+
+
+ index % 2 === 0 ? 'table-row-dark' : ''
+ }
+ dataSource={metricsData}
+ pagination={{ pageSize: 20 }}
+ className="metrics-section-table"
+ />
+
+
+ );
+}
+
+export default DataCollected;
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss
new file mode 100644
index 0000000000..81dcb6bf59
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss
@@ -0,0 +1,301 @@
+.integration-detail-overview {
+ display: flex;
+
+ .integration-detail-overview-left-container {
+ display: flex;
+ flex-direction: column;
+ width: 30%;
+ gap: 26px;
+ border-right: 1px solid var(--bg-slate-500);
+ padding: 16px 0;
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 11px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 16px; /* 145.455% */
+ letter-spacing: 0.44px;
+ text-transform: uppercase;
+
+ .integration-detail-overview-category {
+ display: flex;
+ flex-direction: column;
+
+ .heading {
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 11px;
+ font-style: normal;
+ font-weight: 500;
+ line-height: 16px; /* 145.455% */
+ letter-spacing: 0.44px;
+ text-transform: uppercase;
+ }
+
+ .category-tabs {
+ display: flex;
+ gap: 6px;
+ flex-flow: wrap;
+ margin-top: 12px;
+
+ .category-tab {
+ padding: 2px 8px;
+ border-radius: 4px;
+ border: 1px solid rgba(173, 127, 88, 0.2);
+ background: rgba(173, 127, 88, 0.1);
+ color: var(--bg-sienna-400);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 20px; /* 142.857% */
+ letter-spacing: -0.07px;
+ text-transform: none;
+ }
+ }
+ }
+
+ .integration-detail-overview-assets {
+ display: flex;
+ flex-direction: column;
+
+ .heading {
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 11px;
+ font-style: normal;
+ font-weight: 500;
+ line-height: 16px; /* 145.455% */
+ letter-spacing: 0.44px;
+ text-transform: uppercase;
+ }
+
+ .assets-list {
+ margin-left: 5px;
+ margin-top: 12px;
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 22px; /* 157.143% */
+ letter-spacing: -0.07px;
+ padding-inline-start: 16px !important;
+ text-transform: none;
+ }
+ }
+ }
+
+ .integration-detail-overview-right-container {
+ width: 75%;
+ padding: 16px 0 0 16px;
+ max-height: 600px;
+ overflow-y: auto;
+ }
+}
+
+.integration-data-collected {
+ display: flex;
+ flex-direction: column;
+ gap: 32px;
+ margin-top: 8px;
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 20px; /* 142.857% */
+ letter-spacing: -0.07px;
+
+ .logs-section {
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+
+ .table-row-dark {
+ background: rgba(255, 255, 255, 0.01);
+ }
+
+ .logs-section-table {
+ border-radius: 6px;
+ border: 1px solid var(--bg-slate-400);
+ background: var(--bg-ink-400);
+
+ .ant-table-thead {
+ text-transform: uppercase;
+ }
+ .ant-table-cell {
+ background: unset !important;
+ border-bottom: none !important;
+ }
+
+ .ant-table-cell::before {
+ background-color: unset !important;
+ }
+ }
+
+ .logs-heading {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 4px 6px;
+ }
+ }
+
+ .metrics-section {
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+
+ .table-row-dark {
+ background: rgba(255, 255, 255, 0.01);
+ }
+
+ .metrics-section-table {
+ border-radius: 6px;
+ border: 1px solid var(--bg-slate-400);
+ background: var(--bg-ink-400);
+
+ .ant-table-thead {
+ text-transform: uppercase;
+ }
+
+ .ant-table-cell {
+ background: unset !important;
+ border-bottom: none !important;
+ }
+
+ .ant-table-cell::before {
+ background-color: unset !important;
+ }
+ }
+
+ .metrics-heading {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 4px 6px;
+ }
+ }
+}
+
+.integration-detail-configure {
+ display: flex;
+
+ .configure-menu {
+ display: flex;
+ flex-direction: column;
+ width: 30%;
+ padding: 16px 16px 0px 0px;
+ border-right: 1px solid var(--bg-slate-500);
+ gap: 8px;
+
+ .configure-menu-item {
+ padding: 4px 8px;
+ height: auto;
+ text-align: start;
+ color: var(--bg-vanilla-100);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 18px; /* 128.571% */
+
+ .configure-text {
+ text-wrap: pretty;
+ }
+ }
+
+ .configure-menu-item:hover {
+ background-color: rgba(255, 255, 255, 0.08);
+ }
+
+ .active {
+ color: rgba(255, 255, 255, 0.85);
+ background-color: rgba(255, 255, 255, 0.08);
+ }
+ }
+
+ .markdown-container {
+ width: 75%;
+ padding: 16px 0px 0px 16px;
+ max-height: 600px;
+ overflow-y: auto;
+ }
+}
+
+.lightMode {
+ .integration-detail-overview {
+ .integration-detail-overview-left-container {
+ border-right: 1px solid var(--bg-vanilla-400);
+
+ color: var(--bg-slate-100);
+
+ .integration-detail-overview-category {
+ .heading {
+ color: var(--bg-slate-100);
+ }
+ .category-tabs {
+ .category-tab {
+ border: 1px solid var(--bg-sienna-600);
+ background: rgba(173, 127, 88, 0.1);
+ color: var(--bg-sienna-500);
+ }
+ }
+ }
+
+ .integration-detail-overview-assets {
+ .heading {
+ color: var(--bg-slate-100);
+ }
+ .assets-list {
+ color: var(--bg-slate-100);
+ }
+ }
+ }
+ }
+
+ .integration-data-collected {
+ color: var(--bg-vanilla-400);
+
+ .logs-section {
+ .table-row-dark {
+ background: rgba(255, 255, 255, 0.01);
+ }
+
+ .logs-section-table {
+ border: 1px solid var(--bg-vanilla-400);
+ background: var(--bg-vanilla-300);
+ }
+ }
+
+ .metrics-section {
+ .table-row-dark {
+ background: rgba(255, 255, 255, 0.01);
+ }
+
+ .metrics-section-table {
+ border: 1px solid var(--bg-vanilla-400);
+ background: var(--bg-vanilla-300);
+ }
+ }
+ }
+
+ .integration-detail-configure {
+ .configure-menu {
+ border-right: 1px solid var(--bg-vanilla-400);
+
+ .configure-menu-item {
+ color: var(--bg-vanilla-100);
+ }
+ .configure-menu-item:hover {
+ background-color: var(--bg-vanilla-200);
+ }
+
+ .active {
+ color: rgba(255, 255, 255, 0.85);
+ background-color: var(--bg-vanilla-200);
+ }
+ }
+ }
+}
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Overview.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Overview.tsx
new file mode 100644
index 0000000000..5160115e12
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Overview.tsx
@@ -0,0 +1,63 @@
+import './IntegrationDetailContentTabs.styles.scss';
+
+import { Typography } from 'antd';
+import { MarkdownRenderer } from 'components/MarkdownRenderer/MarkdownRenderer';
+
+interface OverviewProps {
+ categories: string[];
+ assets: {
+ logs: {
+ pipelines: Array<unknown>;
+ };
+ dashboards: Array<unknown>;
+ alerts: Array<unknown>;
+ };
+ overviewContent: string;
+}
+
+function Overview(props: OverviewProps): JSX.Element {
+ const { categories, assets, overviewContent } = props;
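+ // Asset counts for pipelines, dashboards and alerts; zero counts are skipped in the render below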
+ const assetsCount = [
+ assets?.logs?.pipelines?.length || 0,
+ assets?.dashboards?.length || 0,
+ assets?.alerts?.length || 0,
+ ];
+
+ const assetLabelMap = ['Pipelines', 'Dashboards', 'Alerts'];
+ return (
+
+
+
+
Category
+
+ {categories.map((category) => (
+
+ {category}
+
+ ))}
+
+
+
+
Assets
+
+ {assetsCount.map((count, index) => {
+ if (count === 0) {
+ return undefined;
+ }
+ return (
+
+ {count} {assetLabelMap[index]}
+
+ );
+ })}
+
+
+
+
+
+
+
+ );
+}
+
+export default Overview;
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx
new file mode 100644
index 0000000000..cab49391f5
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx
@@ -0,0 +1,214 @@
+/* eslint-disable no-nested-ternary */
+import './IntegrationDetailPage.styles.scss';
+
+import { Button, Modal, Tooltip, Typography } from 'antd';
+import installIntegration from 'api/Integrations/installIntegration';
+import { SOMETHING_WENT_WRONG } from 'constants/api';
+import dayjs from 'dayjs';
+import { useNotifications } from 'hooks/useNotifications';
+import { ArrowLeftRight, Check } from 'lucide-react';
+import { useState } from 'react';
+import { useMutation } from 'react-query';
+import { IntegrationConnectionStatus } from 'types/api/integrations/types';
+
+import TestConnection, { ConnectionStates } from './TestConnection';
+
+interface IntegrationDetailHeaderProps {
+ id: string;
+ title: string;
+ description: string;
+ icon: string;
+ refetchIntegrationDetails: () => void;
+ connectionState: ConnectionStates;
+ connectionData: IntegrationConnectionStatus;
+}
+// eslint-disable-next-line sonarjs/cognitive-complexity
+function IntegrationDetailHeader(
+ props: IntegrationDetailHeaderProps,
+): JSX.Element {
+ const {
+ id,
+ title,
+ icon,
+ description,
+ connectionState,
+ connectionData,
+ refetchIntegrationDetails,
+ } = props;
+ const [isModalOpen, setIsModalOpen] = useState(false);
+
+ const { notifications } = useNotifications();
+
+ const showModal = (): void => {
+ setIsModalOpen(true);
+ };
+
+ const handleOk = (): void => {
+ setIsModalOpen(false);
+ };
+
+ const handleCancel = (): void => {
+ setIsModalOpen(false);
+ };
+
+ const { mutate, isLoading: isInstallLoading } = useMutation(
+ installIntegration,
+ {
+ onSuccess: () => {
+ refetchIntegrationDetails();
+ },
+ onError: () => {
+ notifications.error({
+ message: SOMETHING_WENT_WRONG,
+ });
+ },
+ },
+ );
+
+ let latestData: {
+ last_received_ts_ms: number | null;
+ last_received_from: string | null;
+ } = {
+ last_received_ts_ms: null,
+ last_received_from: null,
+ };
+
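+ // Pick whichever signal (logs or metrics) reported data most recently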
+ if (
+ connectionData.logs?.last_received_ts_ms &&
+ connectionData.metrics?.last_received_ts_ms
+ ) {
+ if (
+ connectionData.logs.last_received_ts_ms >
+ connectionData.metrics.last_received_ts_ms
+ ) {
+ latestData = {
+ last_received_ts_ms: connectionData.logs.last_received_ts_ms,
+ last_received_from: connectionData.logs.last_received_from,
+ };
+ } else {
+ latestData = {
+ last_received_ts_ms: connectionData.metrics.last_received_ts_ms,
+ last_received_from: connectionData.metrics.last_received_from,
+ };
+ }
+ } else if (connectionData.logs?.last_received_ts_ms) {
+ latestData = {
+ last_received_ts_ms: connectionData.logs.last_received_ts_ms,
+ last_received_from: connectionData.logs.last_received_from,
+ };
+ } else if (connectionData.metrics?.last_received_ts_ms) {
+ latestData = {
+ last_received_ts_ms: connectionData.metrics.last_received_ts_ms,
+ last_received_from: connectionData.metrics.last_received_from,
+ };
+ }
+ return (
+
+
+
+
+
+
+
+ {title}
+ {description}
+
+
+
}
+ disabled={isInstallLoading}
+ onClick={(): void => {
+ if (connectionState === ConnectionStates.NotInstalled) {
+ mutate({ integration_id: id, config: {} });
+ } else {
+ showModal();
+ }
+ }}
+ >
+ {connectionState === ConnectionStates.NotInstalled
+ ? `Connect ${title}`
+ : `Test Connection`}
+
+
+
+ {connectionState !== ConnectionStates.NotInstalled && (
+
+ )}
+
+
}}
+ cancelButtonProps={{ style: { display: 'none' } }}
+ >
+
+
+ {connectionState === ConnectionStates.Connected ||
+ connectionState === ConnectionStates.NoDataSinceLong ? (
+ <>
+
+
+ Last received from
+
+
+
+
+ {latestData.last_received_from}
+
+
+
+
+
+ Last received at
+
+
+
+
+ {latestData.last_received_ts_ms
+ ? dayjs(latestData.last_received_ts_ms).format('DD MMM YYYY HH:mm')
+ : ''}
+
+
+
+ >
+ ) : connectionState === ConnectionStates.TestingConnection ? (
+
+
+ After adding the {title} integration, you need to manually configure
+ your {title} data source to start sending data to SigNoz.
+
+
+ The status bar above will turn green once we start successfully
+ receiving data.
+
+
+ ) : null}
+
+
+
+ );
+}
+
+export default IntegrationDetailHeader;
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss
new file mode 100644
index 0000000000..b7630491ae
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss
@@ -0,0 +1,674 @@
+.integration-detail-content {
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+ margin: 12px 0px 20px 0px;
+
+ .error-container {
+ display: flex;
+ border-radius: 6px;
+ border: 1px solid var(--bg-slate-500);
+ background: var(--bg-ink-400);
+ align-items: center;
+ justify-content: center;
+ flex-direction: column;
+
+ .error-content {
+ display: flex;
+ flex-direction: column;
+ justify-content: center;
+ height: 300px;
+ gap: 15px;
+
+ .error-btns {
+ display: flex;
+ flex-direction: row;
+ gap: 16px;
+ align-items: center;
+
+ .retry-btn {
+ display: flex;
+ align-items: center;
+ }
+
+ .contact-support {
+ display: flex;
+ align-items: center;
+ gap: 4px;
+ cursor: pointer;
+
+ .text {
+ color: var(--text-robin-400);
+ font-weight: 500;
+ }
+ }
+ }
+
+ .error-state-svg {
+ height: 40px;
+ width: 40px;
+ }
+ }
+ }
+
+ .loading-integration-details {
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+
+ .skeleton-1 {
+ height: 125px;
+ width: 100%;
+ }
+ .skeleton-2 {
+ height: 250px;
+ width: 100%;
+ }
+ }
+
+ .all-integrations-btn {
+ width: fit-content;
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ height: 24px;
+ padding-left: 0px;
+ color: #c0c1c3;
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 18px; /* 128.571% */
+ }
+
+ .all-integrations-btn:hover {
+ &.ant-btn-text {
+ background-color: unset !important;
+ }
+ }
+
+ .integration-connection-header {
+ display: flex;
+ flex-direction: column;
+ padding: 16px;
+ gap: 12px;
+ border-radius: 6px;
+ border: 1px solid var(--bg-slate-500);
+ background: var(--bg-ink-400);
+
+ .integration-detail-header {
+ display: flex;
+ gap: 10px;
+ justify-content: space-between;
+
+ .image-container {
+ height: 40px;
+ width: 40px;
+ flex-shrink: 0;
+ border-radius: 2px;
+ border: 1px solid var(--bg-ink-50);
+ background: var(--bg-ink-300);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+
+ .image {
+ height: 24px;
+ width: 24px;
+ }
+ }
+ .details {
+ display: flex;
+ flex-direction: column;
+ .heading {
+ color: var(--bg-vanilla-100);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 500;
+ line-height: 20px; /* 142.857% */
+ letter-spacing: -0.07px;
+ }
+
+ .description {
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 12px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 18px; /* 150% */
+ }
+ }
+
+ .configure-btn {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ align-self: flex-start;
+ gap: 2px;
+ flex-shrink: 0;
+ min-width: 143px;
+ height: 30px;
+ padding: 6px;
+ border-radius: 2px;
+ border: 1px solid var(--bg-ink-50);
+ background: var(--bg-robin-500);
+ color: var(--bg-vanilla-100);
+ font-family: Inter;
+ font-size: 12px;
+ font-style: normal;
+ font-weight: 500;
+ line-height: 10px; /* 83.333% */
+ letter-spacing: 0.12px;
+ }
+ }
+
+ .connection-container {
+ padding: 0 18px;
+ height: 37px;
+ display: flex;
+ align-items: center;
+
+ .connection-text {
+ margin: 0px;
+ padding: 0px 0px 0px 10px;
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 22px; /* 157.143% */
+ letter-spacing: -0.07px;
+ }
+ }
+
+ .testingConnection {
+ border-radius: 4px;
+ border: 1px solid rgba(255, 205, 86, 0.1);
+ background: rgba(255, 205, 86, 0.1);
+ color: var(--bg-amber-400);
+ }
+
+ .connected {
+ border-radius: 4px;
+ border: 1px solid rgba(37, 225, 146, 0.1);
+ background: rgba(37, 225, 146, 0.1);
+ color: var(--bg-forest-400);
+ }
+
+ .connectionFailed {
+ border-radius: 4px;
+ border: 1px solid rgba(218, 85, 101, 0.2);
+ background: rgba(218, 85, 101, 0.06);
+ color: var(--bg-cherry-500);
+ }
+
+ .noDataSinceLong {
+ border-radius: 4px;
+ border: 1px solid rgba(78, 116, 248, 0.1);
+ background: rgba(78, 116, 248, 0.1);
+ color: var(--bg-robin-400);
+ }
+ }
+
+ .integration-detail-container {
+ border-radius: 6px;
+ padding: 10px 16px;
+ border: 1px solid var(--bg-slate-500);
+ background: var(--bg-ink-400, #121317);
+
+ .integration-tab-btns {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding: 8px 8px 18px 8px !important;
+
+ .typography {
+ color: var(--bg-vanilla-100);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 20px; /* 142.857% */
+ letter-spacing: -0.07px;
+ }
+ }
+
+ .integration-tab-btns:hover {
+ &.ant-btn-text {
+ background-color: unset !important;
+ }
+ }
+
+ .ant-tabs-nav-list {
+ gap: 24px;
+ }
+
+ .ant-tabs-nav {
+ padding: 0px !important;
+ }
+
+ .ant-tabs-tab {
+ padding: 0 !important;
+ }
+
+ .ant-tabs-tab + .ant-tabs-tab {
+ margin: 0px !important;
+ }
+ }
+
+ .uninstall-integration-bar {
+ display: flex;
+ padding: 16px;
+ border-radius: 4px;
+ border: 1px solid rgba(218, 85, 101, 0.2);
+ background: rgba(218, 85, 101, 0.06);
+ gap: 32px;
+
+ .unintall-integration-bar-text {
+ display: flex;
+ flex-direction: column;
+ gap: 6px;
+
+ .heading {
+ color: var(--bg-cherry-500);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: normal;
+ letter-spacing: -0.07px;
+ }
+
+ .subtitle {
+ color: var(--bg-cherry-300);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 22px; /* 157.143% */
+ letter-spacing: -0.07px;
+ }
+ }
+
+ .uninstall-integration-btn {
+ border-radius: 2px;
+ background: var(--Accent---Secondary-Cherry, #da5565);
+ border-color: unset !important;
+ padding: 9px 13px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ color: var(--bg-ink-300);
+ text-align: center;
+ font-family: Inter;
+ font-size: 12px;
+ font-style: normal;
+ font-weight: 500;
+ line-height: 13.3px; /* 110.833% */
+ }
+
+ .uninstall-integration-btn:hover {
+ &.ant-btn-default {
+ color: var(--bg-ink-300) !important;
+ }
+ }
+ }
+}
+
+.remove-integration-modal {
+ .ant-modal-content {
+ width: 400px;
+ min-height: 200px;
+ flex-shrink: 0;
+ border-radius: 4px;
+ border: 1px solid var(--bg-slate-500);
+ box-shadow: 0px -4px 16px 2px rgba(0, 0, 0, 0.2);
+ background: var(--bg-ink-400);
+ }
+
+ .ant-modal-footer {
+ margin-top: 28px;
+ }
+
+ .ant-modal-header {
+ background: unset;
+ margin-bottom: 8px;
+ }
+
+ .ant-modal-title {
+ color: var(--bg-vanilla-100);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 500;
+ line-height: 20px; /* 142.857% */
+ letter-spacing: -0.07px;
+ }
+
+ .remove-integration-text {
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 20px; /* 142.857% */
+ letter-spacing: -0.07px;
+ }
+}
+
+.test-connection-modal {
+ .ant-modal-content {
+ width: 512px;
+ min-height: 170px;
+ flex-shrink: 0;
+ border-radius: 4px;
+ border: 1px solid var(--bg-slate-500);
+ background: var(--bg-ink-400);
+ box-shadow: 0px -4px 16px 2px rgba(0, 0, 0, 0.2);
+
+ .ant-modal-header {
+ margin-bottom: 16px;
+ }
+
+ .ant-modal-body {
+ border-top: 1px solid var(--bg-slate-500);
+ padding-top: 16px;
+ }
+
+ .ant-modal-footer {
+ margin-top: 25px;
+ display: flex;
+ flex-direction: row-reverse;
+
+ .understandBtn {
+ border-radius: 2px;
+ border: 1px solid var(--bg-slate-400);
+ background: var(--bg-ink-300);
+ box-shadow: none;
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 12px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 10px; /* 83.333% */
+ letter-spacing: 0.12px;
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ width: 131px;
+ height: 30px;
+ padding: 6px;
+ flex-shrink: 0;
+ }
+ }
+ }
+
+ .ant-modal-header {
+ background: unset;
+ }
+
+ .connection-content {
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+
+ .connection-container {
+ padding: 0 10px;
+ height: 37px;
+ display: flex;
+ align-items: center;
+
+ .connection-text {
+ margin: 0px;
+ padding: 0px 0px 0px 10px;
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 22px; /* 157.143% */
+ letter-spacing: -0.07px;
+ }
+ }
+
+ .data-test-connection {
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+ }
+ .data-info {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+
+ .connection-line {
+ border: 1px dashed var(--bg-slate-200);
+ min-width: 20px;
+ height: 0px;
+ flex-grow: 1;
+ margin: 0px 8px;
+ }
+
+ .last-data {
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 22px; /* 157.143% */
+ letter-spacing: -0.07px;
+ }
+
+ .last-value {
+ color: var(--bg-vanilla-100);
+ font-family: 'Space Mono';
+ font-size: 12px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 18px; /* 150% */
+ max-width: 320px;
+ }
+ }
+ .testingConnection {
+ border-radius: 4px;
+ border: 1px solid rgba(255, 205, 86, 0.1);
+ background: rgba(255, 205, 86, 0.1);
+ color: var(--bg-amber-400);
+ }
+
+ .connected {
+ border-radius: 4px;
+ border: 1px solid rgba(37, 225, 146, 0.1);
+ background: rgba(37, 225, 146, 0.1);
+ color: var(--bg-forest-400);
+ }
+
+ .connectionFailed {
+ border-radius: 4px;
+ border: 1px solid rgba(218, 85, 101, 0.2);
+ background: rgba(218, 85, 101, 0.06);
+ color: var(--bg-cherry-500);
+ }
+
+ .noDataSinceLong {
+ border-radius: 4px;
+ border: 1px solid rgba(78, 116, 248, 0.1);
+ background: rgba(78, 116, 248, 0.1);
+ color: var(--bg-robin-400);
+ }
+ }
+}
+
+.lightMode {
+ .integration-detail-content {
+ .error-container {
+ border: 1px solid var(--bg-slate-500);
+ background: var(--bg-ink-400);
+
+ .error-content {
+ .error-btns {
+ .contact-support {
+ .text {
+ color: var(--text-robin-400);
+ font-weight: 500;
+ }
+ }
+ }
+ }
+ }
+
+ .all-integrations-btn {
+ color: var(--bg-slate-300);
+ }
+
+ .all-integrations-btn:hover {
+ &.ant-btn-text {
+ background-color: unset !important;
+ }
+ }
+
+ .integration-connection-header {
+ border: 1px solid rgba(53, 59, 76, 0.2);
+ background: var(--bg-vanilla-100);
+
+ .integration-detail-header {
+ .image-container {
+ border: 1.111px solid var(--bg-vanilla-300);
+ background: var(--bg-vanilla-100);
+ }
+ .details {
+ .heading {
+ color: var(--bg-ink-500);
+ }
+
+ .description {
+ color: var(--bg-slate-200);
+ }
+ }
+ }
+
+ .testingConnection {
+ border: 1px solid rgba(255, 205, 86, 0.4);
+ background: rgba(255, 205, 86, 0.2);
+ color: var(--bg-amber-600);
+ }
+
+ .connected {
+ border: 1px solid rgba(37, 225, 146, 0.1);
+ background: rgba(37, 225, 146, 0.1);
+ color: var(--bg-forest-600);
+ }
+
+ .noDataSinceLong {
+ border: 1px solid rgba(78, 116, 248, 0.1);
+ background: rgba(78, 116, 248, 0.1);
+ color: var(--bg-robin-400);
+ }
+ }
+
+ .integration-detail-container {
+ border: 1px solid rgba(53, 59, 76, 0.2);
+ background: var(--bg-vanilla-100);
+
+ .integration-tab-btns {
+ .typography {
+ color: var(--bg-ink-500);
+ }
+ }
+ }
+
+ .uninstall-integration-bar {
+ border: 1px solid rgba(53, 59, 76, 0.2);
+ background: var(--bg-vanilla-100);
+
+ .unintall-integration-bar-text {
+ .heading {
+ color: var(--bg-ink-500);
+ }
+
+ .subtitle {
+ color: var(--bg-slate-100);
+ }
+ }
+
+ .uninstall-integration-btn {
+ background: var(--bg-cherry-500, #e5484d);
+ border-color: unset !important;
+ color: var(--bg-vanilla-100);
+ }
+
+ .uninstall-integration-btn:hover {
+ &.ant-btn-default {
+ color: var(--bg-vanilla-300) !important;
+ }
+ }
+ }
+ }
+
+ .remove-integration-modal {
+ .ant-modal-content {
+ border: 1px solid var(--bg-vanilla-400);
+ box-shadow: 0px -4px 16px 2px rgba(0, 0, 0, 0.2);
+ background: var(--bg-vanilla-100);
+ }
+
+ .ant-modal-title {
+ color: var(--bg-ink-500);
+ }
+
+ .remove-integration-text {
+ color: var(--bg-slate-400);
+ }
+ }
+
+ .test-connection-modal {
+ .ant-modal-content {
+ border: 1px solid rgba(53, 59, 76, 0.2);
+ background: var(--bg-vanilla-100);
+ box-shadow: 0px -4px 16px 2px rgba(0, 0, 0, 0.2);
+
+ .ant-modal-body {
+ border-top: 1px solid var(--bg-vanilla-400);
+ }
+
+ .ant-modal-footer {
+ .understandBtn {
+ border: 1px solid rgba(53, 59, 76, 0.2);
+ background: var(--bg-vanilla-200);
+ color: var(--bg-slate-400);
+ }
+ }
+ }
+
+ .connection-content {
+ .data-info {
+ .connection-line {
+ border: 1px dashed var(--bg-vanilla-400);
+ }
+ .last-data {
+ color: var(--bg-slate-400);
+ }
+
+ .last-value {
+ color: var(--bg-slate-100);
+ }
+ }
+ .testingConnection {
+ border: 1px solid rgba(255, 205, 86, 0.4);
+ background: rgba(255, 205, 86, 0.2);
+ color: var(--bg-amber-600);
+ }
+
+ .connected {
+ border: 1px solid rgba(37, 225, 146, 0.1);
+ background: rgba(37, 225, 146, 0.1);
+ color: var(--bg-forest-600);
+ }
+
+ .noDataSinceLong {
+ border: 1px solid rgba(78, 116, 248, 0.1);
+ background: rgba(78, 116, 248, 0.1);
+ color: var(--bg-robin-400);
+ }
+ }
+ }
+}
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx
new file mode 100644
index 0000000000..88be0dc3a3
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx
@@ -0,0 +1,142 @@
+/* eslint-disable jsx-a11y/no-static-element-interactions */
+/* eslint-disable jsx-a11y/click-events-have-key-events */
+/* eslint-disable no-nested-ternary */
+import './IntegrationDetailPage.styles.scss';
+
+import { Color } from '@signozhq/design-tokens';
+import { Button, Skeleton, Typography } from 'antd';
+import { useGetIntegration } from 'hooks/Integrations/useGetIntegration';
+import { useGetIntegrationStatus } from 'hooks/Integrations/useGetIntegrationStatus';
+import { defaultTo } from 'lodash-es';
+import { ArrowLeft, MoveUpRight, RotateCw } from 'lucide-react';
+import { isCloudUser } from 'utils/app';
+
+import { handleContactSupport } from '../utils';
+import IntegrationDetailContent from './IntegrationDetailContent';
+import IntegrationDetailHeader from './IntegrationDetailHeader';
+import IntergrationsUninstallBar from './IntegrationsUninstallBar';
+import { ConnectionStates } from './TestConnection';
+import { getConnectionStatesFromConnectionStatus } from './utils';
+
+interface IntegrationDetailPageProps {
+ selectedIntegration: string;
+ setSelectedIntegration: (id: string | null) => void;
+ activeDetailTab: string;
+}
+
+function IntegrationDetailPage(props: IntegrationDetailPageProps): JSX.Element {
+ const { selectedIntegration, setSelectedIntegration, activeDetailTab } = props;
+
+ const {
+ data,
+ isLoading,
+ isFetching,
+ refetch,
+ isRefetching,
+ isError,
+ } = useGetIntegration({
+ integrationId: selectedIntegration,
+ });
+
+ const {
+ data: integrationStatus,
+ isLoading: isStatusLoading,
+ } = useGetIntegrationStatus({
+ integrationId: selectedIntegration,
+ });
+
+ const loading = isLoading || isFetching || isRefetching || isStatusLoading;
+ const integrationData = data?.data.data;
+
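+ // Prefer the live status endpoint; fall back to the connection status bundled with the integration details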
+ const connectionStatus = getConnectionStatesFromConnectionStatus(
+ integrationData?.installation,
+ defaultTo(
+ integrationStatus?.data.data,
+ defaultTo(integrationData?.connection_status, { logs: null, metrics: null }),
+ ),
+ );
+
+ return (
+
+
}
+ className="all-integrations-btn"
+ onClick={(): void => {
+ setSelectedIntegration(null);
+ }}
+ >
+ All Integrations
+
+
+ {loading ? (
+
+
+
+
+ ) : isError ? (
+
+
+
+
+ Something went wrong :/ Please retry or contact support.
+
+
+
=> refetch()}
+ icon={ }
+ >
+ Retry
+
+
handleContactSupport(isCloudUser())}
+ >
+ Contact Support
+
+
+
+
+
+
+ ) : (
+ integrationData && (
+ <>
+
+
+
+ {connectionStatus !== ConnectionStates.NotInstalled && (
+
+ )}
+ >
+ )
+ )}
+
+ );
+}
+
+export default IntegrationDetailPage;
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationsUninstallBar.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationsUninstallBar.tsx
new file mode 100644
index 0000000000..41e985abf8
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationsUninstallBar.tsx
@@ -0,0 +1,89 @@
+import './IntegrationDetailPage.styles.scss';
+
+import { Button, Modal, Typography } from 'antd';
+import unInstallIntegration from 'api/Integrations/uninstallIntegration';
+import { SOMETHING_WENT_WRONG } from 'constants/api';
+import { useNotifications } from 'hooks/useNotifications';
+import { X } from 'lucide-react';
+import { useState } from 'react';
+import { useMutation } from 'react-query';
+
+interface IntergrationsUninstallBarProps {
+ integrationTitle: string;
+ integrationId: string;
+ refetchIntegrationDetails: () => void;
+}
+function IntergrationsUninstallBar(
+ props: IntergrationsUninstallBarProps,
+): JSX.Element {
+ const { integrationTitle, integrationId, refetchIntegrationDetails } = props;
+ const { notifications } = useNotifications();
+ const [isModalOpen, setIsModalOpen] = useState(false);
+
+ const {
+ mutate: uninstallIntegration,
+ isLoading: isUninstallLoading,
+ } = useMutation(unInstallIntegration, {
+ onSuccess: () => {
+ refetchIntegrationDetails();
+ setIsModalOpen(false);
+ },
+ onError: () => {
+ notifications.error({
+ message: SOMETHING_WENT_WRONG,
+ });
+ },
+ });
+
+ const showModal = (): void => {
+ setIsModalOpen(true);
+ };
+
+ const handleOk = (): void => {
+ uninstallIntegration({
+ integration_id: integrationId,
+ });
+ };
+
+ const handleCancel = (): void => {
+ setIsModalOpen(false);
+ };
+ return (
+
+
+ Remove Integration
+
+ Removing the {integrationTitle} integration would make your workspace stop
+ listening for data from {integrationTitle} instances.
+
+
+
}
+ onClick={(): void => showModal()}
+ >
+ Remove from SigNoz
+
+
+
+ Removing this integration makes SigNoz stop listening for data from{' '}
+ {integrationTitle} instances. You would still have to manually remove the
+ configuration in your code to stop sending data.
+
+
+
+ );
+}
+
+export default IntergrationsUninstallBar;
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/TestConnection.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/TestConnection.tsx
new file mode 100644
index 0000000000..e593e121e1
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/TestConnection.tsx
@@ -0,0 +1,35 @@
+import './IntegrationDetailPage.styles.scss';
+
+import cx from 'classnames';
+
+export enum ConnectionStates {
+ Connected = 'connected',
+ TestingConnection = 'testingConnection',
+ NoDataSinceLong = 'noDataSinceLong',
+ NotInstalled = 'notInstalled',
+}
+
+const ConnectionStatesLabelMap = {
+ [ConnectionStates.Connected]: 'This integration is working properly',
+ [ConnectionStates.TestingConnection]: 'Listening for data...',
+ [ConnectionStates.NoDataSinceLong]:
+ 'This integration has not received data in a while :/',
+ [ConnectionStates.NotInstalled]: '',
+};
+
+interface TestConnectionProps {
+ connectionState: ConnectionStates;
+}
+
+function TestConnection(props: TestConnectionProps): JSX.Element {
+ const { connectionState } = props;
+ return (
+
+
+ {ConnectionStatesLabelMap[connectionState]}
+
+
+ );
+}
+
+export default TestConnection;
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/utils.ts b/frontend/src/pages/Integrations/IntegrationDetailPage/utils.ts
new file mode 100644
index 0000000000..43a4f76a5e
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/utils.ts
@@ -0,0 +1,55 @@
+import dayjs from 'dayjs';
+import { isNull, isUndefined } from 'lodash-es';
+
+import { ConnectionStates } from './TestConnection';
+
+export function getConnectionStatesFromConnectionStatus(
+ installation:
+ | {
+ installed_at: string;
+ }
+ | null
+ | undefined,
+ connection_status: {
+ logs:
+ | {
+ last_received_ts_ms: number;
+ last_received_from: string;
+ }
+ | null
+ | undefined;
+ metrics:
+ | {
+ last_received_ts_ms: number;
+ last_received_from: string;
+ }
+ | null
+ | undefined;
+ },
+): ConnectionStates {
+ if (isNull(installation) || isUndefined(installation)) {
+ return ConnectionStates.NotInstalled;
+ }
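+ // Installed but no data on either signal yet: still "testing" unless the install is over a week old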
+ if (
+ (isNull(connection_status.logs) || isUndefined(connection_status.logs)) &&
+ (isNull(connection_status.metrics) || isUndefined(connection_status.metrics))
+ ) {
+ const installationDate = dayjs(installation.installed_at);
+ if (installationDate.isBefore(dayjs().subtract(7, 'days'))) {
+ return ConnectionStates.NoDataSinceLong;
+ }
+ return ConnectionStates.TestingConnection;
+ }
+
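+ // Data has arrived before; treat the integration as stale when both signals have been silent for 7+ days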
+ const logsDate = dayjs(connection_status.logs?.last_received_ts_ms);
+ const metricsDate = dayjs(connection_status.metrics?.last_received_ts_ms);
+
+ if (
+ logsDate.isBefore(dayjs().subtract(7, 'days')) &&
+ metricsDate.isBefore(dayjs().subtract(7, 'days'))
+ ) {
+ return ConnectionStates.NoDataSinceLong;
+ }
+
+ return ConnectionStates.Connected;
+}
diff --git a/frontend/src/pages/Integrations/Integrations.styles.scss b/frontend/src/pages/Integrations/Integrations.styles.scss
new file mode 100644
index 0000000000..794b596407
--- /dev/null
+++ b/frontend/src/pages/Integrations/Integrations.styles.scss
@@ -0,0 +1,228 @@
+.integrations-container {
+ margin-top: 24px;
+ display: flex;
+ justify-content: center;
+ width: 100%;
+
+ .integrations-content {
+ width: calc(100% - 30px);
+ max-width: 736px;
+
+ .integrations-header {
+ .title {
+ color: var(--bg-vanilla-100);
+ font-size: var(--font-size-lg);
+ font-style: normal;
+ line-height: 28px; /* 155.556% */
+ letter-spacing: -0.09px;
+ font-family: Inter;
+ font-weight: 500;
+ }
+
+ .subtitle {
+ color: var(--bg-vanilla-400);
+ font-size: var(--font-size-sm);
+ font-style: normal;
+ line-height: 20px; /* 142.857% */
+ letter-spacing: -0.07px;
+ font-family: Inter;
+ font-weight: 400;
+ }
+
+ .integrations-search-input {
+ margin-top: 1rem;
+ border-radius: 2px;
+ border: 1px solid var(--bg-slate-400);
+ background: var(--bg-ink-300);
+
+ .ant-input {
+ background-color: unset;
+ }
+ }
+ }
+
+ .integrations-list {
+ margin-top: 16px;
+
+ .error-container {
+ display: flex;
+ border-radius: 6px;
+ border: 1px solid var(--bg-slate-500);
+ background: var(--bg-ink-400);
+ align-items: center;
+ justify-content: center;
+ flex-direction: column;
+
+ .error-content {
+ display: flex;
+ flex-direction: column;
+ justify-content: center;
+ height: 300px;
+ gap: 15px;
+
+ .error-btns {
+ display: flex;
+ flex-direction: row;
+ gap: 16px;
+ align-items: center;
+
+ .retry-btn {
+ display: flex;
+ align-items: center;
+ }
+
+ .contact-support {
+ display: flex;
+ align-items: center;
+ gap: 4px;
+ cursor: pointer;
+
+ .text {
+ color: var(--text-robin-400);
+ font-weight: 500;
+ }
+ }
+ }
+
+ .error-state-svg {
+ height: 40px;
+ width: 40px;
+ }
+ }
+ }
+
+ .ant-list-items {
+ gap: 16px;
+ display: flex;
+ flex-direction: column;
+ }
+
+ .integrations-list-item {
+ display: flex;
+ gap: 10px;
+ padding: 16px;
+ border-radius: 6px;
+ border: 1px solid var(--bg-slate-500);
+ background: var(--bg-ink-400);
+ cursor: pointer;
+
+ .list-item-image-container {
+ height: 40px;
+ width: 40px;
+ flex-shrink: 0;
+ border-radius: 2px;
+ border: 1px solid var(--bg-ink-50);
+ background: var(--bg-ink-300);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+
+ .list-item-image {
+ height: 24px;
+ width: 24px;
+ }
+ }
+
+ .list-item-details {
+ display: flex;
+ flex-direction: column;
+
+ .heading {
+ color: var(--bg-vanilla-100);
+ font-family: Inter;
+ font-size: 14px;
+ font-style: normal;
+ font-weight: 500;
+ line-height: 20px; /* 142.857% */
+ letter-spacing: -0.07px;
+ margin-bottom: 8px;
+ }
+
+ .description {
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 12px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 18px; /* 150% */
+ }
+ }
+
+ .configure-btn {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ align-self: flex-start;
+ gap: 2px;
+ flex-shrink: 0;
+ width: 78px;
+ height: 24px;
+ padding: 6px 1px;
+ border-radius: 2px;
+ border: 1px solid #303540;
+ background: var(--bg-ink-200);
+ box-shadow: none;
+ color: var(--bg-vanilla-400);
+ font-family: Inter;
+ font-size: 12px;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 10px; /* 83.333% */
+ letter-spacing: 0.12px;
+ }
+ }
+ }
+ }
+}
+
+.lightMode {
+ .integrations-container {
+ .integrations-content {
+ .integrations-header {
+ .title {
+ color: var(--bg-ink-500);
+ }
+ .subtitle {
+ color: var(--bg-slate-200);
+ }
+ .integrations-search-input {
+ border: 1px solid rgba(53, 59, 76, 0.2);
+ background: var(--bg-vanilla-100);
+ }
+ }
+
+ .integrations-list {
+ .error-container {
+ border: 1px solid rgba(53, 59, 76, 0.2);
+ background: var(--bg-vanilla-100);
+ }
+
+ .integrations-list-item {
+ border: 1px solid rgba(53, 59, 76, 0.2);
+ background: var(--bg-vanilla-100);
+
+ .list-item-image-container {
+ border: 1.111px solid var(--bg-vanilla-300);
+ background: var(--bg-vanilla-100);
+ }
+
+ .list-item-details {
+ .heading {
+ color: var(--bg-ink-500);
+ }
+
+ .description {
+ color: var(--bg-slate-200);
+ }
+ }
+
+ .configure-btn {
+ border: 1px solid rgba(53, 59, 76, 0.2);
+ background: var(--bg-vanilla-200);
+ color: var(--bg-ink-500);
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/frontend/src/pages/Integrations/Integrations.tsx b/frontend/src/pages/Integrations/Integrations.tsx
new file mode 100644
index 0000000000..bda4184eab
--- /dev/null
+++ b/frontend/src/pages/Integrations/Integrations.tsx
@@ -0,0 +1,62 @@
+import './Integrations.styles.scss';
+
+import useUrlQuery from 'hooks/useUrlQuery';
+import { useCallback, useMemo, useState } from 'react';
+import { useHistory, useLocation } from 'react-router-dom';
+
+import Header from './Header';
+import IntegrationDetailPage from './IntegrationDetailPage/IntegrationDetailPage';
+import IntegrationsList from './IntegrationsList';
+
+function Integrations(): JSX.Element {
+ const urlQuery = useUrlQuery();
+ const history = useHistory();
+ const location = useLocation();
+
+ const selectedIntegration = useMemo(() => urlQuery.get('integration'), [
+ urlQuery,
+ ]);
+
+ const setSelectedIntegration = useCallback(
+ (integration: string | null) => {
+ if (integration) {
+ urlQuery.set('integration', integration);
+ } else {
+ urlQuery.set('integration', '');
+ }
+ const generatedUrl = `${location.pathname}?${urlQuery.toString()}`;
+ history.push(generatedUrl);
+ },
+ [history, location.pathname, urlQuery],
+ );
+
+ const [activeDetailTab, setActiveDetailTab] = useState(
+ 'overview',
+ );
+
+ const [searchTerm, setSearchTerm] = useState('');
+ return (
+
+
+ {selectedIntegration && activeDetailTab ? (
+
+ ) : (
+ <>
+
+
+ >
+ )}
+
+
+ );
+}
+
+export default Integrations;
diff --git a/frontend/src/pages/Integrations/IntegrationsList.tsx b/frontend/src/pages/Integrations/IntegrationsList.tsx
new file mode 100644
index 0000000000..47cd76bb68
--- /dev/null
+++ b/frontend/src/pages/Integrations/IntegrationsList.tsx
@@ -0,0 +1,120 @@
+/* eslint-disable jsx-a11y/no-static-element-interactions */
+/* eslint-disable jsx-a11y/click-events-have-key-events */
+import './Integrations.styles.scss';
+
+import { Color } from '@signozhq/design-tokens';
+import { Button, List, Typography } from 'antd';
+import { useGetAllIntegrations } from 'hooks/Integrations/useGetAllIntegrations';
+import { MoveUpRight, RotateCw } from 'lucide-react';
+import { Dispatch, SetStateAction, useMemo } from 'react';
+import { isCloudUser } from 'utils/app';
+
+import { handleContactSupport } from './utils';
+
+interface IntegrationsListProps {
+ setSelectedIntegration: (id: string) => void;
+ setActiveDetailTab: Dispatch<SetStateAction<string>>;
+ searchTerm: string;
+}
+
+function IntegrationsList(props: IntegrationsListProps): JSX.Element {
+ const { setSelectedIntegration, searchTerm, setActiveDetailTab } = props;
+
+ const {
+ data,
+ isFetching,
+ isLoading,
+ isRefetching,
+ isError,
+ refetch,
+ } = useGetAllIntegrations();
+
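+ // Case-insensitive filter on integration titles, driven by the search input in the header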
+ const filteredDataList = useMemo(() => {
+ if (data?.data.data.integrations) {
+ return data?.data.data.integrations.filter((item) =>
+ item.title.toLowerCase().includes(searchTerm.toLowerCase()),
+ );
+ }
+ return [];
+ }, [data?.data.data.integrations, searchTerm]);
+
+ const loading = isLoading || isFetching || isRefetching;
+
+ return (
+
+ {!loading && isError && (
+
+
+
+
+ Something went wrong :/ Please retry or contact support.
+
+
+
=> refetch()}
+ icon={ }
+ >
+ Retry
+
+
handleContactSupport(isCloudUser())}
+ >
+ Contact Support
+
+
+
+
+
+
+ )}
+ {!isError && (
+
(
+ {
+ setSelectedIntegration(item.id);
+ setActiveDetailTab('overview');
+ }}
+ >
+
+
+
+
+
+ {item.title}
+
+ {item.description}
+
+
+
+ {
+ event.stopPropagation();
+ setSelectedIntegration(item.id);
+ setActiveDetailTab('configuration');
+ }}
+ >
+ Configure
+
+
+ )}
+ />
+ )}
+
+ );
+}
+
+export default IntegrationsList;
diff --git a/frontend/src/pages/Integrations/index.ts b/frontend/src/pages/Integrations/index.ts
new file mode 100644
index 0000000000..806360c344
--- /dev/null
+++ b/frontend/src/pages/Integrations/index.ts
@@ -0,0 +1,3 @@
+import Integrations from './Integrations';
+
+export default Integrations;
diff --git a/frontend/src/pages/Integrations/utils.ts b/frontend/src/pages/Integrations/utils.ts
new file mode 100644
index 0000000000..81c70b6091
--- /dev/null
+++ b/frontend/src/pages/Integrations/utils.ts
@@ -0,0 +1,9 @@
+import history from 'lib/history';
+
+export const handleContactSupport = (isCloudUser: boolean): void => {
+ if (isCloudUser) {
+ history.push('/support');
+ } else {
+ window.open('https://signoz.io/slack', '_blank');
+ }
+};
diff --git a/frontend/src/pages/IntegrationsMarketPlace/IntegrationsMarketPlace.tsx b/frontend/src/pages/IntegrationsMarketPlace/IntegrationsMarketPlace.tsx
new file mode 100644
index 0000000000..c63f8a659a
--- /dev/null
+++ b/frontend/src/pages/IntegrationsMarketPlace/IntegrationsMarketPlace.tsx
@@ -0,0 +1,9 @@
+function IntegrationsMarketPlace(): JSX.Element {
+ return (
+
+
IntegrationsMarketPlace
+
+ );
+}
+
+export default IntegrationsMarketPlace;
diff --git a/frontend/src/pages/IntegrationsMarketPlace/index.ts b/frontend/src/pages/IntegrationsMarketPlace/index.ts
new file mode 100644
index 0000000000..6c088880e7
--- /dev/null
+++ b/frontend/src/pages/IntegrationsMarketPlace/index.ts
@@ -0,0 +1,3 @@
+import IntegrationsMarketPlace from './IntegrationsMarketPlace';
+
+export default IntegrationsMarketPlace;
diff --git a/frontend/src/pages/IntegrationsModulePage/IntegrationsModulePage.styles.scss b/frontend/src/pages/IntegrationsModulePage/IntegrationsModulePage.styles.scss
new file mode 100644
index 0000000000..4ff58bea40
--- /dev/null
+++ b/frontend/src/pages/IntegrationsModulePage/IntegrationsModulePage.styles.scss
@@ -0,0 +1,27 @@
+.integrations-module-container {
+ .ant-tabs-nav {
+ padding: 0 16px;
+ margin-bottom: 0px;
+
+ &::before {
+ border-bottom: 1px solid var(--bg-slate-400) !important;
+ }
+ }
+
+ .tab-item {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ gap: 8px;
+ }
+}
+
+.lightMode {
+ .integrations-module-container {
+ .ant-tabs-nav {
+ &::before {
+ border-bottom: 1px solid var(--bg-vanilla-400) !important;
+ }
+ }
+ }
+}
diff --git a/frontend/src/pages/IntegrationsModulePage/IntegrationsModulePage.tsx b/frontend/src/pages/IntegrationsModulePage/IntegrationsModulePage.tsx
new file mode 100644
index 0000000000..bdcf05b2de
--- /dev/null
+++ b/frontend/src/pages/IntegrationsModulePage/IntegrationsModulePage.tsx
@@ -0,0 +1,21 @@
+import './IntegrationsModulePage.styles.scss';
+
+import RouteTab from 'components/RouteTab';
+import { TabRoutes } from 'components/RouteTab/types';
+import history from 'lib/history';
+import { useLocation } from 'react-use';
+
+import { installedIntegrations } from './constants';
+
+function IntegrationsModulePage(): JSX.Element {
+ const { pathname } = useLocation();
+
+ const routes: TabRoutes[] = [installedIntegrations];
+ return (
+
+
+
+ );
+}
+
+export default IntegrationsModulePage;
diff --git a/frontend/src/pages/IntegrationsModulePage/constants.tsx b/frontend/src/pages/IntegrationsModulePage/constants.tsx
new file mode 100644
index 0000000000..d0100798a8
--- /dev/null
+++ b/frontend/src/pages/IntegrationsModulePage/constants.tsx
@@ -0,0 +1,15 @@
+import { TabRoutes } from 'components/RouteTab/types';
+import ROUTES from 'constants/routes';
+import { Compass } from 'lucide-react';
+import Integrations from 'pages/Integrations';
+
+export const installedIntegrations: TabRoutes = {
+ Component: Integrations,
+ name: (
+
+ Integrations
+
+ ),
+ route: ROUTES.INTEGRATIONS_INSTALLED,
+ key: ROUTES.INTEGRATIONS_INSTALLED,
+};
diff --git a/frontend/src/pages/IntegrationsModulePage/index.ts b/frontend/src/pages/IntegrationsModulePage/index.ts
new file mode 100644
index 0000000000..690904079a
--- /dev/null
+++ b/frontend/src/pages/IntegrationsModulePage/index.ts
@@ -0,0 +1,3 @@
+import IntegrationsModulePage from './IntegrationsModulePage';
+
+export default IntegrationsModulePage;
diff --git a/frontend/src/pages/LogsExplorer/LogsExplorer.styles.scss b/frontend/src/pages/LogsExplorer/LogsExplorer.styles.scss
new file mode 100644
index 0000000000..95d53fe9a4
--- /dev/null
+++ b/frontend/src/pages/LogsExplorer/LogsExplorer.styles.scss
@@ -0,0 +1,11 @@
+.log-explorer-query-container {
+ display: flex;
+ flex-direction: column;
+ flex: 1;
+
+ .logs-explorer-views {
+ flex: 1;
+ display: flex;
+ flex-direction: column;
+ }
+}
\ No newline at end of file
diff --git a/frontend/src/pages/LogsExplorer/index.tsx b/frontend/src/pages/LogsExplorer/index.tsx
index 8b91b955ea..0cc2c07b4d 100644
--- a/frontend/src/pages/LogsExplorer/index.tsx
+++ b/frontend/src/pages/LogsExplorer/index.tsx
@@ -1,8 +1,8 @@
-import { Col, Row } from 'antd';
+import './LogsExplorer.styles.scss';
+
import ExplorerCard from 'components/ExplorerCard/ExplorerCard';
import LogExplorerQuerySection from 'container/LogExplorerQuerySection';
import LogsExplorerViews from 'container/LogsExplorerViews';
-// import LogsTopNav from 'container/LogsTopNav';
import LeftToolbarActions from 'container/QueryBuilder/components/ToolbarActions/LeftToolbarActions';
import RightToolbarActions from 'container/QueryBuilder/components/ToolbarActions/RightToolbarActions';
import Toolbar from 'container/Toolbar/Toolbar';
@@ -87,19 +87,19 @@ function LogsExplorer(): JSX.Element {
/>
-
-
+
);
diff --git a/frontend/src/pages/LogsExplorer/styles.ts b/frontend/src/pages/LogsExplorer/styles.ts
index 3e479cc001..54d553bc3c 100644
--- a/frontend/src/pages/LogsExplorer/styles.ts
+++ b/frontend/src/pages/LogsExplorer/styles.ts
@@ -3,6 +3,9 @@ import { themeColors } from 'constants/theme';
import styled from 'styled-components';
export const WrapperStyled = styled.div`
+ display: flex;
+ flex-direction: column;
+ flex: 1;
color: ${themeColors.lightWhite};
`;
diff --git a/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss b/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss
index 144264b532..acba2781df 100644
--- a/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss
+++ b/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss
@@ -1,9 +1,11 @@
.logs-module-container {
- // margin: 0 -1rem; // as we have added a margin of 0 1rem components container, have to adjust the margin with negative to style the logs explorer as we want
+ flex: 1;
+ display: flex;
+ flex-direction: column;
- // .ant-tabs-content-holder {
- // margin: 0 -1rem;
- // }
+ .ant-tabs {
+ flex: 1;
+ }
.ant-tabs-nav {
padding: 0 16px;
@@ -14,6 +16,22 @@
}
}
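+ // Stretch the tab content chain so the logs explorer can flex-fill the page height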
+ .ant-tabs-content-holder {
+ display: flex;
+
+ .ant-tabs-content {
+ flex: 1;
+ display: flex;
+ flex-direction: column;
+
+ .ant-tabs-tabpane {
+ flex: 1;
+ display: flex;
+ flex-direction: column;
+ }
+ }
+ }
+
.tab-item {
display: flex;
justify-content: center;
@@ -30,4 +48,4 @@
}
}
}
-}
\ No newline at end of file
+}
diff --git a/frontend/src/pages/MetricsApplication/ApDex/ApDexApplication.tsx b/frontend/src/pages/MetricsApplication/ApDex/ApDexApplication.tsx
index 3f088a37a8..00b29c7809 100644
--- a/frontend/src/pages/MetricsApplication/ApDex/ApDexApplication.tsx
+++ b/frontend/src/pages/MetricsApplication/ApDex/ApDexApplication.tsx
@@ -10,7 +10,9 @@ import { Button } from '../styles';
import ApDexSettings from './ApDexSettings';
function ApDexApplication(): JSX.Element {
- const { servicename } = useParams();
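+ // Route params arrive URL-encoded; decode the service name before use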
+ const { servicename: encodedServiceName } = useParams();
+ const servicename = decodeURIComponent(encodedServiceName);
+
const {
data,
isLoading,
diff --git a/frontend/src/pages/MetricsApplication/index.tsx b/frontend/src/pages/MetricsApplication/index.tsx
index 94cbd5d99e..1b0229e5d1 100644
--- a/frontend/src/pages/MetricsApplication/index.tsx
+++ b/frontend/src/pages/MetricsApplication/index.tsx
@@ -13,7 +13,11 @@ import { MetricsApplicationTab, TAB_KEY_VS_LABEL } from './types';
import useMetricsApplicationTabKey from './useMetricsApplicationTabKey';
function MetricsApplication(): JSX.Element {
- const { servicename } = useParams<{ servicename: string }>();
+ const { servicename: encodedServiceName } = useParams<{
+ servicename: string;
+ }>();
+
+ const servicename = decodeURIComponent(encodedServiceName);
const activeKey = useMetricsApplicationTabKey();
diff --git a/frontend/src/pages/Pipelines/Pipelines.styles.scss b/frontend/src/pages/Pipelines/Pipelines.styles.scss
index 8521aab75e..78578006ee 100644
--- a/frontend/src/pages/Pipelines/Pipelines.styles.scss
+++ b/frontend/src/pages/Pipelines/Pipelines.styles.scss
@@ -2,4 +2,8 @@
.ant-tabs-content {
padding: 0 16px;
}
+
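+ // Force inactive tab panes to stay hidden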
+ .ant-tabs-tabpane-hidden {
+ display: none !important;
+ }
}
diff --git a/frontend/src/pages/SaveView/SaveView.styles.scss b/frontend/src/pages/SaveView/SaveView.styles.scss
index 461914c1d0..292a0b8d06 100644
--- a/frontend/src/pages/SaveView/SaveView.styles.scss
+++ b/frontend/src/pages/SaveView/SaveView.styles.scss
@@ -1,171 +1,170 @@
.save-view-container {
- margin-top: 70px;
- display: flex;
- justify-content: center;
- width: 100%;
+ margin-top: 70px;
+ display: flex;
+ justify-content: center;
+ width: 100%;
- .save-view-content {
- width: calc(100% - 30px);
- max-width: 736px;
+ .save-view-content {
+ width: calc(100% - 30px);
+ max-width: 736px;
-
- .title {
- color: var(--bg-vanilla-100);
- font-size: var(--font-size-lg);
- font-style: normal;
- font-weight: var(--font-weight-normal);
- line-height: 28px; /* 155.556% */
- letter-spacing: -0.09px;
- }
-
- .subtitle {
- color: var(---bg-vanilla-400);
- font-size: var(--font-size-sm);
- font-style: normal;
- font-weight: var(--font-weight-normal);
- line-height: 20px; /* 142.857% */
- letter-spacing: -0.07px;
- }
-
- .ant-input-affix-wrapper {
- margin-top: 16px;
- margin-bottom: 8px;
- }
+ .title {
+ color: var(--bg-vanilla-100);
+ font-size: var(--font-size-lg);
+ font-style: normal;
+ font-weight: var(--font-weight-normal);
+ line-height: 28px; /* 155.556% */
+ letter-spacing: -0.09px;
+ }
- .ant-table-row {
- .ant-table-cell {
- padding: 0;
- border: none;
- background: var(--bg-ink-500);
-
- }
- .column-render {
- margin: 8px 0 !important;
- padding: 16px;
- border-radius: 6px;
- border: 1px solid var(--bg-slate-500);
- background: var(--bg-ink-400);
+ .subtitle {
+ color: var(--bg-vanilla-400);
+ font-size: var(--font-size-sm);
+ font-style: normal;
+ font-weight: var(--font-weight-normal);
+ line-height: 20px; /* 142.857% */
+ letter-spacing: -0.07px;
+ }
- .title-with-action {
- display: flex;
- justify-content: space-between;
- align-items: center;
+ .ant-input-affix-wrapper {
+ margin-top: 16px;
+ margin-bottom: 8px;
+ }
- .save-view-title {
- display: flex;
- align-items: center;
- gap: 6px;
- .dot {
- min-height: 6px;
- min-width: 6px;
- border-radius: 50%;
- }
- .ant-typography {
- color: var(--bg-vanilla-400);
- font-size: var(--font-size-sm);
- font-style: normal;
- font-weight: var(--font-weight-medium);
- line-height: 20px;
- letter-spacing: -0.07px;
- }
- }
+ .ant-table-row {
+ .ant-table-cell {
+ padding: 0;
+ border: none;
+ background: var(--bg-ink-500);
+ }
+ .column-render {
+ margin: 8px 0 !important;
+ padding: 16px;
+ border-radius: 6px;
+ border: 1px solid var(--bg-slate-500);
+ background: var(--bg-ink-400);
- .action-btn {
- display: flex;
- align-items: center;
- gap: 20px;
- cursor: pointer;
- }
+ .title-with-action {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
- }
- .view-details {
- margin-top: 8px;
- display: flex;
- align-items: center;
+ .save-view-title {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ .dot {
+ min-height: 6px;
+ min-width: 6px;
+ border-radius: 50%;
+ }
+ .ant-typography {
+ color: var(--bg-vanilla-400);
+ font-size: var(--font-size-sm);
+ font-style: normal;
+ font-weight: var(--font-weight-medium);
+ line-height: 20px;
+ letter-spacing: -0.07px;
+ }
+ }
- .view-tag {
- width: 14px;
- height: 14px;
- border-radius: 50px;
- background: var(--bg-slate-300);
- display: flex;
- justify-content: center;
- align-items: center;
+ .action-btn {
+ display: flex;
+ align-items: center;
+ gap: 20px;
+ cursor: pointer;
- .tag-text {
- color: var(--bg-vanilla-400);
- leading-trim: both;
- text-edge: cap;
- font-size: 10px;
- font-style: normal;
- font-weight: var(--font-weight-normal);
- line-height: normal;
- letter-spacing: -0.05px;
- }
- }
+ .hidden {
+ display: none;
+ }
+ }
+ }
+ .view-details {
+ margin-top: 8px;
+ display: flex;
+ align-items: center;
- .view-created-by {
- margin-left: 8px;
- }
+ .view-tag {
+ width: 14px;
+ height: 14px;
+ border-radius: 50px;
+ background: var(--bg-slate-300);
+ display: flex;
+ justify-content: center;
+ align-items: center;
- .view-created-at {
- margin-left: 24px;
- display: flex;
- align-items: center;
- .ant-typography {
- margin-left: 6px;
- color: var(--bg-vanilla-400);
- font-size: var(--font-size-sm);
- font-style: normal;
- font-weight: var(--font-weight-normal);
- line-height: 18px; /* 128.571% */
- letter-spacing: -0.07px;
- }
- }
- }
- }
- }
+ .tag-text {
+ color: var(--bg-vanilla-400);
+ leading-trim: both;
+ text-edge: cap;
+ font-size: 10px;
+ font-style: normal;
+ font-weight: var(--font-weight-normal);
+ line-height: normal;
+ letter-spacing: -0.05px;
+ }
+ }
- .ant-pagination-item {
+ .view-created-by {
+ margin-left: 8px;
+ }
- display: flex;
- justify-content: center;
- align-items: center;
+ .view-created-at {
+ margin-left: 24px;
+ display: flex;
+ align-items: center;
+ .ant-typography {
+ margin-left: 6px;
+ color: var(--bg-vanilla-400);
+ font-size: var(--font-size-sm);
+ font-style: normal;
+ font-weight: var(--font-weight-normal);
+ line-height: 18px; /* 128.571% */
+ letter-spacing: -0.07px;
+ }
+ }
+ }
+ }
+ }
- > a {
- color: var(--bg-vanilla-400);
- font-variant-numeric: lining-nums tabular-nums slashed-zero;
- font-feature-settings: 'dlig' on, 'salt' on, 'case' on, 'cpsp' on;
- font-size: var(--font-size-sm);
- font-style: normal;
- font-weight: var(--font-weight-normal);
- line-height: 20px; /* 142.857% */
- }
+ .ant-pagination-item {
+ display: flex;
+ justify-content: center;
+ align-items: center;
- }
+ > a {
+ color: var(--bg-vanilla-400);
+ font-variant-numeric: lining-nums tabular-nums slashed-zero;
+ font-feature-settings: 'dlig' on, 'salt' on, 'case' on, 'cpsp' on;
+ font-size: var(--font-size-sm);
+ font-style: normal;
+ font-weight: var(--font-weight-normal);
+ line-height: 20px; /* 142.857% */
+ }
+ }
- .ant-pagination-item-active {
- background-color: var(--bg-robin-500);
- > a {
- color: var(--bg-ink-500) !important;
- font-size: var(--font-size-sm);
- font-style: normal;
- font-weight: var(--font-weight-medium);
- line-height: 20px;
- }
- }
- }
-}
+ .ant-pagination-item-active {
+ background-color: var(--bg-robin-500);
+ > a {
+ color: var(--bg-ink-500) !important;
+ font-size: var(--font-size-sm);
+ font-style: normal;
+ font-weight: var(--font-weight-medium);
+ line-height: 20px;
+ }
+ }
+ }
+}
.delete-view-modal {
- width: calc(100% - 30px) !important; /* Adjust the 20px as needed */
- max-width: 384px;
+ width: calc(100% - 30px) !important; /* Adjust the 30px as needed */
+ max-width: 384px;
.ant-modal-content {
padding: 0;
border-radius: 4px;
border: 1px solid var(--bg-slate-500);
background: var(--bg-ink-400);
- box-shadow: 0px -4px 16px 2px rgba(0, 0, 0, 0.20);
+ box-shadow: 0px -4px 16px 2px rgba(0, 0, 0, 0.2);
.ant-modal-header {
padding: 16px;
@@ -177,11 +176,11 @@
.ant-typography {
color: var(--bg-vanilla-400);
- font-size: var(--font-size-sm);
- font-style: normal;
- font-weight: var(--font-weight-normal);
- line-height: 20px;
- letter-spacing: -0.07px;
+ font-size: var(--font-size-sm);
+ font-style: normal;
+ font-weight: var(--font-weight-normal);
+ line-height: 20px;
+ letter-spacing: -0.07px;
}
.save-view-input {
@@ -211,7 +210,6 @@
}
}
}
-
}
.ant-modal-footer {
@@ -223,127 +221,126 @@
.cancel-btn {
display: flex;
align-items: center;
- border: none;
+ border: none;
border-radius: 2px;
- background: var(--bg-slate-500);
+ background: var(--bg-slate-500);
}
- .delete-btn {
- display: flex;
+ .delete-btn {
+ display: flex;
align-items: center;
- border: none;
- border-radius: 2px;
- background: var(--bg-cherry-500);
- margin-left: 12px;
- }
+ border: none;
+ border-radius: 2px;
+ background: var(--bg-cherry-500);
+ margin-left: 12px;
+ }
- .delete-btn:hover {
- color: var(--bg-vanilla-100);
- background: var(--bg-cherry-600);
- }
+ .delete-btn:hover {
+ color: var(--bg-vanilla-100);
+ background: var(--bg-cherry-600);
+ }
}
}
.title {
color: var(--bg-vanilla-100);
- font-size: var(--font-size-sm);
- font-style: normal;
- font-weight: var(--font-weight-medium);
- line-height: 20px; /* 142.857% */
+ font-size: var(--font-size-sm);
+ font-style: normal;
+ font-weight: var(--font-weight-medium);
+ line-height: 20px; /* 142.857% */
}
}
.lightMode {
- .save-view-container {
- .save-view-content {
+ .save-view-container {
+ .save-view-content {
+ .title {
+ color: var(--bg-ink-500);
+ }
- .title {
- color: var(--bg-ink-500);
- }
+ .ant-table-row {
+ .ant-table-cell {
+ background: var(--bg-vanilla-200);
+ }
- .ant-table-row {
- .ant-table-cell {
- background: var(--bg-vanilla-200);
- }
+ &:hover {
+ .ant-table-cell {
+ background: var(--bg-vanilla-200) !important;
+ }
+ }
- &:hover {
- .ant-table-cell {
- background: var(--bg-vanilla-200) !important;
- }
- }
+ .column-render {
+ border: 1px solid var(--bg-vanilla-200);
+ background: var(--bg-vanilla-100);
- .column-render {
- border: 1px solid var(--bg-vanilla-200);
- background: var(--bg-vanilla-100);
+ .title-with-action {
+ .save-view-title {
+ .ant-typography {
+ color: var(--bg-ink-500);
+ }
+ }
- .title-with-action {
- .save-view-title {
- .ant-typography {
- color: var(--bg-ink-500);
- }
- }
+ .action-btn {
+ .ant-typography {
+ color: var(--bg-ink-500);
+ }
+ }
+ }
- .action-btn {
- .ant-typography {
- color: var(--bg-ink-500);
- }
- }
- }
+ .view-details {
+ .view-tag {
+ background: var(--bg-vanilla-200);
+ .tag-text {
+ color: var(--bg-ink-500);
+ }
+ }
- .view-details {
- .view-tag {
- background: var(--bg-vanilla-200);
- .tag-text {
- color: var(--bg-ink-500);
- }
- }
+ .view-created-by {
+ color: var(--bg-ink-500);
+ }
- .view-created-by {
- color: var(--bg-ink-500);
- }
+ .view-created-at {
+ .ant-typography {
+ color: var(--bg-ink-500);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
- .view-created-at {
- .ant-typography {
- color: var(--bg-ink-500);
- }
- }
- }
- }
- }
- }
- }
+ .delete-view-modal {
+ .ant-modal-content {
+ border: 1px solid var(--bg-vanilla-200);
+ background: var(--bg-vanilla-100);
- .delete-view-modal {
- .ant-modal-content {
- border: 1px solid var(--bg-vanilla-200);
- background: var(--bg-vanilla-100);
+ .ant-modal-header {
+ background: var(--bg-vanilla-100);
- .ant-modal-header {
- background: var(--bg-vanilla-100);
+ .title {
+ color: var(--bg-ink-500);
+ }
+ }
- .title {
- color: var(--bg-ink-500);
- }
- }
+ .ant-modal-body {
+ .ant-typography {
+ color: var(--bg-ink-500);
+ }
- .ant-modal-body {
- .ant-typography {
- color: var(--bg-ink-500);
- }
+ .save-view-input {
+ .ant-input {
+ background: var(--bg-vanilla-200);
+ color: var(--bg-ink-500);
+ }
+ }
+ }
- .save-view-input {
- .ant-input {
- background: var(--bg-vanilla-200);
- color: var(--bg-ink-500);
- }
- }
- }
-
- .ant-modal-footer {
- .cancel-btn {
- background: var(--bg-vanilla-300);
- color: var(--bg-ink-400);
- }
- }
- }
- }
-}
\ No newline at end of file
+ .ant-modal-footer {
+ .cancel-btn {
+ background: var(--bg-vanilla-300);
+ color: var(--bg-ink-400);
+ }
+ }
+ }
+ }
+}
diff --git a/frontend/src/pages/SaveView/index.tsx b/frontend/src/pages/SaveView/index.tsx
index afdf05686b..a0ec01a5fd 100644
--- a/frontend/src/pages/SaveView/index.tsx
+++ b/frontend/src/pages/SaveView/index.tsx
@@ -32,14 +32,20 @@ import {
} from 'lucide-react';
import { ChangeEvent, useEffect, useState } from 'react';
import { useTranslation } from 'react-i18next';
+import { useSelector } from 'react-redux';
import { useLocation } from 'react-router-dom';
+import { AppState } from 'store/reducers';
import { ICompositeMetricQuery } from 'types/api/alerts/compositeQuery';
import { ViewProps } from 'types/api/saveViews/types';
import { DataSource } from 'types/common/queryBuilder';
+import AppReducer from 'types/reducer/app';
+import { USER_ROLES } from 'types/roles';
import { ROUTES_VS_SOURCEPAGE, SOURCEPAGE_VS_ROUTES } from './constants';
import { deleteViewHandler } from './utils';
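+// Roles allowed to edit or delete a saved view; other roles get a read-only list.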
+const allowedRoles = [USER_ROLES.ADMIN, USER_ROLES.AUTHOR, USER_ROLES.EDITOR];
+
function SaveView(): JSX.Element {
const { pathname } = useLocation();
const sourcepage = ROUTES_VS_SOURCEPAGE[pathname];
@@ -61,6 +67,8 @@ function SaveView(): JSX.Element {
setIsDeleteModalOpen(false);
};
+ const { role } = useSelector<AppState, AppReducer>((state) => state.app);
+
const handleDeleteModelOpen = (uuid: string, name: string): void => {
setActiveViewKey(uuid);
setActiveViewName(name);
@@ -217,6 +225,9 @@ function SaveView(): JSX.Element {
// Combine time and date
const formattedDateAndTime = `${formattedTime} ⎯ ${formattedDate}`;
+
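+ // Gate the edit/delete actions on the current user's role.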
+ const isEditDeleteSupported = allowedRoles.includes(role as string);
+
return (
@@ -234,11 +245,13 @@ function SaveView(): JSX.Element {
<PenLine
+ className={isEditDeleteSupported ? '' : 'hidden'}
onClick={(): void => handleEditModelOpen(view, bgColor)}
/>
<Compass onClick={(): void => handleRedirectQuery(view)} />
<Trash2
+ className={isEditDeleteSupported ? '' : 'hidden'}
onClick={(): void => handleDeleteModelOpen(view.uuid, view.name)}
/>
diff --git a/frontend/src/pages/Services/Metrics.test.tsx b/frontend/src/pages/Services/Metrics.test.tsx
index 37c13ee84c..fcafd76466 100644
--- a/frontend/src/pages/Services/Metrics.test.tsx
+++ b/frontend/src/pages/Services/Metrics.test.tsx
@@ -6,9 +6,11 @@ describe('Services', () => {
test('Should render the component', () => {
render(<Metrics />);
- const inputBox = screen.getByRole('combobox');
+ const inputBox = screen.getByTestId('resource-attributes-filter');
expect(inputBox).toBeInTheDocument();
+ expect(screen.getByTestId('resource-environment-filter')).toBeInTheDocument();
+
const application = screen.getByRole('columnheader', {
name: /application search/i,
});
diff --git a/frontend/src/pages/TracesExplorer/index.tsx b/frontend/src/pages/TracesExplorer/index.tsx
index 3d39aab2cb..99527fba98 100644
--- a/frontend/src/pages/TracesExplorer/index.tsx
+++ b/frontend/src/pages/TracesExplorer/index.tsx
@@ -5,7 +5,7 @@ import axios from 'axios';
import ExplorerCard from 'components/ExplorerCard/ExplorerCard';
import { AVAILABLE_EXPORT_PANEL_TYPES } from 'constants/panelTypes';
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
-import ExplorerOptions from 'container/ExplorerOptions/ExplorerOptions';
+import ExplorerOptionWrapper from 'container/ExplorerOptions/ExplorerOptionWrapper';
import ExportPanel from 'container/ExportPanel';
import RightToolbarActions from 'container/QueryBuilder/components/ToolbarActions/RightToolbarActions';
import DateTimeSelector from 'container/TopNav/DateTimeSelectionV2';
@@ -208,12 +208,12 @@ function TracesExplorer(): JSX.Element {
onChange={handleExplorerTabChange}
/>
- <ExplorerOptions
+ <ExplorerOptionWrapper
diff --git a/frontend/src/providers/Dashboard/Dashboard.tsx b/frontend/src/providers/Dashboard/Dashboard.tsx
index a7fa94c044..326a4aae83 100644
--- a/frontend/src/providers/Dashboard/Dashboard.tsx
+++ b/frontend/src/providers/Dashboard/Dashboard.tsx
@@ -52,6 +52,7 @@ const DashboardContext = createContext<IDashboardContext>({
updatedTimeRef: {} as React.MutableRefObject,
toScrollWidgetId: '',
setToScrollWidgetId: () => {},
+ updateLocalStorageDashboardVariables: () => {},
});
interface Props {
@@ -96,9 +97,10 @@ export function DashboardProvider({
const [selectedDashboard, setSelectedDashboard] = useState<Dashboard>();
- const { currentDashboard } = useDashboardVariablesFromLocalStorage(
- dashboardId,
- );
+ const {
+ currentDashboard,
+ updateLocalStorageDashboardVariables,
+ } = useDashboardVariablesFromLocalStorage(dashboardId);
const updatedTimeRef = useRef(null); // Using ref to store the updated time
const modalRef = useRef(null);
@@ -320,6 +322,7 @@ export function DashboardProvider({
setSelectedDashboard,
updatedTimeRef,
setToScrollWidgetId,
+ updateLocalStorageDashboardVariables,
}),
// eslint-disable-next-line react-hooks/exhaustive-deps
[
@@ -330,6 +333,8 @@ export function DashboardProvider({
dashboardId,
layouts,
toScrollWidgetId,
+ updateLocalStorageDashboardVariables,
+ currentDashboard,
],
);
diff --git a/frontend/src/providers/Dashboard/types.ts b/frontend/src/providers/Dashboard/types.ts
index a8e249015e..1f171cb621 100644
--- a/frontend/src/providers/Dashboard/types.ts
+++ b/frontend/src/providers/Dashboard/types.ts
@@ -19,4 +19,15 @@ export interface IDashboardContext {
updatedTimeRef: React.MutableRefObject;
toScrollWidgetId: string;
setToScrollWidgetId: React.Dispatch<React.SetStateAction<string>>;
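+ // Persists a variable's selected value (and its all-selected flag) to local storage.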
+ updateLocalStorageDashboardVariables: (
+ id: string,
+ selectedValue:
+ | string
+ | number
+ | boolean
+ | (string | number | boolean)[]
+ | null
+ | undefined,
+ allSelected: boolean,
+ ) => void;
}
diff --git a/frontend/src/providers/QueryBuilder.tsx b/frontend/src/providers/QueryBuilder.tsx
index 80bc673a83..1fde9fc224 100644
--- a/frontend/src/providers/QueryBuilder.tsx
+++ b/frontend/src/providers/QueryBuilder.tsx
@@ -68,6 +68,7 @@ export const QueryBuilderContext = createContext<QueryBuilderContextType>({
removeQueryBuilderEntityByIndex: () => {},
removeQueryTypeItemByIndex: () => {},
addNewBuilderQuery: () => {},
+ cloneQuery: () => {},
addNewFormula: () => {},
addNewQueryItem: () => {},
redirectWithQueryBuilderData: () => {},
@@ -307,6 +308,23 @@ export function QueryBuilderProvider({
[initialDataSource],
);
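+ // Copies an existing query, assigning the next unused name and
+ // expression (A, B, C, ...) so the clone does not clash with current queries.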
+ const cloneNewBuilderQuery = useCallback(
+ (queries: IBuilderQuery[], query: IBuilderQuery): IBuilderQuery => {
+ const existNames = queries.map((item) => item.queryName);
+ const clonedQuery: IBuilderQuery = {
+ ...query,
+ queryName: createNewBuilderItemName({ existNames, sourceNames: alphabet }),
+ expression: createNewBuilderItemName({
+ existNames,
+ sourceNames: alphabet,
+ }),
+ };
+
+ return clonedQuery;
+ },
+ [],
+ );
+
const createNewBuilderFormula = useCallback((formulas: IBuilderFormula[]) => {
const existNames = formulas.map((item) => item.queryName);
@@ -373,6 +391,28 @@ export function QueryBuilderProvider({
});
}, [createNewBuilderQuery]);
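+ // Adds a clone of `query` to the builder state, bailing out silently
+ // once MAX_QUERIES is reached.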
+ const cloneQuery = useCallback(
+ (type: string, query: IBuilderQuery): void => {
+ setCurrentQuery((prevState) => {
+ if (prevState.builder.queryData.length >= MAX_QUERIES) return prevState;
+
+ const clonedQuery = cloneNewBuilderQuery(
+ prevState.builder.queryData,
+ query,
+ );
+
+ return {
+ ...prevState,
+ builder: {
+ ...prevState.builder,
+ queryData: [...prevState.builder.queryData, clonedQuery],
+ },
+ };
+ });
+ },
+ [cloneNewBuilderQuery],
+ );
+
const addNewFormula = useCallback(() => {
setCurrentQuery((prevState) => {
if (prevState.builder.queryFormulas.length >= MAX_FORMULAS) return prevState;
@@ -647,6 +687,7 @@ export function QueryBuilderProvider({
handleSetConfig,
removeQueryBuilderEntityByIndex,
removeQueryTypeItemByIndex,
+ cloneQuery,
addNewBuilderQuery,
addNewFormula,
addNewQueryItem,
@@ -671,6 +712,7 @@ export function QueryBuilderProvider({
handleSetConfig,
removeQueryBuilderEntityByIndex,
removeQueryTypeItemByIndex,
+ cloneQuery,
addNewBuilderQuery,
addNewFormula,
addNewQueryItem,
diff --git a/frontend/src/store/actions/metrics/getService.ts b/frontend/src/store/actions/metrics/getService.ts
index 8de8f3c134..8afdd9bff9 100644
--- a/frontend/src/store/actions/metrics/getService.ts
+++ b/frontend/src/store/actions/metrics/getService.ts
@@ -17,13 +17,6 @@ export const GetService = (
try {
const { globalTime } = getState();
- if (
- props.maxTime !== globalTime.maxTime &&
- props.minTime !== globalTime.minTime
- ) {
- return;
- }
-
const { maxTime, minTime } = GetMinMax(globalTime.selectedTime, [
globalTime.minTime / 1000000,
globalTime.maxTime / 1000000,
diff --git a/frontend/src/store/reducers/app.ts b/frontend/src/store/reducers/app.ts
index 9e0db3cd6f..4db3965cad 100644
--- a/frontend/src/store/reducers/app.ts
+++ b/frontend/src/store/reducers/app.ts
@@ -224,7 +224,6 @@ const appReducer = (
}
case UPDATE_USER_FLAG: {
- console.log('herei n update user flag');
return {
...state,
userFlags: { ...state.userFlags, ...action.payload.flags },
diff --git a/frontend/src/types/api/alerts/def.ts b/frontend/src/types/api/alerts/def.ts
index 42d599948d..c773cb78a2 100644
--- a/frontend/src/types/api/alerts/def.ts
+++ b/frontend/src/types/api/alerts/def.ts
@@ -6,6 +6,8 @@ export const defaultMatchType = '1';
// default eval window
export const defaultEvalWindow = '5m0s';
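+// default frequency at which rules are evaluated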
+export const defaultFrequency = '1m0s';
+
// default compare op: above
export const defaultCompareOp = '1';
@@ -14,6 +16,7 @@ export interface AlertDef {
alertType?: string;
alert?: string;
ruleType?: string;
+ frequency?: string;
condition: RuleCondition;
labels?: Labels;
annotations?: Labels;
@@ -22,6 +25,7 @@ export interface AlertDef {
disabled?: boolean;
preferredChannels?: string[];
broadcastToAll?: boolean;
+ version?: string;
}
export interface RuleCondition {
@@ -31,6 +35,8 @@ export interface RuleCondition {
matchType?: string;
targetUnit?: string;
selectedQueryName?: string;
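+ // No-data alerting: when alertOnAbsent is set, fire after data has been absent for absentFor.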
+ alertOnAbsent?: boolean | undefined;
+ absentFor?: number | undefined;
}
export interface Labels {
diff --git a/frontend/src/types/api/channels/createEmail.ts b/frontend/src/types/api/channels/createEmail.ts
new file mode 100644
index 0000000000..652e848773
--- /dev/null
+++ b/frontend/src/types/api/channels/createEmail.ts
@@ -0,0 +1,8 @@
+import { EmailChannel } from 'container/CreateAlertChannels/config';
+
+export type Props = EmailChannel;
+
+export interface PayloadProps {
+ data: string;
+ status: string;
+}
diff --git a/frontend/src/types/api/channels/editEmail.ts b/frontend/src/types/api/channels/editEmail.ts
new file mode 100644
index 0000000000..3dfe404e28
--- /dev/null
+++ b/frontend/src/types/api/channels/editEmail.ts
@@ -0,0 +1,10 @@
+import { EmailChannel } from 'container/CreateAlertChannels/config';
+
+export interface Props extends EmailChannel {
+ id: string;
+}
+
+export interface PayloadProps {
+ data: string;
+ status: string;
+}
diff --git a/frontend/src/types/api/dashboard/create.ts b/frontend/src/types/api/dashboard/create.ts
index 9b0e26457d..b553ecd17b 100644
--- a/frontend/src/types/api/dashboard/create.ts
+++ b/frontend/src/types/api/dashboard/create.ts
@@ -4,6 +4,7 @@ export type Props =
| {
title: Dashboard['data']['title'];
uploadedGrafana: boolean;
+ version?: string;
}
| { DashboardData: DashboardData; uploadedGrafana: boolean };
diff --git a/frontend/src/types/api/dashboard/getAll.ts b/frontend/src/types/api/dashboard/getAll.ts
index 2111d3d57b..ba23e55186 100644
--- a/frontend/src/types/api/dashboard/getAll.ts
+++ b/frontend/src/types/api/dashboard/getAll.ts
@@ -55,6 +55,7 @@ export interface Dashboard {
}
export interface DashboardData {
+ uuid?: string;
description?: string;
tags?: string[];
name?: string;
@@ -62,6 +63,7 @@ export interface DashboardData {
title: string;
layout?: Layout[];
variables: Record;
+ version?: string;
}
export interface IBaseWidget {
diff --git a/frontend/src/types/api/integrations/types.ts b/frontend/src/types/api/integrations/types.ts
new file mode 100644
index 0000000000..b9f5e55480
--- /dev/null
+++ b/frontend/src/types/api/integrations/types.ts
@@ -0,0 +1,103 @@
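+// Request/response shapes for the integrations API; *_ts_ms fields are
+// epoch-millisecond timestamps.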
+interface IntegrationsProps {
+ author: {
+ email: string;
+ homepage: string;
+ name: string;
+ };
+ description: string;
+ id: string;
+ icon: string;
+ is_installed: boolean;
+ title: string;
+}
+
+export interface AllIntegrationsProps {
+ status: string;
+ data: {
+ integrations: IntegrationsProps[];
+ };
+}
+
+export interface IntegrationDetailedProps {
+ description: string;
+ id: string;
+ installation: {
+ installed_at: string;
+ } | null;
+ title: string;
+ author: {
+ email: string;
+ homepage: string;
+ name: string;
+ };
+ icon: string;
+ connection_status: {
+ logs: {
+ last_received_ts_ms: number;
+ last_received_from: string;
+ } | null;
+ metrics: {
+ last_received_ts_ms: number;
+ last_received_from: string;
+ } | null;
+ };
+ categories: string[];
+ assets: {
+ logs: {
+ pipelines: [];
+ };
+ dashboards: [];
+ alerts: [];
+ };
+ overview: string;
+ configuration: [
+ {
+ title: string;
+ instructions: string;
+ },
+ ];
+ data_collected: {
+ logs: string[];
+ metrics: string[];
+ };
+}
+export interface GetIntegrationProps {
+ data: IntegrationDetailedProps;
+}
+
+export interface IntegrationConnectionStatus {
+ logs: {
+ last_received_ts_ms: number;
+ last_received_from: string;
+ } | null;
+ metrics: {
+ last_received_ts_ms: number;
+ last_received_from: string;
+ } | null;
+}
+
+export interface GetIntegrationStatusProps {
+ data: IntegrationConnectionStatus;
+}
+
+export interface GetIntegrationPayloadProps {
+ integrationId: string;
+ enabled?: boolean;
+}
+
+export interface InstallIntegrationKeyProps {
+ integration_id: string;
+ config: any;
+}
+
+export interface InstalledIntegrationsSuccessResponse {
+ data: IntegrationsProps;
+}
+
+export interface UninstallIntegrationProps {
+ integration_id: string;
+}
+
+export interface UninstallIntegrationSuccessResponse {
+ data: any;
+}
diff --git a/frontend/src/types/api/queryBuilder/queryBuilderData.ts b/frontend/src/types/api/queryBuilder/queryBuilderData.ts
index ff8c143bf2..6a54254617 100644
--- a/frontend/src/types/api/queryBuilder/queryBuilderData.ts
+++ b/frontend/src/types/api/queryBuilder/queryBuilderData.ts
@@ -47,12 +47,21 @@ export type OrderByPayload = {
order: string;
};
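+// A function applied on top of a query (e.g. clampMin, timeShift) with
+// its arguments; see QueryFunctionsTypes for the known names.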
+export interface QueryFunctionProps {
+ name: string;
+ args: string[];
+}
+
// Type for query builder
export type IBuilderQuery = {
queryName: string;
dataSource: DataSource;
aggregateOperator: string;
aggregateAttribute: BaseAutocompleteData;
+ timeAggregation: string;
+ spaceAggregation?: string;
+ temporality?: string;
+ functions: QueryFunctionProps[];
filters: TagFilter;
groupBy: BaseAutocompleteData[];
expression: string;
diff --git a/frontend/src/types/api/widgets/getQuery.ts b/frontend/src/types/api/widgets/getQuery.ts
index 0b36af1541..5f455698dd 100644
--- a/frontend/src/types/api/widgets/getQuery.ts
+++ b/frontend/src/types/api/widgets/getQuery.ts
@@ -14,6 +14,8 @@ export interface QueryData {
queryName: string;
legend?: string;
values: [number, string][];
+ quantity?: number[];
+ unit?: string;
}
export interface SeriesItem {
@@ -28,6 +30,9 @@ export interface QueryDataV3 {
queryName: string;
legend?: string;
series: SeriesItem[] | null;
+ quantity?: number;
+ unitPrice?: number;
+ unit?: string;
}
export interface Props {
diff --git a/frontend/src/types/common/operations.types.ts b/frontend/src/types/common/operations.types.ts
index 58fd4533b9..9421509e9f 100644
--- a/frontend/src/types/common/operations.types.ts
+++ b/frontend/src/types/common/operations.types.ts
@@ -4,6 +4,7 @@ import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteRe
import {
IBuilderFormula,
IBuilderQuery,
+ QueryFunctionProps,
} from 'types/api/queryBuilder/queryBuilderData';
import { DataSource } from 'types/common/queryBuilder';
@@ -13,6 +14,7 @@ type UseQueryOperationsParams = Pick &
Pick & {
formula?: IBuilderFormula;
isListViewPanel?: boolean;
+ entityVersion: string;
};
export type HandleChangeQueryData = <
@@ -37,12 +39,15 @@ export type UseQueryOperations = (
isTracePanelType: boolean;
isMetricsDataSource: boolean;
operators: SelectOption[];
+ spaceAggregationOptions: SelectOption[];
listOfAdditionalFilters: string[];
handleChangeOperator: (value: string) => void;
+ handleSpaceAggregationChange: (value: string) => void;
handleChangeAggregatorAttribute: (value: BaseAutocompleteData) => void;
handleChangeDataSource: (newSource: DataSource) => void;
handleDeleteQuery: () => void;
handleChangeQueryData: HandleChangeQueryData;
handleChangeFormulaData: HandleChangeFormulaData;
+ handleQueryFunctionsUpdates: (functions: QueryFunctionProps[]) => void;
listOfAdditionalFormulaFilters: string[];
};
diff --git a/frontend/src/types/common/queryBuilder.ts b/frontend/src/types/common/queryBuilder.ts
index e8dad50083..02ea8beebb 100644
--- a/frontend/src/types/common/queryBuilder.ts
+++ b/frontend/src/types/common/queryBuilder.ts
@@ -61,7 +61,14 @@ export enum BoolOperators {
COUNT_DISTINCT = 'count_distinct',
}
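+// Aggregation temporality of a metric, mirroring the OpenTelemetry values.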
+export enum Temporality {
+ Unspecified = 'Unspecified',
+ Delta = 'Delta',
+ Cumulative = 'Cumulative',
+}
+
export enum MetricAggregateOperator {
+ EMPTY = '', // used as time aggregator for histograms
NOOP = 'noop',
COUNT = 'count',
COUNT_DISTINCT = 'count_distinct',
@@ -92,6 +99,8 @@ export enum MetricAggregateOperator {
HIST_QUANTILE_90 = 'hist_quantile_90',
HIST_QUANTILE_95 = 'hist_quantile_95',
HIST_QUANTILE_99 = 'hist_quantile_99',
+ INCREASE = 'increase',
+ LATEST = 'latest',
}
export enum TracesAggregatorOperator {
@@ -142,6 +151,24 @@ export enum LogsAggregatorOperator {
RATE_MAX = 'rate_max',
}
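+// Names of the functions selectable in the query builder UI.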
+export enum QueryFunctionsTypes {
+ CUTOFF_MIN = 'cutOffMin',
+ CUTOFF_MAX = 'cutOffMax',
+ CLAMP_MIN = 'clampMin',
+ CLAMP_MAX = 'clampMax',
+ ABSOLUTE = 'absolute',
+ LOG_2 = 'log2',
+ LOG_10 = 'log10',
+ CUMULATIVE_SUM = 'cumSum',
+ EWMA_3 = 'ewma3',
+ EWMA_5 = 'ewma5',
+ EWMA_7 = 'ewma7',
+ MEDIAN_3 = 'median3',
+ MEDIAN_5 = 'median5',
+ MEDIAN_7 = 'median7',
+ TIME_SHIFT = 'timeShift',
+}
+
export type PanelTypeKeys =
| 'TIME_SERIES'
| 'VALUE'
@@ -184,6 +211,7 @@ export type QueryBuilderContextType = {
) => void;
addNewBuilderQuery: () => void;
addNewFormula: () => void;
+ cloneQuery: (type: string, query: IBuilderQuery) => void;
addNewQueryItem: (type: EQueryType.PROM | EQueryType.CLICKHOUSE) => void;
redirectWithQueryBuilderData: (
query: Query,
diff --git a/frontend/src/utils/permission/index.ts b/frontend/src/utils/permission/index.ts
index 265ffe02a5..b18be180cd 100644
--- a/frontend/src/utils/permission/index.ts
+++ b/frontend/src/utils/permission/index.ts
@@ -82,6 +82,10 @@ export const routePermission: Record = {
LOGS_PIPELINES: ['ADMIN', 'EDITOR', 'VIEWER'],
TRACE_EXPLORER: ['ADMIN', 'EDITOR', 'VIEWER'],
GET_STARTED: ['ADMIN', 'EDITOR', 'VIEWER'],
+ GET_STARTED_APPLICATION_MONITORING: ['ADMIN', 'EDITOR', 'VIEWER'],
+ GET_STARTED_INFRASTRUCTURE_MONITORING: ['ADMIN', 'EDITOR', 'VIEWER'],
+ GET_STARTED_LOGS_MANAGEMENT: ['ADMIN', 'EDITOR', 'VIEWER'],
+ GET_STARTED_AWS_MONITORING: ['ADMIN', 'EDITOR', 'VIEWER'],
WORKSPACE_LOCKED: ['ADMIN', 'EDITOR', 'VIEWER'],
BILLING: ['ADMIN', 'EDITOR', 'VIEWER'],
SUPPORT: ['ADMIN', 'EDITOR', 'VIEWER'],
@@ -92,4 +96,7 @@ export const routePermission: Record = {
LOGS_BASE: [],
OLD_LOGS_EXPLORER: [],
SHORTCUTS: ['ADMIN', 'EDITOR', 'VIEWER'],
+ INTEGRATIONS_BASE: ['ADMIN', 'EDITOR', 'VIEWER'],
+ INTEGRATIONS_INSTALLED: ['ADMIN', 'EDITOR', 'VIEWER'],
+ INTEGRATIONS_MARKETPLACE: ['ADMIN', 'EDITOR', 'VIEWER'],
};
diff --git a/frontend/tests/dashboards/utils.ts b/frontend/tests/dashboards/utils.ts
index b69aedf905..4db3769c8e 100644
--- a/frontend/tests/dashboards/utils.ts
+++ b/frontend/tests/dashboards/utils.ts
@@ -1,4 +1,5 @@
import { Page } from '@playwright/test';
+
import { JsonApplicationType } from '../fixtures/constant';
// API endpoints
@@ -41,6 +42,99 @@ export const timeSeriesGraphName = 'Time1';
let widgetsId: string;
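+// Canned dashboard API response with the given widget id injected; used
+// to mock dashboard GET/PUT calls in the Playwright specs.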
+export const insertWidgetIdInResponse = (widgetID: string): any => ({
+ status: 'success',
+ data: {
+ id: 219,
+ uuid: 'd697fddb-a771-4bb4-aa38-810f000ed96a',
+ created_at: '2023-11-17T20:44:03.167646604Z',
+ created_by: 'vikrant@signoz.io',
+ updated_at: '2023-11-17T20:51:23.058536475Z',
+ updated_by: 'vikrant@signoz.io',
+ data: {
+ description: 'Playwright Dashboard T',
+ layout: [
+ {
+ h: 3,
+ i: '9fbcf0db-1572-4572-bf6b-0a84dd10ed85',
+ w: 6,
+ x: 0,
+ y: 0,
+ },
+ ],
+ version: 'v3',
+ name: '',
+ tags: [],
+ title: 'Playwright Dashboard',
+ variables: {},
+ widgets: [
+ {
+ description: '',
+ id: widgetID,
+ isStacked: false,
+ nullZeroValues: '',
+ opacity: '',
+ panelTypes: 'graph',
+ query: {
+ builder: {
+ queryData: [
+ {
+ aggregateAttribute: {
+ dataType: '',
+ id: '------',
+ isColumn: false,
+ isJSON: false,
+ key: '',
+ type: '',
+ },
+ aggregateOperator: 'count',
+ dataSource: 'metrics',
+ disabled: false,
+ expression: 'A',
+ filters: {
+ items: [],
+ op: 'AND',
+ },
+ groupBy: [],
+ having: [],
+ legend: '',
+ limit: null,
+ orderBy: [],
+ queryName: 'A',
+ reduceTo: 'avg',
+ stepInterval: 60,
+ },
+ ],
+ queryFormulas: [],
+ },
+ clickhouse_sql: [
+ {
+ disabled: false,
+ legend: '',
+ name: 'A',
+ query: '',
+ },
+ ],
+ id: '6b4011e4-bcea-497d-81a9-0ee7816b679d',
+ promql: [
+ {
+ disabled: false,
+ legend: '',
+ name: 'A',
+ query: '',
+ },
+ ],
+ queryType: 'builder',
+ },
+ timePreferance: 'GLOBAL_TIME',
+ title: '',
+ },
+ ],
+ },
+ isLocked: 0,
+ },
+});
+
// mock API calls
export const dashboardsListAndCreate = async (
page: Page,
@@ -76,7 +170,8 @@ export const getTimeSeriesQueryData = async (
page: Page,
response: any,
): Promise<void> => {
- await page.route(`**/${queryRangeApiEndpoint}`, (route) =>
+ // eslint-disable-next-line sonarjs/no-identical-functions
+ await page.route(`**/${queryRangeApiEndpoint}`, (route): any =>
route.fulfill({
status: 200,
contentType: JsonApplicationType,
@@ -84,97 +179,3 @@ export const getTimeSeriesQueryData = async (
}),
);
};
-
-export const insertWidgetIdInResponse = (widgetID: string) => {
- return {
- status: 'success',
- data: {
- id: 219,
- uuid: 'd697fddb-a771-4bb4-aa38-810f000ed96a',
- created_at: '2023-11-17T20:44:03.167646604Z',
- created_by: 'vikrant@signoz.io',
- updated_at: '2023-11-17T20:51:23.058536475Z',
- updated_by: 'vikrant@signoz.io',
- data: {
- description: 'Playwright Dashboard T',
- layout: [
- {
- h: 3,
- i: '9fbcf0db-1572-4572-bf6b-0a84dd10ed85',
- w: 6,
- x: 0,
- y: 0,
- },
- ],
- name: '',
- tags: [],
- title: 'Playwright Dashboard',
- variables: {},
- widgets: [
- {
- description: '',
- id: widgetID,
- isStacked: false,
- nullZeroValues: '',
- opacity: '',
- panelTypes: 'graph',
- query: {
- builder: {
- queryData: [
- {
- aggregateAttribute: {
- dataType: '',
- id: '------',
- isColumn: false,
- isJSON: false,
- key: '',
- type: '',
- },
- aggregateOperator: 'count',
- dataSource: 'metrics',
- disabled: false,
- expression: 'A',
- filters: {
- items: [],
- op: 'AND',
- },
- groupBy: [],
- having: [],
- legend: '',
- limit: null,
- orderBy: [],
- queryName: 'A',
- reduceTo: 'sum',
- stepInterval: 60,
- },
- ],
- queryFormulas: [],
- },
- clickhouse_sql: [
- {
- disabled: false,
- legend: '',
- name: 'A',
- query: '',
- },
- ],
- id: '6b4011e4-bcea-497d-81a9-0ee7816b679d',
- promql: [
- {
- disabled: false,
- legend: '',
- name: 'A',
- query: '',
- },
- ],
- queryType: 'builder',
- },
- timePreferance: 'GLOBAL_TIME',
- title: '',
- },
- ],
- },
- isLocked: 0,
- },
- };
-};
diff --git a/frontend/tests/fixtures/api/dashboard/dashboardGetCallWithTimeSeriesWidget200.json b/frontend/tests/fixtures/api/dashboard/dashboardGetCallWithTimeSeriesWidget200.json
index e65361d6e7..1c8f1fe8ea 100644
--- a/frontend/tests/fixtures/api/dashboard/dashboardGetCallWithTimeSeriesWidget200.json
+++ b/frontend/tests/fixtures/api/dashboard/dashboardGetCallWithTimeSeriesWidget200.json
@@ -56,7 +56,7 @@
"limit": null,
"orderBy": [],
"queryName": "A",
- "reduceTo": "sum",
+ "reduceTo": "avg",
"stepInterval": 60
}
],
diff --git a/frontend/tests/fixtures/api/dashboard/putDashboardWithTimeSeries200.json b/frontend/tests/fixtures/api/dashboard/putDashboardWithTimeSeries200.json
index 7dab6c646b..bbfe51f9c9 100644
--- a/frontend/tests/fixtures/api/dashboard/putDashboardWithTimeSeries200.json
+++ b/frontend/tests/fixtures/api/dashboard/putDashboardWithTimeSeries200.json
@@ -56,7 +56,7 @@
"limit": null,
"orderBy": [],
"queryName": "A",
- "reduceTo": "sum",
+ "reduceTo": "avg",
"stepInterval": 60
}
],
diff --git a/frontend/tests/fixtures/api/traces/traceExplorerViews200.json b/frontend/tests/fixtures/api/traces/traceExplorerViews200.json
index fa929a1061..801a56425e 100644
--- a/frontend/tests/fixtures/api/traces/traceExplorerViews200.json
+++ b/frontend/tests/fixtures/api/traces/traceExplorerViews200.json
@@ -40,7 +40,7 @@
"order": "desc"
}
],
- "reduceTo": "sum"
+ "reduceTo": "avg"
}
},
"panelType": "table",
diff --git a/frontend/webpack.config.js b/frontend/webpack.config.js
index 281067ad47..9a9bd39807 100644
--- a/frontend/webpack.config.js
+++ b/frontend/webpack.config.js
@@ -64,10 +64,12 @@ if (process.env.BUNDLE_ANALYSER === 'true') {
*/
const config = {
mode: 'development',
- devtool: 'source-map',
+ devtool: 'eval-source-map',
entry: resolve(__dirname, './src/index.tsx'),
devServer: {
- historyApiFallback: true,
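+ // disableDotRule lets routes containing dots fall back to index.html
+ // instead of being treated as static file requests.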
+ historyApiFallback: {
+ disableDotRule: true,
+ },
open: true,
hot: true,
liveReload: true,
diff --git a/frontend/webpack.config.prod.js b/frontend/webpack.config.prod.js
index a2b3ecb40e..22e33d7976 100644
--- a/frontend/webpack.config.prod.js
+++ b/frontend/webpack.config.prod.js
@@ -79,6 +79,7 @@ if (process.env.BUNDLE_ANALYSER === 'true') {
const config = {
mode: 'production',
+ devtool: 'eval-source-map',
entry: resolve(__dirname, './src/index.tsx'),
output: {
path: resolve(__dirname, './build'),
diff --git a/frontend/yarn.lock b/frontend/yarn.lock
index 6474b180c1..df275f43e4 100644
--- a/frontend/yarn.lock
+++ b/frontend/yarn.lock
@@ -4198,6 +4198,13 @@
dependencies:
"@types/unist" "^2"
+"@types/hast@^3.0.0":
+ version "3.0.4"
+ resolved "https://registry.yarnpkg.com/@types/hast/-/hast-3.0.4.tgz#1d6b39993b82cea6ad783945b0508c25903e15aa"
+ integrity sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==
+ dependencies:
+ "@types/unist" "*"
+
"@types/history@^4.7.11":
version "4.7.11"
resolved "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz"
@@ -4297,6 +4304,13 @@
dependencies:
"@types/unist" "^2"
+"@types/mdast@^4.0.0":
+ version "4.0.3"
+ resolved "https://registry.yarnpkg.com/@types/mdast/-/mdast-4.0.3.tgz#1e011ff013566e919a4232d1701ad30d70cab333"
+ integrity sha512-LsjtqsyF+d2/yFOYaN22dHZI1Cpwkrj+g06G8+qtUKlhovPW89YhqSnfKtMbkgmEtYpH2gydRNULd6y8mciAFg==
+ dependencies:
+ "@types/unist" "*"
+
"@types/mdx@^2.0.0":
version "2.0.7"
resolved "https://registry.yarnpkg.com/@types/mdx/-/mdx-2.0.7.tgz#c7482e995673e01b83f8e96df83b3843ea76401f"
@@ -4577,6 +4591,11 @@
resolved "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.3.tgz"
integrity sha512-NfQ4gyz38SL8sDNrSixxU2Os1a5xcdFxipAFxYEuLUlvU2uDwS4NUpsImcf1//SlWItCVMMLiylsxbmNMToV/g==
+"@types/unist@*", "@types/unist@^3.0.0":
+ version "3.0.2"
+ resolved "https://registry.yarnpkg.com/@types/unist/-/unist-3.0.2.tgz#6dd61e43ef60b34086287f83683a5c1b2dc53d20"
+ integrity sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==
+
"@types/unist@^2", "@types/unist@^2.0.0":
version "2.0.8"
resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.8.tgz#bb197b9639aa1a04cf464a617fe800cccd92ad5c"
@@ -4799,6 +4818,11 @@
resolved "https://registry.npmjs.org/@ungap/custom-elements/-/custom-elements-1.2.0.tgz"
integrity sha512-zdSuu79stAwVUtzkQU9B5jhGh2LavtkeX4kxd2jtMJmZt7QqRJ1KJW5bukt/vUOaUs3z674GHd+nqYm0bu0Gyg==
+"@ungap/structured-clone@^1.0.0":
+ version "1.2.0"
+ resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.2.0.tgz#756641adb587851b5ccb3e095daf27ae581c8406"
+ integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==
+
"@volar/language-core@1.11.1", "@volar/language-core@~1.11.1":
version "1.11.1"
resolved "https://registry.yarnpkg.com/@volar/language-core/-/language-core-1.11.1.tgz#ecdf12ea8dc35fb8549e517991abcbf449a5ad4f"
@@ -7620,6 +7644,13 @@ detect-node@^2.0.4, detect-node@^2.1.0:
resolved "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz"
integrity sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==
+devlop@^1.0.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/devlop/-/devlop-1.1.0.tgz#4db7c2ca4dc6e0e834c30be70c94bbc976dc7018"
+ integrity sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==
+ dependencies:
+ dequal "^2.0.0"
+
diff-sequences@^27.5.1:
version "27.5.1"
resolved "https://registry.npmjs.org/diff-sequences/-/diff-sequences-27.5.1.tgz"
@@ -9376,6 +9407,20 @@ hast-util-from-parse5@^7.0.0:
vfile-location "^4.0.0"
web-namespaces "^2.0.0"
+hast-util-from-parse5@^8.0.0:
+ version "8.0.1"
+ resolved "https://registry.yarnpkg.com/hast-util-from-parse5/-/hast-util-from-parse5-8.0.1.tgz#654a5676a41211e14ee80d1b1758c399a0327651"
+ integrity sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==
+ dependencies:
+ "@types/hast" "^3.0.0"
+ "@types/unist" "^3.0.0"
+ devlop "^1.0.0"
+ hastscript "^8.0.0"
+ property-information "^6.0.0"
+ vfile "^6.0.0"
+ vfile-location "^5.0.0"
+ web-namespaces "^2.0.0"
+
hast-util-has-property@^2.0.0:
version "2.0.1"
resolved "https://registry.yarnpkg.com/hast-util-has-property/-/hast-util-has-property-2.0.1.tgz#8ec99c3e8f02626304ee438cdb9f0528b017e083"
@@ -9408,6 +9453,13 @@ hast-util-parse-selector@^3.0.0:
dependencies:
"@types/hast" "^2.0.0"
+hast-util-parse-selector@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz#352879fa86e25616036037dd8931fb5f34cb4a27"
+ integrity sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==
+ dependencies:
+ "@types/hast" "^3.0.0"
+
hast-util-raw@^7.0.0, hast-util-raw@^7.2.0:
version "7.2.3"
resolved "https://registry.yarnpkg.com/hast-util-raw/-/hast-util-raw-7.2.3.tgz#dcb5b22a22073436dbdc4aa09660a644f4991d99"
@@ -9425,6 +9477,25 @@ hast-util-raw@^7.0.0, hast-util-raw@^7.2.0:
web-namespaces "^2.0.0"
zwitch "^2.0.0"
+hast-util-raw@^9.0.0:
+ version "9.0.2"
+ resolved "https://registry.yarnpkg.com/hast-util-raw/-/hast-util-raw-9.0.2.tgz#39b4a4886bd9f0a5dd42e86d02c966c2c152884c"
+ integrity sha512-PldBy71wO9Uq1kyaMch9AHIghtQvIwxBUkv823pKmkTM3oV1JxtsTNYdevMxvUHqcnOAuO65JKU2+0NOxc2ksA==
+ dependencies:
+ "@types/hast" "^3.0.0"
+ "@types/unist" "^3.0.0"
+ "@ungap/structured-clone" "^1.0.0"
+ hast-util-from-parse5 "^8.0.0"
+ hast-util-to-parse5 "^8.0.0"
+ html-void-elements "^3.0.0"
+ mdast-util-to-hast "^13.0.0"
+ parse5 "^7.0.0"
+ unist-util-position "^5.0.0"
+ unist-util-visit "^5.0.0"
+ vfile "^6.0.0"
+ web-namespaces "^2.0.0"
+ zwitch "^2.0.0"
+
hast-util-select@^5.0.5, hast-util-select@~5.0.1:
version "5.0.5"
resolved "https://registry.yarnpkg.com/hast-util-select/-/hast-util-select-5.0.5.tgz#be9ccb71d2278681ca024727f12abd4f93b3e9bc"
@@ -9496,6 +9567,19 @@ hast-util-to-parse5@^7.0.0:
web-namespaces "^2.0.0"
zwitch "^2.0.0"
+hast-util-to-parse5@^8.0.0:
+ version "8.0.0"
+ resolved "https://registry.yarnpkg.com/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz#477cd42d278d4f036bc2ea58586130f6f39ee6ed"
+ integrity sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==
+ dependencies:
+ "@types/hast" "^3.0.0"
+ comma-separated-tokens "^2.0.0"
+ devlop "^1.0.0"
+ property-information "^6.0.0"
+ space-separated-tokens "^2.0.0"
+ web-namespaces "^2.0.0"
+ zwitch "^2.0.0"
+
hast-util-to-string@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/hast-util-to-string/-/hast-util-to-string-2.0.0.tgz#b008b0a4ea472bf34dd390b7eea1018726ae152a"
@@ -9530,6 +9614,17 @@ hastscript@^7.0.0:
property-information "^6.0.0"
space-separated-tokens "^2.0.0"
+hastscript@^8.0.0:
+ version "8.0.0"
+ resolved "https://registry.yarnpkg.com/hastscript/-/hastscript-8.0.0.tgz#4ef795ec8dee867101b9f23cc830d4baf4fd781a"
+ integrity sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw==
+ dependencies:
+ "@types/hast" "^3.0.0"
+ comma-separated-tokens "^2.0.0"
+ hast-util-parse-selector "^4.0.0"
+ property-information "^6.0.0"
+ space-separated-tokens "^2.0.0"
+
he@^1.2.0:
version "1.2.0"
resolved "https://registry.npmjs.org/he/-/he-1.2.0.tgz"
@@ -9643,6 +9738,11 @@ html-void-elements@^2.0.0:
resolved "https://registry.yarnpkg.com/html-void-elements/-/html-void-elements-2.0.1.tgz#29459b8b05c200b6c5ee98743c41b979d577549f"
integrity sha512-0quDb7s97CfemeJAnW9wC0hw78MtW7NU3hqtCD75g2vFlDLt36llsYD7uB7SUzojLMP24N5IatXf7ylGXiGG9A==
+html-void-elements@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.yarnpkg.com/html-void-elements/-/html-void-elements-3.0.0.tgz#fc9dbd84af9e747249034d4d62602def6517f1d7"
+ integrity sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==
+
html-webpack-plugin@5.5.0:
version "5.5.0"
resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-5.5.0.tgz#c3911936f57681c1f9f4d8b68c158cd9dfe52f50"
@@ -11880,6 +11980,21 @@ mdast-util-to-hast@^12.1.0:
unist-util-position "^4.0.0"
unist-util-visit "^4.0.0"
+mdast-util-to-hast@^13.0.0:
+ version "13.1.0"
+ resolved "https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-13.1.0.tgz#1ae54d903150a10fe04d59f03b2b95fd210b2124"
+ integrity sha512-/e2l/6+OdGp/FB+ctrJ9Avz71AN/GRH3oi/3KAx/kMnoUsD6q0woXlDT8lLEeViVKE7oZxE7RXzvO3T8kF2/sA==
+ dependencies:
+ "@types/hast" "^3.0.0"
+ "@types/mdast" "^4.0.0"
+ "@ungap/structured-clone" "^1.0.0"
+ devlop "^1.0.0"
+ micromark-util-sanitize-uri "^2.0.0"
+ trim-lines "^3.0.0"
+ unist-util-position "^5.0.0"
+ unist-util-visit "^5.0.0"
+ vfile "^6.0.0"
+
mdast-util-to-markdown@^1.0.0, mdast-util-to-markdown@^1.3.0:
version "1.5.0"
resolved "https://registry.yarnpkg.com/mdast-util-to-markdown/-/mdast-util-to-markdown-1.5.0.tgz#c13343cb3fc98621911d33b5cd42e7d0731171c6"
@@ -12216,6 +12331,14 @@ micromark-util-character@^1.0.0:
micromark-util-symbol "^1.0.0"
micromark-util-types "^1.0.0"
+micromark-util-character@^2.0.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/micromark-util-character/-/micromark-util-character-2.1.0.tgz#31320ace16b4644316f6bf057531689c71e2aee1"
+ integrity sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==
+ dependencies:
+ micromark-util-symbol "^2.0.0"
+ micromark-util-types "^2.0.0"
+
micromark-util-chunked@^1.0.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/micromark-util-chunked/-/micromark-util-chunked-1.1.0.tgz#37a24d33333c8c69a74ba12a14651fd9ea8a368b"
@@ -12262,6 +12385,11 @@ micromark-util-encode@^1.0.0:
resolved "https://registry.yarnpkg.com/micromark-util-encode/-/micromark-util-encode-1.1.0.tgz#92e4f565fd4ccb19e0dcae1afab9a173bbeb19a5"
integrity sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==
+micromark-util-encode@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz#0921ac7953dc3f1fd281e3d1932decfdb9382ab1"
+ integrity sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==
+
micromark-util-events-to-acorn@^1.0.0:
version "1.2.3"
resolved "https://registry.yarnpkg.com/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-1.2.3.tgz#a4ab157f57a380e646670e49ddee97a72b58b557"
@@ -12304,6 +12432,15 @@ micromark-util-sanitize-uri@^1.0.0, micromark-util-sanitize-uri@^1.1.0:
micromark-util-encode "^1.0.0"
micromark-util-symbol "^1.0.0"
+micromark-util-sanitize-uri@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz#ec8fbf0258e9e6d8f13d9e4770f9be64342673de"
+ integrity sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==
+ dependencies:
+ micromark-util-character "^2.0.0"
+ micromark-util-encode "^2.0.0"
+ micromark-util-symbol "^2.0.0"
+
micromark-util-subtokenize@^1.0.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/micromark-util-subtokenize/-/micromark-util-subtokenize-1.1.0.tgz#941c74f93a93eaf687b9054aeb94642b0e92edb1"
@@ -12319,11 +12456,21 @@ micromark-util-symbol@^1.0.0:
resolved "https://registry.yarnpkg.com/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz#813cd17837bdb912d069a12ebe3a44b6f7063142"
integrity sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==
+micromark-util-symbol@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz#12225c8f95edf8b17254e47080ce0862d5db8044"
+ integrity sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==
+
micromark-util-types@^1.0.0, micromark-util-types@^1.0.1:
version "1.1.0"
resolved "https://registry.yarnpkg.com/micromark-util-types/-/micromark-util-types-1.1.0.tgz#e6676a8cae0bb86a2171c498167971886cb7e283"
integrity sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==
+micromark-util-types@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/micromark-util-types/-/micromark-util-types-2.0.0.tgz#63b4b7ffeb35d3ecf50d1ca20e68fc7caa36d95e"
+ integrity sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==
+
micromark@^3.0.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/micromark/-/micromark-3.2.0.tgz#1af9fef3f995ea1ea4ac9c7e2f19c48fd5c006e9"
@@ -13265,6 +13412,13 @@ parse5@6.0.1, parse5@^6.0.0, parse5@^6.0.1:
resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b"
integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==
+parse5@^7.0.0:
+ version "7.1.2"
+ resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.1.2.tgz#0736bebbfd77793823240a23b7fc5e010b7f8e32"
+ integrity sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==
+ dependencies:
+ entities "^4.4.0"
+
parseurl@~1.3.2, parseurl@~1.3.3:
version "1.3.3"
resolved "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz"
@@ -15013,6 +15167,15 @@ rehype-prism-plus@~1.6.1:
unist-util-filter "^4.0.0"
unist-util-visit "^4.0.0"
+rehype-raw@7.0.0:
+ version "7.0.0"
+ resolved "https://registry.yarnpkg.com/rehype-raw/-/rehype-raw-7.0.0.tgz#59d7348fd5dbef3807bbaa1d443efd2dd85ecee4"
+ integrity sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==
+ dependencies:
+ "@types/hast" "^3.0.0"
+ hast-util-raw "^9.0.0"
+ vfile "^6.0.0"
+
rehype-raw@^6.1.1:
version "6.1.1"
resolved "https://registry.yarnpkg.com/rehype-raw/-/rehype-raw-6.1.1.tgz#81bbef3793bd7abacc6bf8335879d1b6c868c9d4"
@@ -16844,6 +17007,13 @@ unist-util-is@^5.0.0:
dependencies:
"@types/unist" "^2.0.0"
+unist-util-is@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-6.0.0.tgz#b775956486aff107a9ded971d996c173374be424"
+ integrity sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==
+ dependencies:
+ "@types/unist" "^3.0.0"
+
unist-util-position-from-estree@^1.0.0, unist-util-position-from-estree@^1.1.0:
version "1.1.2"
resolved "https://registry.yarnpkg.com/unist-util-position-from-estree/-/unist-util-position-from-estree-1.1.2.tgz#8ac2480027229de76512079e377afbcabcfcce22"
@@ -16858,6 +17028,13 @@ unist-util-position@^4.0.0:
dependencies:
"@types/unist" "^2.0.0"
+unist-util-position@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/unist-util-position/-/unist-util-position-5.0.0.tgz#678f20ab5ca1207a97d7ea8a388373c9cf896be4"
+ integrity sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==
+ dependencies:
+ "@types/unist" "^3.0.0"
+
unist-util-remove-position@^4.0.0:
version "4.0.2"
resolved "https://registry.yarnpkg.com/unist-util-remove-position/-/unist-util-remove-position-4.0.2.tgz#a89be6ea72e23b1a402350832b02a91f6a9afe51"
@@ -16873,6 +17050,13 @@ unist-util-stringify-position@^3.0.0:
dependencies:
"@types/unist" "^2.0.0"
+unist-util-stringify-position@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz#449c6e21a880e0855bf5aabadeb3a740314abac2"
+ integrity sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==
+ dependencies:
+ "@types/unist" "^3.0.0"
+
unist-util-visit-parents@^5.0.0, unist-util-visit-parents@^5.1.1:
version "5.1.3"
resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz#b4520811b0ca34285633785045df7a8d6776cfeb"
@@ -16881,6 +17065,14 @@ unist-util-visit-parents@^5.0.0, unist-util-visit-parents@^5.1.1:
"@types/unist" "^2.0.0"
unist-util-is "^5.0.0"
+unist-util-visit-parents@^6.0.0:
+ version "6.0.1"
+ resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz#4d5f85755c3b8f0dc69e21eca5d6d82d22162815"
+ integrity sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==
+ dependencies:
+ "@types/unist" "^3.0.0"
+ unist-util-is "^6.0.0"
+
unist-util-visit@^4.0.0, unist-util-visit@^4.1.0, unist-util-visit@^4.1.2, unist-util-visit@~4.1.0:
version "4.1.2"
resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-4.1.2.tgz#125a42d1eb876283715a3cb5cceaa531828c72e2"
@@ -16890,6 +17082,15 @@ unist-util-visit@^4.0.0, unist-util-visit@^4.1.0, unist-util-visit@^4.1.2, unist
unist-util-is "^5.0.0"
unist-util-visit-parents "^5.1.1"
+unist-util-visit@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-5.0.0.tgz#a7de1f31f72ffd3519ea71814cccf5fd6a9217d6"
+ integrity sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==
+ dependencies:
+ "@types/unist" "^3.0.0"
+ unist-util-is "^6.0.0"
+ unist-util-visit-parents "^6.0.0"
+
universalify@^0.1.0:
version "0.1.2"
resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66"
@@ -17126,6 +17327,14 @@ vfile-location@^4.0.0:
"@types/unist" "^2.0.0"
vfile "^5.0.0"
+vfile-location@^5.0.0:
+ version "5.0.2"
+ resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-5.0.2.tgz#220d9ca1ab6f8b2504a4db398f7ebc149f9cb464"
+ integrity sha512-NXPYyxyBSH7zB5U6+3uDdd6Nybz6o6/od9rk8bp9H8GR3L+cm/fC0uUTbqBmUTnMCUDslAGBOIKNfvvb+gGlDg==
+ dependencies:
+ "@types/unist" "^3.0.0"
+ vfile "^6.0.0"
+
vfile-message@^3.0.0:
version "3.1.4"
resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-3.1.4.tgz#15a50816ae7d7c2d1fa87090a7f9f96612b59dea"
@@ -17134,6 +17343,14 @@ vfile-message@^3.0.0:
"@types/unist" "^2.0.0"
unist-util-stringify-position "^3.0.0"
+vfile-message@^4.0.0:
+ version "4.0.2"
+ resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-4.0.2.tgz#c883c9f677c72c166362fd635f21fc165a7d1181"
+ integrity sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==
+ dependencies:
+ "@types/unist" "^3.0.0"
+ unist-util-stringify-position "^4.0.0"
+
vfile@^5.0.0:
version "5.3.7"
resolved "https://registry.yarnpkg.com/vfile/-/vfile-5.3.7.tgz#de0677e6683e3380fafc46544cfe603118826ab7"
@@ -17144,6 +17361,15 @@ vfile@^5.0.0:
unist-util-stringify-position "^3.0.0"
vfile-message "^3.0.0"
+vfile@^6.0.0:
+ version "6.0.1"
+ resolved "https://registry.yarnpkg.com/vfile/-/vfile-6.0.1.tgz#1e8327f41eac91947d4fe9d237a2dd9209762536"
+ integrity sha512-1bYqc7pt6NIADBJ98UiG0Bn/CHIVOoZ/IyEkqIruLg0mE1BKzkOXY2D6CSqQIcKqgadppE5lrxgWXJmXd7zZJw==
+ dependencies:
+ "@types/unist" "^3.0.0"
+ unist-util-stringify-position "^4.0.0"
+ vfile-message "^4.0.0"
+
vite-plugin-dts@^3.6.4:
version "3.7.0"
resolved "https://registry.yarnpkg.com/vite-plugin-dts/-/vite-plugin-dts-3.7.0.tgz#654ee7c38c0cdd4589b9bc198a264f34172bd870"
diff --git a/go.mod b/go.mod
index ec4d7506ff..4e293763aa 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module go.signoz.io/signoz
go 1.21.3
require (
- github.com/ClickHouse/clickhouse-go/v2 v2.15.0
+ github.com/ClickHouse/clickhouse-go/v2 v2.20.0
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
github.com/SigNoz/signoz-otel-collector v0.88.12
github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974
@@ -48,7 +48,7 @@ require (
github.com/smartystreets/assertions v1.13.1
github.com/smartystreets/goconvey v1.8.1
github.com/soheilhy/cmux v0.1.5
- github.com/srikanthccv/ClickHouse-go-mock v0.4.0
+ github.com/srikanthccv/ClickHouse-go-mock v0.7.0
github.com/stretchr/testify v1.8.4
go.opentelemetry.io/collector/component v0.88.0
go.opentelemetry.io/collector/confmap v0.88.0
@@ -61,13 +61,13 @@ require (
go.opentelemetry.io/collector/processor v0.88.0
go.opentelemetry.io/collector/receiver v0.88.0
go.opentelemetry.io/collector/service v0.88.0
- go.opentelemetry.io/otel v1.22.0
- go.opentelemetry.io/otel/sdk v1.22.0
+ go.opentelemetry.io/otel v1.24.0
+ go.opentelemetry.io/otel/sdk v1.23.1
go.uber.org/multierr v1.11.0
- go.uber.org/zap v1.26.0
- golang.org/x/crypto v0.17.0
+ go.uber.org/zap v1.27.0
+ golang.org/x/crypto v0.19.0
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
- golang.org/x/net v0.19.0
+ golang.org/x/net v0.21.0
golang.org/x/oauth2 v0.13.0
google.golang.org/grpc v1.59.0
google.golang.org/protobuf v1.31.0
@@ -83,10 +83,10 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
- github.com/ClickHouse/ch-go v0.58.2 // indirect
- github.com/DATA-DOG/go-sqlmock v1.5.0 // indirect
+ github.com/ClickHouse/ch-go v0.61.3 // indirect
+ github.com/DATA-DOG/go-sqlmock v1.5.2 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
- github.com/andybalholm/brotli v1.0.6 // indirect
+ github.com/andybalholm/brotli v1.1.0 // indirect
github.com/aws/aws-sdk-go v1.45.26 // indirect
github.com/beevik/etree v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -122,7 +122,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
- github.com/klauspost/compress v1.17.5 // indirect
+ github.com/klauspost/compress v1.17.7 // indirect
github.com/klauspost/cpuid v1.2.3 // indirect
github.com/knadh/koanf/v2 v2.0.1 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
@@ -141,7 +141,7 @@ require (
github.com/oklog/run v1.1.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.88.0 // indirect
- github.com/paulmach/orb v0.10.0 // indirect
+ github.com/paulmach/orb v0.11.1 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -154,7 +154,7 @@ require (
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/segmentio/asm v1.2.0 // indirect
github.com/segmentio/backo-go v1.0.1 // indirect
- github.com/shirou/gopsutil/v3 v3.23.9 // indirect
+ github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
@@ -183,14 +183,14 @@ require (
go.opentelemetry.io/otel/exporters/prometheus v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 // indirect
- go.opentelemetry.io/otel/metric v1.22.0 // indirect
+ go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.19.0 // indirect
- go.opentelemetry.io/otel/trace v1.22.0 // indirect
+ go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
- go.uber.org/goleak v1.2.1 // indirect
+ go.uber.org/goleak v1.3.0 // indirect
golang.org/x/sync v0.6.0 // indirect
- golang.org/x/sys v0.16.0 // indirect
+ golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
gonum.org/v1/gonum v0.14.0 // indirect
@@ -203,4 +203,4 @@ require (
k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect
)
-replace github.com/prometheus/prometheus => github.com/SigNoz/prometheus v1.9.78
+replace github.com/prometheus/prometheus => github.com/SigNoz/prometheus v1.9.79-0.1
diff --git a/go.sum b/go.sum
index ae300b0f17..63e9eab7d0 100644
--- a/go.sum
+++ b/go.sum
@@ -84,22 +84,20 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkM
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/ClickHouse/ch-go v0.58.2 h1:jSm2szHbT9MCAB1rJ3WuCJqmGLi5UTjlNu+f530UTS0=
-github.com/ClickHouse/ch-go v0.58.2/go.mod h1:Ap/0bEmiLa14gYjCiRkYGbXvbe8vwdrfTYWhsuQ99aw=
-github.com/ClickHouse/clickhouse-go/v2 v2.15.0 h1:G0hTKyO8fXXR1bGnZ0DY3vTG01xYfOGW76zgjg5tmC4=
-github.com/ClickHouse/clickhouse-go/v2 v2.15.0/go.mod h1:kXt1SRq0PIRa6aKZD7TnFnY9PQKmc2b13sHtOYcK6cQ=
-github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
-github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/ClickHouse/ch-go v0.61.3 h1:MmBwUhXrAOBZK7n/sWBzq6FdIQ01cuF2SaaO8KlDRzI=
+github.com/ClickHouse/ch-go v0.61.3/go.mod h1:1PqXjMz/7S1ZUaKvwPA3i35W2bz2mAMFeCi6DIXgGwQ=
+github.com/ClickHouse/clickhouse-go/v2 v2.20.0 h1:bvlLQ31XJfl7MxIqAq2l1G6JhHYzqEXdvfpMeU6bkKc=
+github.com/ClickHouse/clickhouse-go/v2 v2.20.0/go.mod h1:VQfyA+tCwCRw2G7ogfY8V0fq/r0yJWzy8UDrjiP/Lbs=
+github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
+github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/SigNoz/govaluate v0.0.0-20220522085550-d19c08c206cb h1:bneLSKPf9YUSFmafKx32bynV6QrzViL/s+ZDvQxH1E4=
-github.com/SigNoz/govaluate v0.0.0-20220522085550-d19c08c206cb/go.mod h1:JznGDNg9x1cujDKa22RaQOimOvvEfy3nxzDGd8XDgmA=
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkbj57eGXx8H3ZJ4zhmQXBnrW523ktj8=
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc=
-github.com/SigNoz/prometheus v1.9.78 h1:bB3yuDrRzi/Mv00kWayR9DZbyjTuGfendSqISyDcXiY=
-github.com/SigNoz/prometheus v1.9.78/go.mod h1:MffmFu2qFILQrOHehx3D0XjYtaZMVfI+Ppeiv98x4Ww=
+github.com/SigNoz/prometheus v1.9.79-0.1 h1:RjsOw7oXVKx7IDA+/sRXW2x5pnw60/tT9MMuEz3+8DU=
+github.com/SigNoz/prometheus v1.9.79-0.1/go.mod h1:MffmFu2qFILQrOHehx3D0XjYtaZMVfI+Ppeiv98x4Ww=
github.com/SigNoz/signoz-otel-collector v0.88.12 h1:UwkVi1o2NY9gRgCLBtWVKr+UDxb4FaTs63Sb20qgf8w=
github.com/SigNoz/signoz-otel-collector v0.88.12/go.mod h1:RH9OEjni6tkh9RgN/meSPxv3kykjcFscqMwJgbUAXmo=
github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc=
@@ -115,8 +113,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
-github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
-github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
+github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI=
github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE=
@@ -191,14 +189,14 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E=
github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA=
+github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
-github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
-github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
-github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
+github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
+github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -519,9 +517,10 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.5 h1:d4vBd+7CHydUqpFBgUEKkSdtSugf9YFmSkvUYPquI5E=
-github.com/klauspost/compress v1.17.5/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
+github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs=
@@ -665,8 +664,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
-github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s=
-github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
+github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU=
+github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
@@ -759,8 +758,8 @@ github.com/segmentio/backo-go v1.0.1 h1:68RQccglxZeyURy93ASB/2kc9QudzgIDexJ927N+
github.com/segmentio/backo-go v1.0.1/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc=
github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI=
github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE=
-github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E=
-github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA=
+github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
+github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
@@ -793,8 +792,10 @@ github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/srikanthccv/ClickHouse-go-mock v0.4.0 h1:tLk7qoDLg7Z5YD5mOmNqjRDbsm6ehJVXOFvSnG+gQAg=
-github.com/srikanthccv/ClickHouse-go-mock v0.4.0/go.mod h1:kRG9cuhS527AMXqKYgsii/CP28L/22fyJcOBExmLpEw=
+github.com/srikanthccv/ClickHouse-go-mock v0.6.0 h1:Dok9g6l1OwBWOv9c6PHR1XeGE3YfwNcft40rRjpS/TU=
+github.com/srikanthccv/ClickHouse-go-mock v0.6.0/go.mod h1:IJZ/eL1h4cOy/Jo3PzNKXSPmqRus15BC2MbduYPpA/g=
+github.com/srikanthccv/ClickHouse-go-mock v0.7.0 h1:XhRMX2663xkDGq3DYavw8m75O94s9u76hOIjo9QBl8c=
+github.com/srikanthccv/ClickHouse-go-mock v0.7.0/go.mod h1:IJZ/eL1h4cOy/Jo3PzNKXSPmqRus15BC2MbduYPpA/g=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -892,8 +893,8 @@ go.opentelemetry.io/contrib/propagators/b3 v1.20.0 h1:Yty9Vs4F3D6/liF1o6FNt0PvN8
go.opentelemetry.io/contrib/propagators/b3 v1.20.0/go.mod h1:On4VgbkqYL18kbJlWsa18+cMNe6rYpBnPi1ARI/BrsU=
go.opentelemetry.io/contrib/zpages v0.45.0 h1:jIwHHGoWzJoZdbIUtWdErjL85Gni6BignnAFqDtMRL4=
go.opentelemetry.io/contrib/zpages v0.45.0/go.mod h1:4mIdA5hqH6hEx9sZgV50qKfQO8aIYolUZboHmz+G7vw=
-go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
-go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/bridge/opencensus v0.42.0 h1:QvC+bcZkWMphWPiVqRQygMj6M0/3TOuJEO+erRA7kI8=
go.opentelemetry.io/otel/bridge/opencensus v0.42.0/go.mod h1:XJojP7g5DqYdiyArix/H9i1XzPPlIUc9dGLKtF9copI=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU=
@@ -914,14 +915,14 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.42.0 h1:4jJuoeOo9W6hZn
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.42.0/go.mod h1:/MtYTE1SfC2QIcE0bDot6fIX+h+WvXjgTqgn9P0LNPE=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 h1:Nw7Dv4lwvGrI68+wULbcq7su9K2cebeCUrDjVrUJHxM=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0/go.mod h1:1MsF6Y7gTqosgoZvHlzcaaM8DIMNZgJh87ykokoNH7Y=
-go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
-go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
-go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
-go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/sdk v1.23.1 h1:O7JmZw0h76if63LQdsBMKQDWNb5oEcOThG9IrxscV+E=
+go.opentelemetry.io/otel/sdk v1.23.1/go.mod h1:LzdEVR5am1uKOOwfBWFef2DCi1nu3SA8XQxx2IerWFk=
go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k=
go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY=
-go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
-go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
@@ -929,14 +930,14 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -947,8 +948,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1045,8 +1046,8 @@ golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1179,14 +1180,14 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
-golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/pkg/query-service/README.md b/pkg/query-service/README.md
index 25b8b65eef..83925eece7 100644
--- a/pkg/query-service/README.md
+++ b/pkg/query-service/README.md
@@ -13,7 +13,7 @@ https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#to-run-clickhouse-set
- Change the alertmanager section in `signoz/deploy/docker/clickhouse-setup/docker-compose.yaml` as follows:
```console
alertmanager:
- image: signoz/alertmanager:0.23.4
+ image: signoz/alertmanager:0.23.5
volumes:
- ./data/alertmanager:/data
expose:
diff --git a/pkg/query-service/agentConf/db.go b/pkg/query-service/agentConf/db.go
index ffbc2f53a8..04ab780db6 100644
--- a/pkg/query-service/agentConf/db.go
+++ b/pkg/query-service/agentConf/db.go
@@ -151,7 +151,7 @@ func (r *Repo) insertConfig(
// allowing empty elements for logs - use case is deleting all pipelines
if len(elements) == 0 && c.ElementType != ElementTypeLogPipelines {
- zap.S().Error("insert config called with no elements ", c.ElementType)
+ zap.L().Error("insert config called with no elements ", zap.String("ElementType", string(c.ElementType)))
return model.BadRequest(fmt.Errorf("config must have atleast one element"))
}
@@ -159,7 +159,7 @@ func (r *Repo) insertConfig(
// the version can not be set by the user, we want to auto-assign the versions
// in a monotonically increasing order starting with 1. hence, we reject insert
// requests with version anything other than 0. here, 0 indicates un-assigned
- zap.S().Error("invalid version assignment while inserting agent config", c.Version, c.ElementType)
+ zap.L().Error("invalid version assignment while inserting agent config", zap.Int("version", c.Version), zap.String("ElementType", string(c.ElementType)))
return model.BadRequest(fmt.Errorf(
"user defined versions are not supported in the agent config",
))
@@ -167,7 +167,7 @@ func (r *Repo) insertConfig(
configVersion, err := r.GetLatestVersion(ctx, c.ElementType)
if err != nil && err.Type() != model.ErrorNotFound {
- zap.S().Error("failed to fetch latest config version", err)
+ zap.L().Error("failed to fetch latest config version", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to fetch latest config version"))
}
@@ -212,7 +212,7 @@ func (r *Repo) insertConfig(
c.DeployResult)
if dbErr != nil {
- zap.S().Error("error in inserting config version: ", zap.Error(dbErr))
+ zap.L().Error("error in inserting config version: ", zap.Error(dbErr))
return model.InternalError(errors.Wrap(dbErr, "failed to insert ingestion rule"))
}
@@ -258,7 +258,7 @@ func (r *Repo) updateDeployStatus(ctx context.Context,
_, err := r.db.ExecContext(ctx, updateQuery, status, result, lastHash, lastconf, version, string(elementType))
if err != nil {
- zap.S().Error("failed to update deploy status", err)
+ zap.L().Error("failed to update deploy status", zap.Error(err))
return model.BadRequest(fmt.Errorf("failed to update deploy status"))
}
@@ -276,7 +276,7 @@ func (r *Repo) updateDeployStatusByHash(
_, err := r.db.ExecContext(ctx, updateQuery, status, result, confighash)
if err != nil {
- zap.S().Error("failed to update deploy status", err)
+ zap.L().Error("failed to update deploy status", zap.Error(err))
return model.InternalError(errors.Wrap(err, "failed to update deploy status"))
}
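Review note: the recurring change in this file (and across the rest of the diff) is a migration from zap's sugared logger (`zap.S()`) to its structured logger (`zap.L()`). A minimal self-contained sketch of the two call styles, not taken from this PR; `doSomething` is a hypothetical stand-in for any failing call:

```go
package main

import "go.uber.org/zap"

// doSomething is a hypothetical stand-in for any call that can fail.
func doSomething() error { return nil }

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	err := doSomething()
	// Before: the sugared logger takes loosely typed variadic args,
	// which are stringified positionally and are easy to get wrong.
	zap.S().Error("failed to fetch latest config version", err)
	// After: the structured logger takes typed, keyed fields, so the
	// error lands in a machine-readable "error" field.
	zap.L().Error("failed to fetch latest config version", zap.Error(err))
}
```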
diff --git a/pkg/query-service/agentConf/manager.go b/pkg/query-service/agentConf/manager.go
index 0e77383f7e..c9a7335e0b 100644
--- a/pkg/query-service/agentConf/manager.go
+++ b/pkg/query-service/agentConf/manager.go
@@ -111,10 +111,6 @@ func (m *Manager) RecommendAgentConfig(currentConfYaml []byte) (
return nil, "", errors.Wrap(apiErr.ToError(), "failed to get latest agent config version")
}
- if latestConfig == nil {
- continue
- }
-
updatedConf, serializedSettingsUsed, apiErr := feature.RecommendAgentConfig(
recommendation, latestConfig,
)
@@ -124,13 +120,24 @@ func (m *Manager) RecommendAgentConfig(currentConfYaml []byte) (
))
}
recommendation = updatedConf
- configId := fmt.Sprintf("%s:%d", featureType, latestConfig.Version)
+
+ // It is possible for a feature to recommend collector config
+ // before any user-created config versions exist.
+ //
+ // For example, log pipeline config for installed integrations will
+ // have to be recommended even if the user hasn't created any pipelines yet.
+ configVersion := -1
+ if latestConfig != nil {
+ configVersion = latestConfig.Version
+ }
+ configId := fmt.Sprintf("%s:%d", featureType, configVersion)
+
settingVersionsUsed = append(settingVersionsUsed, configId)
m.updateDeployStatus(
context.Background(),
featureType,
- latestConfig.Version,
+ configVersion,
string(DeployInitiated),
"Deployment has started",
configId,
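Review note: the hunk above replaces the earlier nil-skip with a sentinel version of -1, so the deploy status can still be keyed when no user-created config version exists. A self-contained sketch of the fallback, with an illustrative `ConfigVersion` type standing in for the real one:

```go
package main

import "fmt"

// ConfigVersion is an illustrative shape; the real type lives in agentConf.
type ConfigVersion struct{ Version int }

// versionKey mirrors the fallback above: features such as integration log
// pipelines may recommend collector config before any user-created version
// exists, so a nil latest version is keyed as -1 instead of dereferenced.
func versionKey(featureType string, latest *ConfigVersion) string {
	version := -1
	if latest != nil {
		version = latest.Version
	}
	return fmt.Sprintf("%s:%d", featureType, version)
}

func main() {
	fmt.Println(versionKey("log_pipelines", nil))                        // log_pipelines:-1
	fmt.Println(versionKey("log_pipelines", &ConfigVersion{Version: 3})) // log_pipelines:3
}
```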
@@ -209,23 +216,27 @@ func StartNewVersion(
return cfg, nil
}
+func NotifyConfigUpdate(ctx context.Context) {
+ m.notifyConfigUpdateSubscribers()
+}
+
func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiError {
configVersion, err := GetConfigVersion(ctx, typ, version)
if err != nil {
- zap.S().Debug("failed to fetch config version during redeploy", err)
+ zap.L().Error("failed to fetch config version during redeploy", zap.Error(err))
return model.WrapApiError(err, "failed to fetch details of the config version")
}
if configVersion == nil || (configVersion != nil && configVersion.LastConf == "") {
- zap.S().Debug("config version has no conf yaml", configVersion)
+ zap.L().Debug("config version has no conf yaml", zap.Any("configVersion", configVersion))
return model.BadRequest(fmt.Errorf("the config version can not be redeployed"))
}
switch typ {
case ElementTypeSamplingRules:
var config *tsp.Config
if err := yaml.Unmarshal([]byte(configVersion.LastConf), &config); err != nil {
- zap.S().Error("failed to read last conf correctly", err)
+ zap.L().Debug("failed to read last conf correctly", zap.Error(err))
return model.BadRequest(fmt.Errorf("failed to read the stored config correctly"))
}
@@ -237,7 +248,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
opamp.AddToTracePipelineSpec("signoz_tail_sampling")
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
- zap.S().Error("failed to call agent config update for trace processor:", err)
+ zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to deploy the config"))
}
@@ -245,7 +256,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
case ElementTypeDropRules:
var filterConfig *filterprocessor.Config
if err := yaml.Unmarshal([]byte(configVersion.LastConf), &filterConfig); err != nil {
- zap.S().Error("failed to read last conf correctly", err)
+ zap.L().Error("failed to read last conf correctly", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to read the stored config correctly"))
}
processorConf := map[string]interface{}{
@@ -255,7 +266,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
opamp.AddToMetricsPipelineSpec("filter")
configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate)
if err != nil {
- zap.S().Error("failed to call agent config update for trace processor:", err)
+ zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return err
}
@@ -281,13 +292,13 @@ func UpsertFilterProcessor(ctx context.Context, version int, config *filterproce
opamp.AddToMetricsPipelineSpec("filter")
configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate)
if err != nil {
- zap.S().Error("failed to call agent config update for trace processor:", err)
+ zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return err
}
processorConfYaml, yamlErr := yaml.Marshal(config)
if yamlErr != nil {
- zap.S().Warnf("unexpected error while transforming processor config to yaml", yamlErr)
+ zap.L().Warn("unexpected error while transforming processor config to yaml", zap.Error(yamlErr))
}
m.updateDeployStatus(ctx, ElementTypeDropRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml))
@@ -306,7 +317,7 @@ func (m *Manager) OnConfigUpdate(agentId string, hash string, err error) {
message := "Deployment was successful"
defer func() {
- zap.S().Info(status, zap.String("agentId", agentId), zap.String("agentResponse", message))
+ zap.L().Info(status, zap.String("agentId", agentId), zap.String("agentResponse", message))
}()
if err != nil {
@@ -332,13 +343,13 @@ func UpsertSamplingProcessor(ctx context.Context, version int, config *tsp.Confi
opamp.AddToTracePipelineSpec("signoz_tail_sampling")
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
- zap.S().Error("failed to call agent config update for trace processor:", err)
+ zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return err
}
processorConfYaml, yamlErr := yaml.Marshal(config)
if yamlErr != nil {
- zap.S().Warnf("unexpected error while transforming processor config to yaml", yamlErr)
+ zap.L().Warn("unexpected error while transforming processor config to yaml", zap.Error(yamlErr))
}
m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml))
diff --git a/pkg/query-service/app/clickhouseReader/options.go b/pkg/query-service/app/clickhouseReader/options.go
index 0defced7ed..d92b5ee38f 100644
--- a/pkg/query-service/app/clickhouseReader/options.go
+++ b/pkg/query-service/app/clickhouseReader/options.go
@@ -106,7 +106,7 @@ func defaultConnector(cfg *namespaceConfig) (clickhouse.Conn, error) {
options.DialTimeout = cfg.DialTimeout
}
- zap.S().Infof("Connecting to Clickhouse at %s, Secure: %t, MaxIdleConns: %d, MaxOpenConns: %d, DialTimeout: %s", options.Addr, options.TLS != nil, options.MaxIdleConns, options.MaxOpenConns, options.DialTimeout)
+ zap.L().Info("Connecting to Clickhouse", zap.String("at", options.Addr[0]), zap.Int("MaxIdleConns", options.MaxIdleConns), zap.Int("MaxOpenConns", options.MaxOpenConns), zap.Duration("DialTimeout", options.DialTimeout))
db, err := clickhouse.Open(options)
if err != nil {
return nil, err
diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go
index 1834aa0ff0..a1b12d9415 100644
--- a/pkg/query-service/app/clickhouseReader/reader.go
+++ b/pkg/query-service/app/clickhouseReader/reader.go
@@ -44,9 +44,11 @@ import (
"go.uber.org/zap"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
+ "go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/logs"
"go.signoz.io/signoz/pkg/query-service/app/services"
"go.signoz.io/signoz/pkg/query-service/auth"
+ "go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/constants"
am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
"go.signoz.io/signoz/pkg/query-service/interfaces"
@@ -71,6 +73,7 @@ const (
signozSampleLocalTableName = "samples_v2"
signozSampleTableName = "distributed_samples_v2"
signozTSTableName = "distributed_time_series_v2"
+ signozTSTableNameV4 = "distributed_time_series_v4"
signozTSTableNameV41Day = "distributed_time_series_v4_1day"
minTimespanForProgressiveSearch = time.Hour
@@ -138,19 +141,31 @@ func NewReader(
db, err := initialize(options)
if err != nil {
- zap.S().Error("failed to initialize ClickHouse: ", err)
- os.Exit(1)
+ zap.L().Fatal("failed to initialize ClickHouse", zap.Error(err))
}
+ return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster)
+}
+
+func NewReaderFromClickhouseConnection(
+ db driver.Conn,
+ options *Options,
+ localDB *sqlx.DB,
+ configFile string,
+ featureFlag interfaces.FeatureLookup,
+ cluster string,
+) *ClickHouseReader {
alertManager, err := am.New("")
if err != nil {
- zap.S().Errorf("msg: failed to initialize alert manager: ", "/t error:", err)
- zap.S().Errorf("msg: check if the alert manager URL is correctly set and valid")
+ zap.L().Error("failed to initialize alert manager", zap.Error(err))
+ zap.L().Error("check if the alert manager URL is correctly set and valid")
os.Exit(1)
}
+ wrap := clickhouseConnWrapper{conn: db}
+
return &ClickHouseReader{
- db: db,
+ db: wrap,
localDB: localDB,
TraceDB: options.primary.TraceDB,
alertManager: alertManager,
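Review note: extracting `NewReaderFromClickhouseConnection` from `NewReader` lets tests inject an already-open (or mocked) connection instead of dialing a real ClickHouse server, which lines up with the `ClickHouse-go-mock` bump in go.mod. A hypothetical test fragment, assuming that mock package exposes a sqlmock-style `NewClickHouseNative` constructor:

```go
// Sketch of a test body; cmock ("github.com/srikanthccv/ClickHouse-go-mock"),
// options, localDB and featureFlags are assumed to be set up elsewhere.
conn, err := cmock.NewClickHouseNative(nil) // assumed constructor name
if err != nil {
	t.Fatalf("failed to open mock clickhouse connection: %v", err)
}
reader := NewReaderFromClickhouseConnection(
	conn, options, localDB, "", featureFlags, "cluster",
)
_ = reader // exercise reader methods against the mock's query expectations
```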
@@ -331,20 +346,6 @@ func (r *ClickHouseReader) Start(readerReady chan bool) {
reloadReady.Close()
- // ! commented the alert manager can now
- // call query service to do this
- // channels, apiErrorObj := r.GetChannels()
-
- // if apiErrorObj != nil {
- // zap.S().Errorf("Not able to read channels from DB")
- // }
- // for _, channel := range *channels {
- // apiErrorObj = r.LoadChannel(&channel)
- // if apiErrorObj != nil {
- // zap.S().Errorf("Not able to load channel with id=%d loaded from DB", channel.Id, channel.Data)
- // }
- // }
-
<-cancel
return nil
@@ -428,14 +429,14 @@ func (r *ClickHouseReader) LoadChannel(channel *model.ChannelItem) *model.ApiErr
response, err := http.Post(constants.GetAlertManagerApiPrefix()+"v1/receivers", "application/json", bytes.NewBuffer([]byte(channel.Data)))
if err != nil {
- zap.S().Errorf("Error in getting response of API call to alertmanager/v1/receivers\n", err)
+ zap.L().Error("Error in getting response of API call to alertmanager/v1/receivers", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
responseData, _ := io.ReadAll(response.Body)
- err := fmt.Errorf("Error in getting 2xx response in API call to alertmanager/v1/receivers\n Status: %s \n Data: %s", response.Status, string(responseData))
- zap.S().Error(err)
+ err := fmt.Errorf("Error in getting 2xx response in API call to alertmanager/v1/receivers")
+ zap.L().Error("Error in getting 2xx response in API call to alertmanager/v1/receivers", zap.String("Status", response.Status), zap.String("Data", string(responseData)))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -452,17 +453,15 @@ func (r *ClickHouseReader) GetChannel(id string) (*model.ChannelItem, *model.Api
stmt, err := r.localDB.Preparex(query)
- zap.S().Info(query, idInt)
-
if err != nil {
- zap.S().Debug("Error in preparing sql query for GetChannel : ", err)
+ zap.L().Error("Error in preparing sql query for GetChannel", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
err = stmt.Get(&channel, idInt)
if err != nil {
- zap.S().Debug(fmt.Sprintf("Error in getting channel with id=%d : ", idInt), err)
+ zap.L().Error("Error in getting channel with id", zap.Int("id", idInt), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -488,14 +487,14 @@ func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError {
{
stmt, err := tx.Prepare(`DELETE FROM notification_channels WHERE id=$1;`)
if err != nil {
- zap.S().Errorf("Error in preparing statement for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback()
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
defer stmt.Close()
if _, err := stmt.Exec(idInt); err != nil {
- zap.S().Errorf("Error in Executing prepared statement for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -509,7 +508,7 @@ func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError {
err = tx.Commit()
if err != nil {
- zap.S().Errorf("Error in committing transaction for DELETE command to notification_channels\n", err)
+ zap.L().Error("Error in committing transaction for DELETE command to notification_channels", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -525,10 +524,10 @@ func (r *ClickHouseReader) GetChannels() (*[]model.ChannelItem, *model.ApiError)
err := r.localDB.Select(&channels, query)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -593,7 +592,7 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re
// check if channel type is supported in the current user plan
if err := r.featureFlags.CheckFeature(fmt.Sprintf("ALERT_CHANNEL_%s", strings.ToUpper(channel_type))); err != nil {
- zap.S().Warn("an unsupported feature was blocked", err)
+ zap.L().Warn("an unsupported feature was blocked", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("unsupported feature. please upgrade your plan to access this feature")}
}
@@ -603,14 +602,14 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re
stmt, err := tx.Prepare(`UPDATE notification_channels SET updated_at=$1, type=$2, data=$3 WHERE id=$4;`)
if err != nil {
- zap.S().Errorf("Error in preparing statement for UPDATE to notification_channels\n", err)
+ zap.L().Error("Error in preparing statement for UPDATE to notification_channels", zap.Error(err))
tx.Rollback()
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
defer stmt.Close()
if _, err := stmt.Exec(time.Now(), channel_type, string(receiverString), idInt); err != nil {
- zap.S().Errorf("Error in Executing prepared statement for UPDATE to notification_channels\n", err)
+ zap.L().Error("Error in Executing prepared statement for UPDATE to notification_channels", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -624,7 +623,7 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re
err = tx.Commit()
if err != nil {
- zap.S().Errorf("Error in committing transaction for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -638,7 +637,7 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *
// check if channel type is supported in the current user plan
if err := r.featureFlags.CheckFeature(fmt.Sprintf("ALERT_CHANNEL_%s", strings.ToUpper(channel_type))); err != nil {
- zap.S().Warn("an unsupported feature was blocked", err)
+ zap.L().Warn("an unsupported feature was blocked", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("unsupported feature. please upgrade your plan to access this feature")}
}
@@ -652,14 +651,14 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *
{
stmt, err := tx.Prepare(`INSERT INTO notification_channels (created_at, updated_at, name, type, data) VALUES($1,$2,$3,$4,$5);`)
if err != nil {
- zap.S().Errorf("Error in preparing statement for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback()
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
defer stmt.Close()
if _, err := stmt.Exec(time.Now(), time.Now(), receiver.Name, channel_type, string(receiverString)); err != nil {
- zap.S().Errorf("Error in Executing prepared statement for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -673,7 +672,7 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *
err = tx.Commit()
if err != nil {
- zap.S().Errorf("Error in committing transaction for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -726,10 +725,10 @@ func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, erro
rows, err := r.db.Query(ctx, query)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, fmt.Errorf("Error in processing sql query")
}
@@ -744,33 +743,47 @@ func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, erro
return &services, nil
}
-func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context, skipConfig *model.SkipConfig) (*map[string][]string, *model.ApiError) {
+func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context, skipConfig *model.SkipConfig, start, end time.Time) (*map[string][]string, *map[string][]string, *model.ApiError) {
+ start = start.In(time.UTC)
+
+ // The `top_level_operations` that have `time` >= start
operations := map[string][]string{}
- query := fmt.Sprintf(`SELECT DISTINCT name, serviceName FROM %s.%s`, r.TraceDB, r.topLevelOperationsTable)
+ // All top level operations for a service
+ allOperations := map[string][]string{}
+ query := fmt.Sprintf(`SELECT DISTINCT name, serviceName, time FROM %s.%s`, r.TraceDB, r.topLevelOperationsTable)
rows, err := r.db.Query(ctx, query)
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
- return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+ zap.L().Error("Error in processing sql query", zap.Error(err))
+ return nil, nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
defer rows.Close()
for rows.Next() {
var name, serviceName string
- if err := rows.Scan(&name, &serviceName); err != nil {
- return nil, &model.ApiError{Typ: model.ErrorInternal, Err: fmt.Errorf("Error in reading data")}
+ var t time.Time
+ if err := rows.Scan(&name, &serviceName, &t); err != nil {
+ return nil, nil, &model.ApiError{Typ: model.ErrorInternal, Err: fmt.Errorf("error in reading data")}
}
if _, ok := operations[serviceName]; !ok {
operations[serviceName] = []string{}
}
+ if _, ok := allOperations[serviceName]; !ok {
+ allOperations[serviceName] = []string{}
+ }
if skipConfig.ShouldSkip(serviceName, name) {
continue
}
- operations[serviceName] = append(operations[serviceName], name)
+ allOperations[serviceName] = append(allOperations[serviceName], name)
+ // We can't use `end` because the `top_level_operations` table only keeps the most recent instance of each operation.
+ // We can only use the `start` time to filter the operations.
+ if t.After(start) {
+ operations[serviceName] = append(operations[serviceName], name)
+ }
}
- return &operations, nil
+ return &operations, &allOperations, nil
}
func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.GetServicesParams, skipConfig *model.SkipConfig) (*[]model.ServiceItem, *model.ApiError) {
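Review note: a self-contained sketch of the new split between time-filtered and all top-level operations, eliding the skip-config handling; `op` and `splitOps` are illustrative names, not from the PR:

```go
package main

import (
	"fmt"
	"time"
)

// op is an illustrative row shape for top_level_operations.
type op struct {
	name, service string
	lastSeen      time.Time
}

// splitOps mirrors the change above: allOperations always receives the name,
// while operations only receives names last seen after start. The table keeps
// just the most recent sighting of each operation, so filtering by end time
// is not possible.
func splitOps(rows []op, start time.Time) (operations, allOperations map[string][]string) {
	operations = map[string][]string{}
	allOperations = map[string][]string{}
	for _, r := range rows {
		allOperations[r.service] = append(allOperations[r.service], r.name)
		if r.lastSeen.After(start) {
			operations[r.service] = append(operations[r.service], r.name)
		}
	}
	return operations, allOperations
}

func main() {
	now := time.Now()
	rows := []op{
		{name: "GET /health", service: "api", lastSeen: now},
		{name: "stale-op", service: "api", lastSeen: now.Add(-48 * time.Hour)},
	}
	ops, all := splitOps(rows, now.Add(-time.Hour))
	fmt.Println(ops, all) // map[api:[GET /health]] map[api:[GET /health stale-op]]
}
```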
@@ -779,7 +792,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
return nil, &model.ApiError{Typ: model.ErrorExec, Err: ErrNoIndexTable}
}
- topLevelOps, apiErr := r.GetTopLevelOperations(ctx, skipConfig)
+ topLevelOps, allTopLevelOps, apiErr := r.GetTopLevelOperations(ctx, skipConfig, *queryParams.Start, *queryParams.End)
if apiErr != nil {
return nil, apiErr
}
@@ -798,6 +811,22 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
defer func() { <-sem }()
var serviceItem model.ServiceItem
var numErrors uint64
+
+ // Even if the number of operations within the time range is low while the
+ // total number of top level operations is high, we want to warn the user
+ // about a possible issue with their instrumentation.
+ serviceItem.DataWarning = model.DataWarning{
+ TopLevelOps: (*allTopLevelOps)[svc],
+ }
+
+ // The default max_query_size is 262144 bytes.
+ // Assuming the average item in `ops` is 50 bytes, the query could hold
+ // 262144/50 = 5242 items. Although we could go as high as ~5k, we cap
+ // the number of items in the `ops` array at 1500.
+
+ ops = ops[:int(math.Min(1500, float64(len(ops))))]
+
query := fmt.Sprintf(
`SELECT
quantile(0.99)(durationNano) as p99,
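Review note: a minimal sketch of the capping logic above in plain-slice form, assuming `ops` is a `[]string`; the byte-budget arithmetic follows the comment in the hunk:

```go
package main

import "fmt"

// ClickHouse's default max_query_size is 262144 bytes; at roughly 50 bytes
// per operation name that allows about 5242 IN-list items, so the query
// stays well under the limit by capping the list at 1500.
const maxOpsInQuery = 1500

// capOps is a plain-slice equivalent of the math.Min cap used above.
func capOps(ops []string) []string {
	if len(ops) > maxOpsInQuery {
		return ops[:maxOpsInQuery]
	}
	return ops
}

func main() {
	ops := make([]string, 2000)
	fmt.Println(len(capOps(ops))) // 1500
}
```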
@@ -828,7 +857,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
query += subQuery
args = append(args, argsSubQuery...)
if errStatus != nil {
- zap.S().Error("Error in processing sql query: ", errStatus)
+ zap.L().Error("Error in processing sql query", zap.Error(errStatus))
return
}
err := r.db.QueryRow(
@@ -842,15 +871,19 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
}
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return
}
subQuery, argsSubQuery, errStatus = buildQueryWithTagParams(ctx, tags)
+ if errStatus != nil {
+ zap.L().Error("Error building query with tag params", zap.Error(errStatus))
+ return
+ }
query += subQuery
args = append(args, argsSubQuery...)
err = r.db.QueryRow(ctx, errorQuery, args...).Scan(&numErrors)
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return
}
@@ -872,7 +905,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *model.GetServiceOverviewParams, skipConfig *model.SkipConfig) (*[]model.ServiceOverviewItem, *model.ApiError) {
- topLevelOps, apiErr := r.GetTopLevelOperations(ctx, skipConfig)
+ topLevelOps, _, apiErr := r.GetTopLevelOperations(ctx, skipConfig, *queryParams.Start, *queryParams.End)
if apiErr != nil {
return nil, apiErr
}
@@ -916,11 +949,11 @@ func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *
query += " GROUP BY time ORDER BY time DESC"
err := r.db.Select(ctx, &serviceOverviewItems, query, args...)
- zap.S().Debug(query)
+ zap.L().Debug("running query", zap.String("query", query))
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
- return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+ zap.L().Error("Error in processing sql query", zap.Error(err))
+ return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
serviceErrorItems := []model.ServiceErrorItem{}
@@ -944,10 +977,8 @@ func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *
query += " GROUP BY time ORDER BY time DESC"
err = r.db.Select(ctx, &serviceErrorItems, query, args...)
- zap.S().Debug(query)
-
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -1083,10 +1114,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY serviceName"
var dBResponse []model.DBResponseServiceName
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1100,10 +1131,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY httpCode"
var dBResponse []model.DBResponseHttpCode
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1117,10 +1148,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY httpRoute"
var dBResponse []model.DBResponseHttpRoute
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1134,10 +1165,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY httpUrl"
var dBResponse []model.DBResponseHttpUrl
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1151,10 +1182,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY httpMethod"
var dBResponse []model.DBResponseHttpMethod
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1168,10 +1199,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY httpHost"
var dBResponse []model.DBResponseHttpHost
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1185,10 +1216,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY name"
var dBResponse []model.DBResponseOperation
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1202,10 +1233,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY component"
var dBResponse []model.DBResponseComponent
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1218,10 +1249,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += query
var dBResponse []model.DBResponseTotal
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
@@ -1229,10 +1260,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery2 += query
var dBResponse2 []model.DBResponseTotal
err = r.db.Select(ctx, &dBResponse2, finalQuery2, args...)
- zap.S().Info(finalQuery2)
+ zap.L().Info(finalQuery2)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
if len(dBResponse) > 0 && len(dBResponse2) > 0 {
@@ -1254,9 +1285,9 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += query
var dBResponse []model.DBResponseMinMax
err = r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
if len(dBResponse) > 0 {
@@ -1269,10 +1300,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " ORDER BY durationNano LIMIT 1"
var dBResponse []model.DBResponseTotal
err = r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
@@ -1281,10 +1312,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " ORDER BY durationNano DESC LIMIT 1"
var dBResponse2 []model.DBResponseTotal
err = r.db.Select(ctx, &dBResponse2, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
if len(dBResponse) > 0 {
@@ -1300,10 +1331,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY rpcMethod"
var dBResponse []model.DBResponseRPCMethod
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1318,10 +1349,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY responseStatusCode"
var dBResponse []model.DBResponseStatusCodeMethod
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1446,10 +1477,10 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo
projectionOptQuery := "SET allow_experimental_projection_optimization = 1"
err := r.db.Exec(ctx, projectionOptQuery)
- zap.S().Info(projectionOptQuery)
+ zap.L().Info(projectionOptQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if queryParams.Order == constants.Descending {
@@ -1484,10 +1515,10 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo
}
}
- zap.S().Info(baseQuery)
+ zap.L().Info(baseQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -1564,7 +1595,7 @@ func buildQueryWithTagParams(ctx context.Context, tags []model.TagQuery) (string
case model.NotExistsOperator:
subQuery, argsSubQuery = addExistsOperator(item, tagMapType, true)
default:
- return "", nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Tag Operator %s not supported", item.GetOperator())}
+ return "", nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("filter operator %s not supported", item.GetOperator())}
}
query += subQuery
args = append(args, argsSubQuery...)
@@ -1724,10 +1755,10 @@ func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model
finalQuery += query
err := r.db.Select(ctx, &tagFilters, finalQuery, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
tagFiltersResult := model.TagFilters{
@@ -1846,10 +1877,10 @@ func (r *ClickHouseReader) GetTagValues(ctx context.Context, queryParams *model.
args = append(args, clickhouse.Named("limit", queryParams.Limit))
err := r.db.Select(ctx, &tagValues, finalQuery, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -1908,10 +1939,8 @@ func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *mo
}
err := r.db.Select(ctx, &topOperationsItems, query, args...)
- zap.S().Debug(query)
-
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -1940,10 +1969,10 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetU
err := r.db.Select(ctx, &usageItems, query, namedArgs...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, fmt.Errorf("Error in processing sql query")
}
@@ -1968,14 +1997,14 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spa
err := r.db.Select(ctx, &searchScanResponses, query, traceId)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
- return nil, fmt.Errorf("Error in processing sql query")
+ zap.L().Error("Error in processing sql query", zap.Error(err))
+ return nil, fmt.Errorf("error in processing sql query")
}
end := time.Now()
- zap.S().Debug("getTraceSQLQuery took: ", end.Sub(start))
+ zap.L().Debug("getTraceSQLQuery took: ", zap.Duration("duration", end.Sub(start)))
searchSpansResult := []model.SearchSpansResult{{
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
Events: make([][]interface{}, len(searchScanResponses)),
@@ -1991,7 +2020,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spa
searchSpanResponses = append(searchSpanResponses, jsonItem)
}
end = time.Now()
- zap.S().Debug("getTraceSQLQuery unmarshal took: ", end.Sub(start))
+ zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start)))
err = r.featureFlags.CheckFeature(model.SmartTraceDetail)
smartAlgoEnabled := err == nil
@@ -2002,7 +2031,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spa
return nil, err
}
end = time.Now()
- zap.S().Debug("smartTraceAlgo took: ", end.Sub(start))
+ zap.L().Debug("smartTraceAlgo took: ", zap.Duration("duration", end.Sub(start)))
} else {
for i, item := range searchSpanResponses {
spanEvents := item.GetValues()
@@ -2049,12 +2078,12 @@ func (r *ClickHouseReader) GetDependencyGraph(ctx context.Context, queryParams *
query += filterQuery + " GROUP BY src, dest;"
args = append(args, filterArgs...)
- zap.S().Debug(query, args)
+ zap.L().Debug("GetDependencyGraph query", zap.String("query", query), zap.Any("args", args))
err := r.db.Select(ctx, &response, query, args...)
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, fmt.Errorf("error in processing sql query %w", err)
}
@@ -2202,10 +2231,10 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query
err := r.db.Select(ctx, &SpanAggregatesDBResponseItems, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -2288,7 +2317,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
go func(tableName string) {
_, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration)
if dbErr != nil {
- zap.S().Error(fmt.Errorf("Error in inserting to ttl_status table: %s", dbErr.Error()))
+ zap.L().Error("Error in inserting to ttl_status table", zap.Error(dbErr))
return
}
req := fmt.Sprintf(
@@ -2300,32 +2329,32 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
- zap.S().Error(fmt.Errorf("Error in setting cold storage: %s", err.Err.Error()))
+ zap.L().Error("Error in setting cold storage", zap.Error(err))
statusItem, err := r.checkTTLStatusItem(ctx, tableName)
if err == nil {
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}
return
}
req += fmt.Sprint(" SETTINGS distributed_ddl_task_timeout = -1;")
- zap.S().Debugf("Executing TTL request: %s\n", req)
+ zap.L().Error("Executing TTL request: ", zap.String("request", req))
statusItem, _ := r.checkTTLStatusItem(ctx, tableName)
if err := r.db.Exec(context.Background(), req); err != nil {
- zap.S().Error(fmt.Errorf("Error in executing set TTL query: %s", err.Error()))
+ zap.L().Error("Error in executing set TTL query", zap.Error(err))
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
return
}
_, dbErr = r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusSuccess, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}(tableName)
@@ -2343,7 +2372,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
go func(tableName string) {
_, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration)
if dbErr != nil {
- zap.S().Error(fmt.Errorf("Error in inserting to ttl_status table: %s", dbErr.Error()))
+ zap.L().Error("Error in inserting to ttl_status table", zap.Error(dbErr))
return
}
req := fmt.Sprintf(
@@ -2356,32 +2385,32 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
- zap.S().Error(fmt.Errorf("Error in setting cold storage: %s", err.Err.Error()))
+ zap.L().Error("Error in setting cold storage", zap.Error(err))
statusItem, err := r.checkTTLStatusItem(ctx, tableName)
if err == nil {
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}
return
}
req += fmt.Sprint(" SETTINGS distributed_ddl_task_timeout = -1")
- zap.S().Debugf("Executing TTL request: %s\n", req)
+ zap.L().Info("Executing TTL request: ", zap.String("request", req))
statusItem, _ := r.checkTTLStatusItem(ctx, tableName)
if err := r.db.Exec(ctx, req); err != nil {
- zap.S().Error(fmt.Errorf("error while setting ttl. Err=%v", err))
+ zap.L().Error("error while setting ttl.", zap.Error(err))
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
return
}
_, dbErr = r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusSuccess, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}(tableName)
@@ -2397,7 +2426,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
go func(tableName string) {
_, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration)
if dbErr != nil {
- zap.S().Error(fmt.Errorf("error in inserting to ttl_status table: %s", dbErr.Error()))
+ zap.L().Error("error in inserting to ttl_status table", zap.Error(dbErr))
return
}
req := fmt.Sprintf(
@@ -2410,32 +2439,32 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
- zap.S().Error(fmt.Errorf("error in setting cold storage: %s", err.Err.Error()))
+ zap.L().Error("error in setting cold storage", zap.Error(err))
statusItem, err := r.checkTTLStatusItem(ctx, tableName)
if err == nil {
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}
return
}
req += fmt.Sprint(" SETTINGS distributed_ddl_task_timeout = -1")
- zap.S().Debugf("Executing TTL request: %s\n", req)
+ zap.L().Info("Executing TTL request: ", zap.String("request", req))
statusItem, _ := r.checkTTLStatusItem(ctx, tableName)
if err := r.db.Exec(ctx, req); err != nil {
- zap.S().Error(fmt.Errorf("error while setting ttl. Err=%v", err))
+ zap.L().Error("error while setting ttl", zap.Error(err))
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
return
}
_, dbErr = r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusSuccess, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}(tableName)
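
// The three per-table goroutines in SetTTL above share one bookkeeping
// lifecycle against the local ttl_status table: insert a pending row, run the
// DDL, then mark the row success or failed. A condensed, hedged sketch of that
// state machine; markPending, execDDL and markDone are hypothetical stand-ins
// for the INSERT/UPDATE statements and r.db.Exec in the real code.
package clickhouseReader

import (
	"context"

	"go.uber.org/zap"
)

func applyTTL(
	ctx context.Context,
	markPending func() (statusID int64, err error),
	execDDL func(ctx context.Context, req string) error,
	markDone func(statusID int64, status string) error,
	req string,
) {
	id, err := markPending() // status = pending
	if err != nil {
		zap.L().Error("Error in inserting to ttl_status table", zap.Error(err))
		return
	}
	req += " SETTINGS distributed_ddl_task_timeout = -1"
	if err := execDDL(ctx, req); err != nil {
		zap.L().Error("error while setting ttl", zap.Error(err))
		_ = markDone(id, "failed") // constants.StatusFailed in the real code
		return
	}
	if err := markDone(id, "success"); err != nil { // constants.StatusSuccess
		zap.L().Error("Error in processing ttl_status update sql query", zap.Error(err))
	}
}
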
@@ -2451,7 +2480,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, numberOfTransactionsStore int) {
_, err := r.localDB.Exec("DELETE FROM ttl_status WHERE transaction_id NOT IN (SELECT distinct transaction_id FROM ttl_status ORDER BY created_at DESC LIMIT ?)", numberOfTransactionsStore)
if err != nil {
- zap.S().Debug("Error in processing ttl_status delete sql query: ", err)
+ zap.L().Error("Error in processing ttl_status delete sql query", zap.Error(err))
}
}
@@ -2461,12 +2490,12 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, tableName str
query := `SELECT id, status, ttl, cold_storage_ttl FROM ttl_status WHERE table_name = ? ORDER BY created_at DESC`
- zap.S().Info(query, tableName)
+ zap.L().Info("checkTTLStatusItem query", zap.String("query", query), zap.String("tableName", tableName))
stmt, err := r.localDB.Preparex(query)
if err != nil {
- zap.S().Debug("Error preparing query for checkTTLStatusItem: ", err)
+ zap.L().Error("Error preparing query for checkTTLStatusItem", zap.Error(err))
return model.TTLStatusItem{}, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -2476,7 +2505,7 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, tableName str
return model.TTLStatusItem{}, nil
}
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return model.TTLStatusItem{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing ttl_status check sql query")}
}
return statusItem[0], nil
@@ -2517,9 +2546,9 @@ func (r *ClickHouseReader) setColdStorage(ctx context.Context, tableName string,
if len(coldStorageVolume) > 0 {
policyReq := fmt.Sprintf("ALTER TABLE %s ON CLUSTER %s MODIFY SETTING storage_policy='tiered'", tableName, r.cluster)
- zap.S().Debugf("Executing Storage policy request: %s\n", policyReq)
+ zap.L().Info("Executing Storage policy request: ", zap.String("request", policyReq))
if err := r.db.Exec(ctx, policyReq); err != nil {
- zap.S().Error(fmt.Errorf("error while setting storage policy. Err=%v", err))
+ zap.L().Error("error while setting storage policy", zap.Error(err))
return &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while setting storage policy. Err=%v", err)}
}
}
@@ -2532,12 +2561,10 @@ func (r *ClickHouseReader) GetDisks(ctx context.Context) (*[]model.DiskItem, *mo
query := "SELECT name,type FROM system.disks"
if err := r.db.Select(ctx, &diskItems, query); err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting disks. Err=%v", err)}
}
- zap.S().Infof("Got response: %+v\n", diskItems)
-
return &diskItems, nil
}
@@ -2555,7 +2582,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa
parseTTL := func(queryResp string) (int, int) {
- zap.S().Debugf("Parsing TTL from: %s", queryResp)
+ zap.L().Info("Parsing TTL from: ", zap.String("queryResp", queryResp))
deleteTTLExp := regexp.MustCompile(`toIntervalSecond\(([0-9]*)\)`)
moveTTLExp := regexp.MustCompile(`toIntervalSecond\(([0-9]*)\) TO VOLUME`)
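
// A runnable sketch of the extraction parseTTL performs on the SHOW CREATE
// TABLE text: the first regex captures the delete TTL in seconds, the second
// the move-to-cold-storage TTL. The sample response string is illustrative.
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	deleteTTLExp := regexp.MustCompile(`toIntervalSecond\(([0-9]*)\)`)
	moveTTLExp := regexp.MustCompile(`toIntervalSecond\(([0-9]*)\) TO VOLUME`)

	queryResp := `TTL toDateTime(timestamp) + toIntervalSecond(2592000), ` +
		`toDateTime(timestamp) + toIntervalSecond(604800) TO VOLUME 'tiered'`

	delTTL, moveTTL := -1, -1
	if m := deleteTTLExp.FindStringSubmatch(queryResp); len(m) > 1 {
		delTTL, _ = strconv.Atoi(m[1])
	}
	if m := moveTTLExp.FindStringSubmatch(queryResp); len(m) > 1 {
		moveTTL, _ = strconv.Atoi(m[1])
	}
	fmt.Println(delTTL, moveTTL) // 2592000 604800 (seconds)
}
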
@@ -2590,7 +2617,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
- zap.S().Error(fmt.Errorf("error while getting ttl. Err=%v", err))
+ zap.L().Error("error while getting ttl", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2608,7 +2635,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
- zap.S().Error(fmt.Errorf("error while getting ttl. Err=%v", err))
+ zap.L().Error("error while getting ttl", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2626,7 +2653,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
- zap.S().Error(fmt.Errorf("error while getting ttl. Err=%v", err))
+ zap.L().Error("error while getting ttl", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2748,7 +2775,7 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li
args = append(args, argsSubQuery...)
if errStatus != nil {
- zap.S().Error("Error in processing tags: ", errStatus)
+ zap.L().Error("Error in processing tags", zap.Error(errStatus))
return nil, errStatus
}
query = query + " GROUP BY groupID"
@@ -2776,10 +2803,10 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li
}
err := r.db.Select(ctx, &getErrorResponses, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -2808,15 +2835,15 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C
args = append(args, argsSubQuery...)
if errStatus != nil {
- zap.S().Error("Error in processing tags: ", errStatus)
+ zap.L().Error("Error in processing tags", zap.Error(errStatus))
return 0, errStatus
}
err := r.db.QueryRow(ctx, query, args...).Scan(&errorCount)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return 0, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -2826,7 +2853,7 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C
func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) {
if queryParams.ErrorID == "" {
- zap.S().Debug("errorId missing from params")
+ zap.L().Error("errorId missing from params")
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("ErrorID missing from params")}
}
var getErrorWithSpanReponse []model.ErrorWithSpan
@@ -2835,10 +2862,10 @@ func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams
args := []interface{}{clickhouse.Named("errorID", queryParams.ErrorID), clickhouse.Named("groupID", queryParams.GroupID), clickhouse.Named("timestamp", strconv.FormatInt(queryParams.Timestamp.UnixNano(), 10))}
err := r.db.Select(ctx, &getErrorWithSpanReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -2859,10 +2886,10 @@ func (r *ClickHouseReader) GetErrorFromGroupID(ctx context.Context, queryParams
err := r.db.Select(ctx, &getErrorWithSpanReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -2877,7 +2904,7 @@ func (r *ClickHouseReader) GetErrorFromGroupID(ctx context.Context, queryParams
func (r *ClickHouseReader) GetNextPrevErrorIDs(ctx context.Context, queryParams *model.GetErrorParams) (*model.NextPrevErrorIDs, *model.ApiError) {
if queryParams.ErrorID == "" {
- zap.S().Debug("errorId missing from params")
+ zap.L().Error("errorId missing from params")
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("ErrorID missing from params")}
}
var err *model.ApiError
@@ -2886,12 +2913,12 @@ func (r *ClickHouseReader) GetNextPrevErrorIDs(ctx context.Context, queryParams
}
getNextPrevErrorIDsResponse.NextErrorID, getNextPrevErrorIDsResponse.NextTimestamp, err = r.getNextErrorID(ctx, queryParams)
if err != nil {
- zap.S().Debug("Unable to get next error ID due to err: ", err)
+ zap.L().Error("Unable to get next error ID due to err: ", zap.Error(err))
return nil, err
}
getNextPrevErrorIDsResponse.PrevErrorID, getNextPrevErrorIDsResponse.PrevTimestamp, err = r.getPrevErrorID(ctx, queryParams)
if err != nil {
- zap.S().Debug("Unable to get prev error ID due to err: ", err)
+ zap.L().Error("Unable to get prev error ID due to err: ", zap.Error(err))
return nil, err
}
return &getNextPrevErrorIDsResponse, nil
@@ -2907,17 +2934,17 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getNextErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getNextErrorIDReponse) == 0 {
- zap.S().Info("NextErrorID not found")
+ zap.L().Info("NextErrorID not found")
return "", time.Time{}, nil
} else if len(getNextErrorIDReponse) == 1 {
- zap.S().Info("NextErrorID found")
+ zap.L().Info("NextErrorID found")
return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil
} else {
if getNextErrorIDReponse[0].Timestamp.UnixNano() == getNextErrorIDReponse[1].Timestamp.UnixNano() {
@@ -2928,10 +2955,10 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getNextErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getNextErrorIDReponse) == 0 {
@@ -2942,26 +2969,26 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getNextErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getNextErrorIDReponse) == 0 {
- zap.S().Info("NextErrorID not found")
+ zap.L().Info("NextErrorID not found")
return "", time.Time{}, nil
} else {
- zap.S().Info("NextErrorID found")
+ zap.L().Info("NextErrorID found")
return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil
}
} else {
- zap.S().Info("NextErrorID found")
+ zap.L().Info("NextErrorID found")
return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil
}
} else {
- zap.S().Info("NextErrorID found")
+ zap.L().Info("NextErrorID found")
return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil
}
}
@@ -2976,17 +3003,17 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getPrevErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getPrevErrorIDReponse) == 0 {
- zap.S().Info("PrevErrorID not found")
+ zap.L().Info("PrevErrorID not found")
return "", time.Time{}, nil
} else if len(getPrevErrorIDReponse) == 1 {
- zap.S().Info("PrevErrorID found")
+ zap.L().Info("PrevErrorID found")
return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil
} else {
if getPrevErrorIDReponse[0].Timestamp.UnixNano() == getPrevErrorIDReponse[1].Timestamp.UnixNano() {
@@ -2997,10 +3024,10 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getPrevErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getPrevErrorIDReponse) == 0 {
@@ -3011,26 +3038,26 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getPrevErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getPrevErrorIDReponse) == 0 {
- zap.S().Info("PrevErrorID not found")
+ zap.L().Info("PrevErrorID not found")
return "", time.Time{}, nil
} else {
- zap.S().Info("PrevErrorID found")
+ zap.L().Info("PrevErrorID found")
return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil
}
} else {
- zap.S().Info("PrevErrorID found")
+ zap.L().Info("PrevErrorID found")
return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil
}
} else {
- zap.S().Info("PrevErrorID found")
+ zap.L().Info("PrevErrorID found")
return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil
}
}
@@ -3061,7 +3088,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteTagKey(ctx context.Context, para
}
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -3100,7 +3127,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteTagValue(ctx context.Context, pa
}
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -3130,7 +3157,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteMetricNames(ctx context.Context,
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", matchText))
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -3148,7 +3175,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteMetricNames(ctx context.Context,
}
func (r *ClickHouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*model.Series, string, error) {
- zap.S().Error("GetMetricResultEE is not implemented for opensource version")
+ zap.L().Error("GetMetricResultEE is not implemented for opensource version")
return nil, "", fmt.Errorf("GetMetricResultEE is not implemented for opensource version")
}
@@ -3157,12 +3184,12 @@ func (r *ClickHouseReader) GetMetricResult(ctx context.Context, query string) ([
defer utils.Elapsed("GetMetricResult")()
- zap.S().Infof("Executing metric result query: %s", query)
+ zap.L().Info("Executing metric result query: ", zap.String("query", query))
rows, err := r.db.Query(ctx, query)
if err != nil {
- zap.S().Debug("Error in processing query: ", err)
+ zap.L().Error("Error in processing query", zap.Error(err))
return nil, err
}
@@ -3239,7 +3266,7 @@ func (r *ClickHouseReader) GetMetricResult(ctx context.Context, query string) ([
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
}
default:
- zap.S().Errorf("invalid var found in metric builder query result", v, colName)
+ zap.L().Error("invalid var found in metric builder query result", zap.Any("v", v), zap.String("colName", colName))
}
}
sort.Strings(groupBy)
@@ -3407,8 +3434,7 @@ func (r *ClickHouseReader) GetTagsInfoInLastHeartBeatInterval(ctx context.Contex
err := r.db.Select(ctx, &tagTelemetryDataList, queryStr)
if err != nil {
- zap.S().Info(queryStr)
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query: ", zap.Error(err))
return nil, err
}
@@ -3465,17 +3491,40 @@ func (r *ClickHouseReader) GetDashboardsInfo(ctx context.Context) (*model.Dashbo
var dashboardsData []dashboards.Dashboard
err := r.localDB.Select(&dashboardsData, query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return &dashboardsInfo, err
}
+ totalDashboardsWithPanelAndName := 0
for _, dashboard := range dashboardsData {
+ if isDashboardWithPanelAndName(dashboard.Data) {
+ totalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName + 1
+ }
dashboardsInfo = countPanelsInDashboard(dashboard.Data)
}
dashboardsInfo.TotalDashboards = len(dashboardsData)
-
+ dashboardsInfo.TotalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName
return &dashboardsInfo, nil
}
+func isDashboardWithPanelAndName(data map[string]interface{}) bool {
+ if data == nil || data["title"] == nil || data["widgets"] == nil {
+ return false
+ }
+ title, ok := data["title"].(string)
+ if !ok || title == "Sample Title" {
+ return false
+ }
+ widgets, ok := data["widgets"].([]interface{})
+ return ok && len(widgets) > 0
+}
func countPanelsInDashboard(data map[string]interface{}) model.DashboardsInfo {
var logsPanelCount, tracesPanelCount, metricsPanelCount int
// totalPanels := 0
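
// A self-contained check of the isDashboardWithPanelAndName helper added
// above — a dashboard counts only if it has a non-default title and at least
// one widget. The helper body is repeated so the snippet runs standalone; the
// sample maps are hypothetical.
package main

import "fmt"

func isDashboardWithPanelAndName(data map[string]interface{}) bool {
	if data == nil || data["title"] == nil || data["widgets"] == nil {
		return false
	}
	title, ok := data["title"].(string)
	if !ok || title == "Sample Title" {
		return false
	}
	widgets, ok := data["widgets"].([]interface{})
	return ok && len(widgets) > 0
}

func main() {
	named := map[string]interface{}{
		"title":   "API latency",
		"widgets": []interface{}{map[string]interface{}{"id": "w1"}},
	}
	unnamed := map[string]interface{}{
		"title":   "Sample Title", // the default title is treated as unnamed
		"widgets": []interface{}{},
	}
	fmt.Println(isDashboardWithPanelAndName(named))   // true
	fmt.Println(isDashboardWithPanelAndName(unnamed)) // false
}
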
@@ -3528,14 +3577,14 @@ func (r *ClickHouseReader) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo
var alertsData []string
err := r.localDB.Select(&alertsData, query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return &alertsInfo, err
}
for _, alert := range alertsData {
var rule rules.GettableRule
err = json.Unmarshal([]byte(alert), &rule)
if err != nil {
- zap.S().Errorf("msg:", "invalid rule data", "\t err:", err)
+ zap.L().Error("invalid rule data", zap.Error(err))
continue
}
if rule.AlertType == "LOGS_BASED_ALERT" {
@@ -3551,6 +3600,24 @@ func (r *ClickHouseReader) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo
return &alertsInfo, nil
}
+func (r *ClickHouseReader) GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error) {
+ savedViewsInfo := model.SavedViewsInfo{}
+ savedViews, err := explorer.GetViews()
+ if err != nil {
+ zap.S().Debug("Error in fetching saved views info: ", err)
+ return &savedViewsInfo, err
+ }
+ savedViewsInfo.TotalSavedViews = len(savedViews)
+ for _, view := range savedViews {
+ if view.SourcePage == "traces" {
+ savedViewsInfo.TracesSavedViews += 1
+ } else if view.SourcePage == "logs" {
+ savedViewsInfo.LogsSavedViews += 1
+ }
+ }
+ return &savedViewsInfo, nil
+}
+
func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError) {
// response will contain top level fields from the otel log model
response := model.GetFieldsResponse{
@@ -3746,7 +3813,6 @@ func (r *ClickHouseReader) GetLogs(ctx context.Context, params *model.LogsFilter
}
query = fmt.Sprintf("%s order by %s %s limit %d", query, params.OrderBy, params.Order, params.Limit)
- zap.S().Debug(query)
err = r.db.Select(ctx, &response, query)
if err != nil {
return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
@@ -3806,7 +3872,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
case <-ctx.Done():
done := true
client.Done <- &done
- zap.S().Debug("closing go routine : " + client.Name)
+ zap.L().Debug("closing go routine : " + client.Name)
return
case <-ticker.C:
// get the new 100 logs as anything more older won't make sense
@@ -3818,11 +3884,10 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
tmpQuery = fmt.Sprintf("%s and id > '%s'", tmpQuery, idStart)
}
tmpQuery = fmt.Sprintf("%s order by timestamp desc, id desc limit 100", tmpQuery)
- zap.S().Debug(tmpQuery)
response := []model.SignozLog{}
err := r.db.Select(ctx, &response, tmpQuery)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while getting logs", zap.Error(err))
client.Error <- err
return
}
@@ -3831,7 +3896,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
case <-ctx.Done():
done := true
client.Done <- &done
- zap.S().Debug("closing go routine while sending logs : " + client.Name)
+ zap.L().Debug("closing go routine while sending logs : " + client.Name)
return
default:
client.Logs <- &response[i]
@@ -3896,7 +3961,6 @@ func (r *ClickHouseReader) AggregateLogs(ctx context.Context, params *model.Logs
query = fmt.Sprintf("%s GROUP BY ts_start_interval ORDER BY ts_start_interval", query)
}
- zap.S().Debug(query)
err = r.db.Select(ctx, &logAggregatesDBResponseItems, query)
if err != nil {
return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
@@ -3935,10 +3999,10 @@ func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string)
var result model.DashboardVar
rows, err := r.db.Query(ctx, query)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, err
}
@@ -3974,23 +4038,30 @@ func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, req
var rows driver.Rows
var response v3.AggregateAttributeResponse
- query = fmt.Sprintf("SELECT DISTINCT metric_name, type from %s.%s WHERE metric_name ILIKE $1", signozMetricDBName, signozTSTableNameV41Day)
+ query = fmt.Sprintf("SELECT metric_name, type, is_monotonic, temporality FROM %s.%s WHERE metric_name ILIKE $1 GROUP BY metric_name, type, is_monotonic, temporality", signozMetricDBName, signozTSTableNameV41Day)
if req.Limit != 0 {
query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
}
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
- var metricName, typ string
+ seen := make(map[string]struct{})
+
+ var metricName, typ, temporality string
+ var isMonotonic bool
for rows.Next() {
- if err := rows.Scan(&metricName, &typ); err != nil {
+ if err := rows.Scan(&metricName, &typ, &isMonotonic, &temporality); err != nil {
return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
}
+ // Non-monotonic cumulative sums are treated as gauges
+ if typ == "Sum" && !isMonotonic && temporality == string(v3.Cumulative) {
+ typ = "Gauge"
+ }
// unlike traces/logs `tag`/`resource` type, the `Type` will be metric type
key := v3.AttributeKey{
Key: metricName,
@@ -3998,6 +4069,11 @@ func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, req
Type: v3.AttributeKeyType(typ),
IsColumn: true,
}
+ // remove duplicates
+ if _, ok := seen[metricName+typ]; ok {
+ continue
+ }
+ seen[metricName+typ] = struct{}{}
response.AttributeKeys = append(response.AttributeKeys, key)
}
@@ -4012,13 +4088,13 @@ func (r *ClickHouseReader) GetMetricAttributeKeys(ctx context.Context, req *v3.F
var response v3.FilterAttributeKeyResponse
// skips the internal attributes i.e attributes starting with __
- query = fmt.Sprintf("SELECT DISTINCT arrayJoin(tagKeys) as distinctTagKey from (SELECT DISTINCT(JSONExtractKeys(labels)) tagKeys from %s.%s WHERE metric_name=$1) WHERE distinctTagKey ILIKE $2 AND distinctTagKey NOT LIKE '\\_\\_%%'", signozMetricDBName, signozTSTableName)
+ query = fmt.Sprintf("SELECT arrayJoin(tagKeys) AS distinctTagKey FROM (SELECT JSONExtractKeys(labels) AS tagKeys FROM %s.%s WHERE metric_name=$1 AND unix_milli >= $2 GROUP BY tagKeys) WHERE distinctTagKey ILIKE $3 AND distinctTagKey NOT LIKE '\\_\\_%%' GROUP BY distinctTagKey", signozMetricDBName, signozTSTableNameV41Day)
if req.Limit != 0 {
query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
}
- rows, err = r.db.Query(ctx, query, req.AggregateAttribute, fmt.Sprintf("%%%s%%", req.SearchText))
+ rows, err = r.db.Query(ctx, query, req.AggregateAttribute, common.PastDayRoundOff(), fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
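
// Both attribute queries above now bound the scan with unix_milli >=
// common.PastDayRoundOff(), so only roughly the last day of the time-series
// table is read. A plausible sketch of such a helper — the exact rounding
// boundary used by the real common package is an assumption here:
package main

import (
	"fmt"
	"time"
)

// pastDayRoundOff returns a millisecond timestamp ~24h in the past, rounded
// down so repeated queries share a cacheable lower bound.
func pastDayRoundOff() int64 {
	return time.Now().Add(-24 * time.Hour).Truncate(time.Hour).UnixMilli()
}

func main() {
	fmt.Println(pastDayRoundOff())
}
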
@@ -4047,14 +4123,14 @@ func (r *ClickHouseReader) GetMetricAttributeValues(ctx context.Context, req *v3
var rows driver.Rows
var attributeValues v3.FilterAttributeValueResponse
- query = fmt.Sprintf("SELECT DISTINCT(JSONExtractString(labels, $1)) from %s.%s WHERE metric_name=$2 AND JSONExtractString(labels, $3) ILIKE $4", signozMetricDBName, signozTSTableName)
+ query = fmt.Sprintf("SELECT JSONExtractString(labels, $1) AS tagValue FROM %s.%s WHERE metric_name=$2 AND JSONExtractString(labels, $3) ILIKE $4 AND unix_milli >= $5 GROUP BY tagValue", signozMetricDBName, signozTSTableNameV41Day)
if req.Limit != 0 {
query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
}
- rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, req.AggregateAttribute, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText))
+ rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, req.AggregateAttribute, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), common.PastDayRoundOff())
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4076,7 +4152,7 @@ func (r *ClickHouseReader) GetLatencyMetricMetadata(ctx context.Context, metricN
query := fmt.Sprintf("SELECT DISTINCT(temporality) from %s.%s WHERE metric_name='%s' AND JSONExtractString(labels, 'service_name') = '%s'", signozMetricDBName, signozTSTableName, metricName, serviceName)
rows, err := r.db.Query(ctx, query, metricName)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4095,7 +4171,7 @@ func (r *ClickHouseReader) GetLatencyMetricMetadata(ctx context.Context, metricN
query = fmt.Sprintf("SELECT DISTINCT(JSONExtractString(labels, 'le')) as le from %s.%s WHERE metric_name='%s' AND JSONExtractString(labels, 'service_name') = '%s' ORDER BY le", signozMetricDBName, signozTSTableName, metricName, serviceName)
rows, err = r.db.Query(ctx, query, metricName)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4111,7 +4187,7 @@ func (r *ClickHouseReader) GetLatencyMetricMetadata(ctx context.Context, metricN
// ideally this should not happen but we have seen ClickHouse
// returning empty string for some values
if err != nil {
- zap.S().Error("error while parsing le value: ", err)
+ zap.L().Error("error while parsing le value", zap.Error(err))
continue
}
if math.IsInf(le, 0) {
@@ -4133,7 +4209,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se
query := fmt.Sprintf("SELECT DISTINCT temporality, description, type, unit, is_monotonic from %s.%s WHERE metric_name=$1", signozMetricDBName, signozTSTableNameV41Day)
rows, err := r.db.Query(ctx, query, metricName)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while fetching metric metadata", zap.Error(err))
return nil, fmt.Errorf("error while fetching metric metadata: %s", err.Error())
}
defer rows.Close()
@@ -4152,7 +4228,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se
query = fmt.Sprintf("SELECT DISTINCT(JSONExtractString(labels, 'le')) as le from %s.%s WHERE metric_name=$1 AND type = 'Histogram' AND JSONExtractString(labels, 'service_name') = $2 ORDER BY le", signozMetricDBName, signozTSTableNameV41Day)
rows, err = r.db.Query(ctx, query, metricName, serviceName)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4168,7 +4244,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se
// ideally this should not happen but we have seen ClickHouse
// returning empty string for some values
if err != nil {
- zap.S().Error("error while parsing le value: ", err)
+ zap.L().Error("error while parsing le value", zap.Error(err))
continue
}
if math.IsInf(le, 0) {
@@ -4188,6 +4264,67 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se
}, nil
}
+func (r *ClickHouseReader) GetLatestReceivedMetric(
+ ctx context.Context, metricNames []string,
+) (*model.MetricStatus, *model.ApiError) {
+ if len(metricNames) < 1 {
+ return nil, nil
+ }
+
+ quotedMetricNames := []string{}
+ for _, m := range metricNames {
+ quotedMetricNames = append(quotedMetricNames, fmt.Sprintf(`'%s'`, m))
+ }
+ commaSeparatedMetricNames := strings.Join(quotedMetricNames, ", ")
+
+ query := fmt.Sprintf(`
+ SELECT metric_name, labels, unix_milli
+ from %s.%s
+ where metric_name in (
+ %s
+ )
+ order by unix_milli desc
+ limit 1
+ `, signozMetricDBName, signozTSTableNameV4, commaSeparatedMetricNames,
+ )
+
+ rows, err := r.db.Query(ctx, query)
+ if err != nil {
+ return nil, model.InternalError(fmt.Errorf(
+ "couldn't query clickhouse for received metrics status: %w", err,
+ ))
+ }
+ defer rows.Close()
+
+ var result *model.MetricStatus
+
+ if rows.Next() {
+
+ result = &model.MetricStatus{}
+ var labelsJson string
+
+ err := rows.Scan(
+ &result.MetricName,
+ &labelsJson,
+ &result.LastReceivedTsMillis,
+ )
+ if err != nil {
+ return nil, model.InternalError(fmt.Errorf(
+ "couldn't scan metric status row: %w", err,
+ ))
+ }
+
+ err = json.Unmarshal([]byte(labelsJson), &result.LastReceivedLabels)
+ if err != nil {
+ return nil, model.InternalError(fmt.Errorf(
+ "couldn't unmarshal metric labels json: %w", err,
+ ))
+ }
+ }
+
+ return result, nil
+}
+
func isColumn(tableStatement, attrType, field, datType string) bool {
// value of attrType will be `resource` or `tag`, if `tag` change it to `attribute`
name := utils.GetClickhouseColumnName(attrType, datType, field)
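
// GetLatestReceivedMetric above builds its IN-list by quoting each metric
// name into the SQL text. A standalone sketch of the query it produces; the
// database and table literals stand in for signozMetricDBName and
// signozTSTableNameV4, and the metric names are examples.
package main

import (
	"fmt"
	"strings"
)

func main() {
	metricNames := []string{"http_server_duration", "http_requests_total"}
	quoted := make([]string, 0, len(metricNames))
	for _, m := range metricNames {
		quoted = append(quoted, fmt.Sprintf("'%s'", m))
	}
	query := fmt.Sprintf(
		"SELECT metric_name, labels, unix_milli FROM %s.%s WHERE metric_name IN (%s) ORDER BY unix_milli DESC LIMIT 1",
		"signoz_metrics", "time_series_v4", strings.Join(quoted, ", "),
	)
	fmt.Println(query)
}
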
@@ -4241,7 +4378,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v
query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, tagDataType from %s.%s WHERE %s limit $2", r.logsDB, r.logsTagAttributeTable, where)
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4295,7 +4432,7 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
}
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4407,7 +4544,7 @@ func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.Fi
}
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4468,7 +4605,7 @@ func readRow(vars []interface{}, columnNames []string) ([]string, map[string]str
var metric map[string]string
err := json.Unmarshal([]byte(*v), &metric)
if err != nil {
- zap.S().Errorf("unexpected error encountered %v", err)
+ zap.L().Error("unexpected error encountered", zap.Error(err))
}
for key, val := range metric {
groupBy = append(groupBy, val)
@@ -4524,7 +4661,7 @@ func readRow(vars []interface{}, columnNames []string) ([]string, map[string]str
groupAttributes[colName] = fmt.Sprintf("%v", *v)
default:
- zap.S().Errorf("unsupported var type %v found in query builder query result for column %s", v, colName)
+ zap.L().Error("unsupported var type found in query builder query result", zap.Any("v", v), zap.String("colName", colName))
}
}
return groupBy, groupAttributes, groupAttributesArray, point
@@ -4598,15 +4735,31 @@ func readRowsForTimeSeriesResult(rows driver.Rows, vars []interface{}, columnNam
return seriesList, nil
}
+func logComment(ctx context.Context) string {
+ // Get the key-value pairs from context for log comment
+ kv := ctx.Value("log_comment")
+ if kv == nil {
+ return ""
+ }
+
+ logCommentKVs, ok := kv.(map[string]string)
+ if !ok {
+ return ""
+ }
+
+ x, _ := json.Marshal(logCommentKVs)
+ return string(x)
+}
+
// GetTimeSeriesResultV3 runs the query and returns list of time series
func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query string) ([]*v3.Series, error) {
- defer utils.Elapsed("GetTimeSeriesResultV3", query)()
+ defer utils.Elapsed("GetTimeSeriesResultV3", query, fmt.Sprintf("logComment: %s", logComment(ctx)))()
rows, err := r.db.Query(ctx, query)
if err != nil {
- zap.S().Errorf("error while reading time series result %v", err)
+ zap.L().Error("error while reading time series result", zap.Error(err))
return nil, err
}
defer rows.Close()
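
// logComment above reads a map[string]string stored under the "log_comment"
// context key. A caller — e.g. an HTTP middleware — would populate it roughly
// like this; the key names are illustrative. (go vet prefers a typed context
// key, but the reader looks the value up by the plain string "log_comment".)
package main

import (
	"context"
	"fmt"
)

func main() {
	ctx := context.WithValue(context.Background(), "log_comment", map[string]string{
		"client":      "browser",
		"source_page": "dashboards",
	})

	if kv, ok := ctx.Value("log_comment").(map[string]string); ok {
		fmt.Println(kv["source_page"]) // dashboards
	}
}
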
@@ -4626,12 +4779,12 @@ func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query stri
// GetListResultV3 runs the query and returns list of rows
func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([]*v3.Row, error) {
- defer utils.Elapsed("GetListResultV3", query)()
+ defer utils.Elapsed("GetListResultV3", query, fmt.Sprintf("logComment: %s", logComment(ctx)))()
rows, err := r.db.Query(ctx, query)
if err != nil {
- zap.S().Errorf("error while reading time series result %v", err)
+ zap.L().Error("error while reading time series result", zap.Error(err))
return nil, err
}
defer rows.Close()
@@ -4774,7 +4927,7 @@ func (r *ClickHouseReader) GetTraceAggregateAttributes(ctx context.Context, req
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4815,7 +4968,7 @@ func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.Fi
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4869,7 +5022,7 @@ func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3.
query = fmt.Sprintf("SELECT DISTINCT stringTagValue from %s.%s WHERE tagKey = $1 AND stringTagValue ILIKE $2 AND tagType=$3 limit $4", r.TraceDB, r.spanAttributeTable)
rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), req.TagType, req.Limit)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4885,7 +5038,7 @@ func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3.
query = fmt.Sprintf("SELECT DISTINCT float64TagValue from %s.%s where tagKey = $1 AND toString(float64TagValue) ILIKE $2 AND tagType=$3 limit $4", r.TraceDB, r.spanAttributeTable)
rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), req.TagType, req.Limit)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4919,7 +5072,7 @@ func (r *ClickHouseReader) GetSpanAttributeKeys(ctx context.Context) (map[string
rows, err = r.db.Query(ctx, query)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4957,7 +5110,7 @@ func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, tim
case <-ctx.Done():
done := true
client.Done <- &done
- zap.S().Debug("closing go routine : " + client.Name)
+ zap.L().Debug("closing go routine : " + client.Name)
return
case <-ticker.C:
// get the new 100 logs as anything more older won't make sense
@@ -4972,7 +5125,7 @@ func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, tim
response := []model.SignozLog{}
err := r.db.Select(ctx, &response, tmpQuery)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while getting logs", zap.Error(err))
client.Error <- err
return
}
diff --git a/pkg/query-service/app/clickhouseReader/wrapper.go b/pkg/query-service/app/clickhouseReader/wrapper.go
new file mode 100644
index 0000000000..6c75fa4b20
--- /dev/null
+++ b/pkg/query-service/app/clickhouseReader/wrapper.go
@@ -0,0 +1,82 @@
+package clickhouseReader
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/ClickHouse/clickhouse-go/v2"
+ "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
+)
+
+type clickhouseConnWrapper struct {
+ conn clickhouse.Conn
+}
+
+func (c clickhouseConnWrapper) Close() error {
+ return c.conn.Close()
+}
+
+func (c clickhouseConnWrapper) Ping(ctx context.Context) error {
+ return c.conn.Ping(ctx)
+}
+
+func (c clickhouseConnWrapper) Stats() driver.Stats {
+ return c.conn.Stats()
+}
+
+func (c clickhouseConnWrapper) logComment(ctx context.Context) context.Context {
+ // Get the key-value pairs from context for log comment
+ kv := ctx.Value("log_comment")
+ if kv == nil {
+ return ctx
+ }
+
+ logCommentKVs, ok := kv.(map[string]string)
+ if !ok {
+ return ctx
+ }
+
+ logComment := ""
+ for k, v := range logCommentKVs {
+ logComment += fmt.Sprintf("%s=%s, ", k, v)
+ }
+ logComment = strings.TrimSuffix(logComment, ", ")
+
+ ctx = clickhouse.Context(ctx, clickhouse.WithSettings(clickhouse.Settings{
+ "log_comment": logComment,
+ }))
+ return ctx
+}
+
+func (c clickhouseConnWrapper) Query(ctx context.Context, query string, args ...interface{}) (driver.Rows, error) {
+ return c.conn.Query(c.logComment(ctx), query, args...)
+}
+
+func (c clickhouseConnWrapper) QueryRow(ctx context.Context, query string, args ...interface{}) driver.Row {
+ return c.conn.QueryRow(c.logComment(ctx), query, args...)
+}
+
+func (c clickhouseConnWrapper) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return c.conn.Select(c.logComment(ctx), dest, query, args...)
+}
+
+func (c clickhouseConnWrapper) Exec(ctx context.Context, query string, args ...interface{}) error {
+ return c.conn.Exec(c.logComment(ctx), query, args...)
+}
+
+func (c clickhouseConnWrapper) AsyncInsert(ctx context.Context, query string, wait bool, args ...interface{}) error {
+ return c.conn.AsyncInsert(c.logComment(ctx), query, wait, args...)
+}
+
+func (c clickhouseConnWrapper) PrepareBatch(ctx context.Context, query string, opts ...driver.PrepareBatchOption) (driver.Batch, error) {
+ return c.conn.PrepareBatch(c.logComment(ctx), query, opts...)
+}
+
+func (c clickhouseConnWrapper) ServerVersion() (*driver.ServerVersion, error) {
+ return c.conn.ServerVersion()
+}
+
+func (c clickhouseConnWrapper) Contributors() []string {
+ return c.conn.Contributors()
+}
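
// A sketch of how the wrapper is expected to be wired up, shown as if it
// lived in the same package: wrap the raw clickhouse.Conn once, and any query
// whose context carries a "log_comment" map is tagged server-side via the
// log_comment setting (visible in ClickHouse's system.query_log). The address
// is a placeholder.
package clickhouseReader

import (
	"context"
	"log"

	"github.com/ClickHouse/clickhouse-go/v2"
)

func ExampleWrapperUsage() {
	conn, err := clickhouse.Open(&clickhouse.Options{Addr: []string{"localhost:9000"}})
	if err != nil {
		log.Fatal(err)
	}
	db := clickhouseConnWrapper{conn: conn}

	ctx := context.WithValue(context.Background(), "log_comment", map[string]string{
		"source": "query-service",
	})

	// The wrapper rewrites ctx via clickhouse.Context(..., WithSettings(...))
	// before delegating, so this query is attributed in system.query_log.
	var one uint8
	if err := db.QueryRow(ctx, "SELECT 1").Scan(&one); err != nil {
		log.Fatal(err)
	}
}
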
diff --git a/pkg/query-service/app/dashboards/model.go b/pkg/query-service/app/dashboards/model.go
index 698b697279..c69f30a6bd 100644
--- a/pkg/query-service/app/dashboards/model.go
+++ b/pkg/query-service/app/dashboards/model.go
@@ -25,12 +25,14 @@ import (
var db *sqlx.DB
// User for mapping job,instance from grafana
-var instanceEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.instance}}\\\"")
-var nodeEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.node}}\\\"")
-var jobEQRE = regexp.MustCompile("job(?s)=(?s)\\\"{{.job}}\\\"")
-var instanceRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.instance}}\\\"")
-var nodeRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.node}}\\\"")
-var jobRERE = regexp.MustCompile("job(?s)=~(?s)\\\"{{.job}}\\\"")
+var (
+ instanceEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.instance}}\\\"")
+ nodeEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.node}}\\\"")
+ jobEQRE = regexp.MustCompile("job(?s)=(?s)\\\"{{.job}}\\\"")
+ instanceRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.instance}}\\\"")
+ nodeRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.node}}\\\"")
+ jobRERE = regexp.MustCompile("job(?s)=~(?s)\\\"{{.job}}\\\"")
+)
// InitDB sets up setting up the connection pool global variable.
func InitDB(dataSourceName string) (*sqlx.DB, error) {
@@ -188,10 +190,13 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf
dash.UpdateBy = &userEmail
dash.UpdateSlug()
dash.Uuid = uuid.New().String()
+ if data["uuid"] != nil {
+ dash.Uuid = data["uuid"].(string)
+ }
mapData, err := json.Marshal(dash.Data)
if err != nil {
- zap.S().Errorf("Error in marshalling data field in dashboard: ", dash, err)
+ zap.L().Error("Error in marshalling data field in dashboard: ", zap.Any("dashboard", dash), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -207,11 +212,10 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf
dash.Uuid, dash.CreatedAt, userEmail, dash.UpdatedAt, userEmail, mapData)
if err != nil {
- zap.S().Errorf("Error in inserting dashboard data: ", dash, err)
+ zap.L().Error("Error in inserting dashboard data: ", zap.Any("dashboard", dash), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
lastInsertId, err := result.LastInsertId()
-
if err != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -242,7 +246,7 @@ func DeleteDashboard(ctx context.Context, uuid string, fm interfaces.FeatureLook
dashboard, dErr := GetDashboard(ctx, uuid)
if dErr != nil {
- zap.S().Errorf("Error in getting dashboard: ", uuid, dErr)
+ zap.L().Error("Error in getting dashboard: ", zap.String("uuid", uuid), zap.Any("error", dErr))
return dErr
}
@@ -255,7 +259,6 @@ func DeleteDashboard(ctx context.Context, uuid string, fm interfaces.FeatureLook
query := `DELETE FROM dashboards WHERE uuid=?`
result, err := db.Exec(query, uuid)
-
if err != nil {
return &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -293,7 +296,7 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
mapData, err := json.Marshal(data)
if err != nil {
- zap.S().Errorf("Error in marshalling data field in dashboard: ", data, err)
+ zap.L().Error("Error in marshalling data field in dashboard: ", zap.Any("data", data), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
}
@@ -334,7 +337,7 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
dashboard.UpdatedAt, userEmail, mapData, dashboard.Uuid)
if err != nil {
- zap.S().Errorf("Error in inserting dashboard data: ", data, err)
+ zap.L().Error("Error in inserting dashboard data", zap.Any("data", data), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
if existingCount != newCount {
@@ -355,7 +358,7 @@ func LockUnlockDashboard(ctx context.Context, uuid string, lock bool) *model.Api
_, err := db.Exec(query, uuid)
if err != nil {
- zap.S().Errorf("Error in updating dashboard: ", uuid, err)
+ zap.L().Error("Error in updating dashboard", zap.String("uuid", uuid), zap.Error(err))
return &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -367,10 +370,10 @@ func updateFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiErro
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
- zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
+ zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
default:
- zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
+ zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
}
}
@@ -394,10 +397,10 @@ func checkFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiError
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
- zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
+ zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
default:
- zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
+ zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
}
}
@@ -419,7 +422,6 @@ func (d *Dashboard) UpdateSlug() {
}
func IsPostDataSane(data *map[string]interface{}) error {
-
val, ok := (*data)["title"]
if !ok || val == nil {
return fmt.Errorf("title not found in post data")
@@ -533,13 +535,13 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
if template.Type == "query" {
if template.Datasource == nil {
- zap.S().Warnf("Skipping panel %d as it has no datasource", templateIdx)
+ zap.L().Warn("Skipping panel as it has no datasource", zap.Int("templateIdx", templateIdx))
continue
}
// Skip if the source is not prometheus
source, stringOk := template.Datasource.(string)
if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") {
- zap.S().Warnf("Skipping template %d as it is not prometheus", templateIdx)
+ zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx))
continue
}
var result model.Datasource
@@ -551,12 +553,12 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
}
}
if result.Type != "prometheus" && result.Type != "" {
- zap.S().Warnf("Skipping template %d as it is not prometheus", templateIdx)
+ zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx))
continue
}
if !stringOk && !structOk {
- zap.S().Warnf("Didn't recognize source, skipping")
+ zap.L().Warn("Didn't recognize source, skipping")
continue
}
typ = "QUERY"
@@ -627,13 +629,13 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
continue
}
if panel.Datasource == nil {
- zap.S().Warnf("Skipping panel %d as it has no datasource", idx)
+ zap.L().Warn("Skipping panel as it has no datasource", zap.Int("idx", idx))
continue
}
// Skip if the datasource is not prometheus
source, stringOk := panel.Datasource.(string)
if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") {
- zap.S().Warnf("Skipping panel %d as it is not prometheus", idx)
+ zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx))
continue
}
var result model.Datasource
@@ -645,12 +647,12 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
}
}
if result.Type != "prometheus" && result.Type != "" {
- zap.S().Warnf("Skipping panel %d as it is not prometheus", idx)
+ zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx))
continue
}
if !stringOk && !structOk {
- zap.S().Warnf("Didn't recognize source, skipping")
+ zap.L().Warn("Didn't recognize source, skipping")
continue
}
diff --git a/pkg/query-service/app/dashboards/provision.go b/pkg/query-service/app/dashboards/provision.go
index 6f60dc50fe..fb97a960c1 100644
--- a/pkg/query-service/app/dashboards/provision.go
+++ b/pkg/query-service/app/dashboards/provision.go
@@ -10,55 +10,70 @@ import (
"go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/interfaces"
+ "go.signoz.io/signoz/pkg/query-service/model"
)
func readCurrentDir(dir string, fm interfaces.FeatureLookup) error {
file, err := os.Open(dir)
if err != nil {
- zap.S().Errorf("failed opening directory: %s", err)
- return err
+ zap.L().Warn("failed opening directory", zap.Error(err))
+ return nil
}
defer file.Close()
list, _ := file.Readdirnames(0) // 0 to read all files and folders
for _, filename := range list {
- zap.S().Info("Provisioning dashboard: ", filename)
+ zap.L().Info("Provisioning dashboard: ", zap.String("filename", filename))
// using filepath.Join for platform specific path creation
// which is equivalent to "dir+/+filename" (on unix based systems) but cleaner
plan, err := os.ReadFile(filepath.Join(dir, filename))
if err != nil {
- zap.S().Errorf("Creating Dashboards: Error in reading json fron file: %s\t%s", filename, err)
+ zap.L().Error("Creating Dashboards: Error in reading json fron file", zap.String("filename", filename), zap.Error(err))
continue
}
var data map[string]interface{}
err = json.Unmarshal(plan, &data)
if err != nil {
- zap.S().Errorf("Creating Dashboards: Error in unmarshalling json from file: %s\t%s", filename, err)
+ zap.L().Error("Creating Dashboards: Error in unmarshalling json from file", zap.String("filename", filename), zap.Error(err))
continue
}
err = IsPostDataSane(&data)
if err != nil {
- zap.S().Infof("Creating Dashboards: Error in file: %s\t%s", filename, err)
+ zap.L().Info("Creating Dashboards: Error in file", zap.String("filename", filename), zap.Error(err))
continue
}
- _, apiErr := GetDashboard(context.Background(), data["uuid"].(string))
- if apiErr == nil {
- zap.S().Infof("Creating Dashboards: Error in file: %s\t%s", filename, "Dashboard already present in database")
+ id := data["uuid"]
+ if id == nil {
+ _, apiErr := CreateDashboard(context.Background(), data, fm)
+ if apiErr != nil {
+ zap.L().Error("Creating Dashboards: Error in file", zap.String("filename", filename), zap.Error(apiErr.Err))
+ }
continue
}
- _, apiErr = CreateDashboard(context.Background(), data, fm)
+ apiErr := upsertDashboard(id.(string), data, filename, fm)
if apiErr != nil {
- zap.S().Errorf("Creating Dashboards: Error in file: %s\t%s", filename, apiErr.Err)
- continue
+ zap.L().Error("Creating Dashboards: Error upserting dashboard", zap.String("filename", filename), zap.Error(apiErr.Err))
}
-
}
return nil
}
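+
+// upsertDashboard updates the dashboard with the given uuid if it is already
+// present in the database, and creates it otherwise.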
+func upsertDashboard(uuid string, data map[string]interface{}, filename string, fm interfaces.FeatureLookup) *model.ApiError {
+ _, apiErr := GetDashboard(context.Background(), uuid)
+ if apiErr == nil {
+ zap.S().Infof("Creating Dashboards: Already exists: %s\t%s", filename, "Dashboard already present in database, Updating dashboard")
+ _, apiErr := UpdateDashboard(context.Background(), uuid, data, fm)
+ return apiErr
+ }
+
+ zap.S().Infof("Creating Dashboards: UUID not found: %s\t%s", filename, "Dashboard not present in database, Creating dashboard")
+ _, apiErr = CreateDashboard(context.Background(), data, fm)
+ return apiErr
+}
+
func LoadDashboardFiles(fm interfaces.FeatureLookup) error {
dashboardsPath := constants.GetOrDefaultEnv("DASHBOARDS_PATH", "./config/dashboards")
return readCurrentDir(dashboardsPath, fm)
diff --git a/pkg/query-service/app/formula.go b/pkg/query-service/app/formula.go
index 657a7bcad9..619ae15cb3 100644
--- a/pkg/query-service/app/formula.go
+++ b/pkg/query-service/app/formula.go
@@ -87,23 +87,6 @@ func joinAndCalculate(results []*v3.Result, uniqueLabelSet map[string]string, ex
}
}
- vars := expression.Vars()
- var doesNotHaveAllVars bool
- for _, v := range vars {
- if _, ok := seriesMap[v]; !ok {
- doesNotHaveAllVars = true
- break
- }
- }
-
- // There is no series that matches the label set from all queries
- // TODO: Does the lack of a series from one query mean that the result should be nil?
- // Or should we interpret the series as having a value of 0 at all timestamps?
- // The current behaviour with ClickHouse is to show no data
- if doesNotHaveAllVars {
- return nil, nil
- }
-
resultSeries := &v3.Series{
Labels: uniqueLabelSet,
}
@@ -120,6 +103,13 @@ func joinAndCalculate(results []*v3.Result, uniqueLabelSet map[string]string, ex
for queryName, series := range seriesMap {
values[queryName] = series[timestamp]
}
+
+ // If the value is not present in the values map, set it to 0
+ for _, v := range expression.Vars() {
+ if _, ok := values[v]; !ok {
+ values[v] = 0
+ }
+ }
newValue, err := expression.Evaluate(values)
if err != nil {
return nil, err
diff --git a/pkg/query-service/app/formula_test.go b/pkg/query-service/app/formula_test.go
index 17a073ae32..365d794836 100644
--- a/pkg/query-service/app/formula_test.go
+++ b/pkg/query-service/app/formula_test.go
@@ -235,7 +235,39 @@ func TestProcessResults(t *testing.T) {
},
},
want: &v3.Result{
- Series: []*v3.Series{},
+ Series: []*v3.Series{
+ {
+ Labels: map[string]string{
+ "service_name": "frontend",
+ "operation": "GET /api",
+ },
+ Points: []v3.Point{
+ {
+ Timestamp: 1,
+ Value: 10,
+ },
+ {
+ Timestamp: 2,
+ Value: 20,
+ },
+ },
+ },
+ {
+ Labels: map[string]string{
+ "service_name": "redis",
+ },
+ Points: []v3.Point{
+ {
+ Timestamp: 1,
+ Value: 30,
+ },
+ {
+ Timestamp: 3,
+ Value: 40,
+ },
+ },
+ },
+ },
},
},
}
@@ -350,6 +382,21 @@ func TestProcessResultsErrorRate(t *testing.T) {
},
want: &v3.Result{
Series: []*v3.Series{
+ {
+ Labels: map[string]string{
+ "service_name": "frontend",
+ },
+ Points: []v3.Point{
+ {
+ Timestamp: 1,
+ Value: 0,
+ },
+ {
+ Timestamp: 2,
+ Value: 0,
+ },
+ },
+ },
{
Labels: map[string]string{
"service_name": "redis",
@@ -365,6 +412,21 @@ func TestProcessResultsErrorRate(t *testing.T) {
},
},
},
+ {
+ Labels: map[string]string{
+ "service_name": "route",
+ },
+ Points: []v3.Point{
+ {
+ Timestamp: 1,
+ Value: 0,
+ },
+ {
+ Timestamp: 2,
+ Value: 0,
+ },
+ },
+ },
},
},
},
@@ -906,132 +968,118 @@ func TestFormula(t *testing.T) {
},
},
},
- want: &v3.Result{},
- },
- {
- name: "Group keys on both sides are overlapping but do not match exactly",
- expression: "A/B",
- results: []*v3.Result{
- {
- QueryName: "A",
- Series: []*v3.Series{
- {
- Labels: map[string]string{
- "host_name": "ip-10-420-69-1",
- "state": "running",
- },
- Points: []v3.Point{
- {
- Timestamp: 1,
- Value: 10,
- },
- {
- Timestamp: 2,
- Value: 20,
- },
- {
- Timestamp: 4,
- Value: 40,
- },
- {
- Timestamp: 5,
- Value: 50,
- },
- {
- Timestamp: 7,
- Value: 70,
- },
- },
+ want: &v3.Result{
+ Series: []*v3.Series{
+ {
+ Labels: map[string]string{
+ "host_name": "ip-10-420-69-1",
+ "state": "running",
},
- {
- Labels: map[string]string{
- "host_name": "ip-10-420-69-2",
- "state": "idle",
+ Points: []v3.Point{
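+ // B has no series with this label set, so it defaults to 0 and A/B evaluates to +Inf.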
+ {
+ Timestamp: 1,
+ Value: math.Inf(0),
},
- Points: []v3.Point{
- {
- Timestamp: 1,
- Value: 12,
- },
- {
- Timestamp: 2,
- Value: 45,
- },
- {
- Timestamp: 3,
- Value: 30,
- },
- {
- Timestamp: 4,
- Value: 40,
- },
- {
- Timestamp: 5,
- Value: 50,
- },
+ {
+ Timestamp: 2,
+ Value: math.Inf(0),
+ },
+ {
+ Timestamp: 4,
+ Value: math.Inf(0),
+ },
+ {
+ Timestamp: 5,
+ Value: math.Inf(0),
+ },
+ {
+ Timestamp: 7,
+ Value: math.Inf(0),
},
},
},
- },
- {
- QueryName: "B",
- Series: []*v3.Series{
- {
- Labels: map[string]string{
- "os.type": "linux",
- "state": "running",
+ {
+ Labels: map[string]string{
+ "host_name": "ip-10-420-69-2",
+ "state": "idle",
+ },
+ Points: []v3.Point{
+ {
+ Timestamp: 1,
+ Value: math.Inf(0),
},
- Points: []v3.Point{
- {
- Timestamp: 1,
- Value: 22,
- },
- {
- Timestamp: 2,
- Value: 65,
- },
- {
- Timestamp: 3,
- Value: 30,
- },
- {
- Timestamp: 4,
- Value: 40,
- },
- {
- Timestamp: 5,
- Value: 50,
- },
+ {
+ Timestamp: 2,
+ Value: math.Inf(0),
+ },
+ {
+ Timestamp: 3,
+ Value: math.Inf(0),
+ },
+ {
+ Timestamp: 4,
+ Value: math.Inf(0),
+ },
+ {
+ Timestamp: 5,
+ Value: math.Inf(0),
},
},
- {
- Labels: map[string]string{
- "os.type": "windows",
- "state": "busy",
+ },
+ {
+ Labels: map[string]string{
+ "host_name": "ip-10-420-69-1",
+ "state": "not_running_chalamet",
+ },
+ Points: []v3.Point{
+ {
+ Timestamp: 1,
+ Value: 0,
},
- Points: []v3.Point{
- {
- Timestamp: 1,
- Value: 22,
- },
- {
- Timestamp: 2,
- Value: 65,
- },
- {
- Timestamp: 4,
- Value: 40,
- },
- {
- Timestamp: 5,
- Value: 50,
- },
+ {
+ Timestamp: 2,
+ Value: 0,
+ },
+ {
+ Timestamp: 3,
+ Value: 0,
+ },
+ {
+ Timestamp: 4,
+ Value: 0,
+ },
+ {
+ Timestamp: 5,
+ Value: 0,
+ },
+ },
+ },
+ {
+ Labels: map[string]string{
+ "host_name": "ip-10-420-69-2",
+ "state": "busy",
+ },
+ Points: []v3.Point{
+ {
+ Timestamp: 1,
+ Value: 0,
+ },
+ {
+ Timestamp: 2,
+ Value: 0,
+ },
+ {
+ Timestamp: 4,
+ Value: 0,
+ },
+ {
+ Timestamp: 5,
+ Value: 0,
},
},
},
},
},
- want: &v3.Result{},
},
{
name: "Group keys on the left side are a superset of the right side",
@@ -1193,6 +1241,59 @@ func TestFormula(t *testing.T) {
},
},
},
+ {
+ Labels: map[string]string{
+ "host_name": "ip-10-420-69-2",
+ "state": "idle",
+ "os.type": "linux",
+ },
+ Points: []v3.Point{
+ {
+ Timestamp: 1,
+ Value: math.Inf(0),
+ },
+ {
+ Timestamp: 2,
+ Value: math.Inf(0),
+ },
+ {
+ Timestamp: 3,
+ Value: math.Inf(0),
+ },
+ {
+ Timestamp: 4,
+ Value: math.Inf(0),
+ },
+ {
+ Timestamp: 5,
+ Value: math.Inf(0),
+ },
+ },
+ },
+ {
+ Labels: map[string]string{
+ "state": "busy",
+ "os.type": "linux",
+ },
+ Points: []v3.Point{
+ {
+ Timestamp: 1,
+ Value: 0,
+ },
+ {
+ Timestamp: 2,
+ Value: 0,
+ },
+ {
+ Timestamp: 4,
+ Value: 0,
+ },
+ {
+ Timestamp: 5,
+ Value: 0,
+ },
+ },
+ },
},
},
},
@@ -1454,18 +1555,22 @@ func TestFormula(t *testing.T) {
expression, err := govaluate.NewEvaluableExpression(tt.expression)
if err != nil {
t.Errorf("Error parsing expression: %v", err)
+ return
}
got, err := processResults(tt.results, expression)
if err != nil {
t.Errorf("Error processing results: %v", err)
+ return
}
if len(got.Series) != len(tt.want.Series) {
t.Errorf("processResults(): number of series - got = %v, want %v", len(got.Series), len(tt.want.Series))
+ return
}
for i := range got.Series {
if len(got.Series[i].Points) != len(tt.want.Series[i].Points) {
t.Errorf("processResults(): number of points - got = %v, want %v", len(got.Series[i].Points), len(tt.want.Series[i].Points))
+ return
}
for j := range got.Series[i].Points {
if got.Series[i].Points[j].Value != tt.want.Series[i].Points[j].Value {
diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go
index 784fdb940d..c025345cef 100644
--- a/pkg/query-service/app/http_handler.go
+++ b/pkg/query-service/app/http_handler.go
@@ -8,6 +8,8 @@ import (
"fmt"
"io"
"net/http"
+ "regexp"
+ "slices"
"strconv"
"strings"
"sync"
@@ -23,6 +25,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/agentConf"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
"go.signoz.io/signoz/pkg/query-service/app/explorer"
+ "go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logs"
logsv3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
"go.signoz.io/signoz/pkg/query-service/app/metrics"
@@ -94,6 +97,8 @@ type APIHandler struct {
maxOpenConns int
dialTimeout time.Duration
+ IntegrationsController *integrations.Controller
+
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
// SetupCompleted indicates if SigNoz is ready for general use.
@@ -125,8 +130,12 @@ type APIHandlerOpts struct {
// feature flags querier
FeatureFlags interfaces.FeatureLookup
+ // Integrations
+ IntegrationsController *integrations.Controller
+
// Log parsing pipelines
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
+
// cache
Cache cache.Cache
@@ -174,6 +183,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
alertManager: alertManager,
ruleManager: opts.RuleManager,
featureFlags: opts.FeatureFlags,
+ IntegrationsController: opts.IntegrationsController,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
querier: querier,
querierV2: querierv2,
@@ -197,7 +207,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
hasUsers, err := aH.appDao.GetUsersWithOpts(context.Background(), 1)
if err.Error() != "" {
// raise warning but no panic as this is a recoverable condition
- zap.S().Warnf("unexpected error while fetch user count while initializing base api handler", err.Error())
+ zap.L().Warn("unexpected error while fetch user count while initializing base api handler", zap.Error(err))
}
if len(hasUsers) != 0 {
// first user is already created, we can mark the app ready for general use.
@@ -263,7 +273,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
Data: data,
})
if err != nil {
- zap.S().Error("msg", "error marshalling json response", "err", err)
+ zap.L().Error("error marshalling json response", zap.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -293,7 +303,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
if n, err := w.Write(b); err != nil {
- zap.S().Error("msg", "error writing response", "bytesWritten", n, "err", err)
+ zap.L().Error("error writing response", zap.Int("bytesWritten", n), zap.Error(err))
}
}
@@ -304,7 +314,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) {
Data: data,
})
if err != nil {
- zap.S().Error("msg", "error marshalling json response", "err", err)
+ zap.L().Error("error marshalling json response", zap.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -312,7 +322,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
if n, err := w.Write(b); err != nil {
- zap.S().Error("msg", "error writing response", "bytesWritten", n, "err", err)
+ zap.L().Error("error writing response", zap.Int("bytesWritten", n), zap.Error(err))
}
}
@@ -557,7 +567,7 @@ func (aH *APIHandler) addTemporality(ctx context.Context, qp *v3.QueryRangeParam
var err error
if aH.preferDelta {
- zap.S().Debug("fetching metric temporality")
+ zap.L().Debug("fetching metric temporality")
metricNameToTemporality, err = aH.reader.FetchTemporality(ctx, metricNames)
if err != nil {
return err
@@ -585,7 +595,7 @@ func (aH *APIHandler) QueryRangeMetricsV2(w http.ResponseWriter, r *http.Request
metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
if apiErrorObj != nil {
- zap.S().Errorf(apiErrorObj.Err.Error())
+ zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@@ -858,11 +868,15 @@ func (aH *APIHandler) listRules(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) getDashboards(w http.ResponseWriter, r *http.Request) {
allDashboards, err := dashboards.GetDashboards(r.Context())
-
if err != nil {
RespondError(w, err, nil)
return
}
+
+ ic := aH.IntegrationsController
+ installedIntegrationDashboards, apiErr := ic.GetDashboardsForInstalledIntegrations(r.Context())
+ if apiErr != nil {
+ zap.L().Error("failed to get dashboards for installed integrations", zap.Error(apiErr.Err))
+ } else {
+ allDashboards = append(allDashboards, installedIntegrationDashboards...)
+ }
+
tagsFromReq, ok := r.URL.Query()["tags"]
if !ok || len(tagsFromReq) == 0 || tagsFromReq[0] == "" {
aH.Respond(w, allDashboards)
@@ -1031,8 +1045,19 @@ func (aH *APIHandler) getDashboard(w http.ResponseWriter, r *http.Request) {
dashboard, apiError := dashboards.GetDashboard(r.Context(), uuid)
if apiError != nil {
- RespondError(w, apiError, nil)
- return
+ if apiError.Type() != model.ErrorNotFound {
+ RespondError(w, apiError, nil)
+ return
+ }
+
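+ // Not found among saved dashboards; the uuid may belong to a dashboard shipped with an installed integration.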
+ dashboard, apiError = aH.IntegrationsController.GetInstalledIntegrationDashboardById(
+ r.Context(), uuid,
+ )
+ if apiError != nil {
+ RespondError(w, apiError, nil)
+ return
+ }
+
}
aH.Respond(w, dashboard)
@@ -1105,7 +1130,7 @@ func (aH *APIHandler) testRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("Error in getting req body in test rule API\n", err)
+ zap.L().Error("Error in getting req body in test rule API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1148,7 +1173,7 @@ func (aH *APIHandler) patchRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("msg: error in getting req body of patch rule API\n", "\t error:", err)
+ zap.L().Error("error in getting req body of patch rule API\n", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1169,7 +1194,7 @@ func (aH *APIHandler) editRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("msg: error in getting req body of edit rule API\n", "\t error:", err)
+ zap.L().Error("error in getting req body of edit rule API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1220,14 +1245,14 @@ func (aH *APIHandler) testChannel(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("Error in getting req body of testChannel API\n", err)
+ zap.L().Error("Error in getting req body of testChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
receiver := &am.Receiver{}
if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer
- zap.S().Errorf("Error in parsing req body of testChannel API\n", err)
+ zap.L().Error("Error in parsing req body of testChannel API\n", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1247,14 +1272,14 @@ func (aH *APIHandler) editChannel(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("Error in getting req body of editChannel API\n", err)
+ zap.L().Error("Error in getting req body of editChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
receiver := &am.Receiver{}
if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer
- zap.S().Errorf("Error in parsing req body of editChannel API\n", err)
+ zap.L().Error("Error in parsing req body of editChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1275,14 +1300,14 @@ func (aH *APIHandler) createChannel(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("Error in getting req body of createChannel API\n", err)
+ zap.L().Error("Error in getting req body of createChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
receiver := &am.Receiver{}
if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer
- zap.S().Errorf("Error in parsing req body of createChannel API\n", err)
+ zap.L().Error("Error in parsing req body of createChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1322,7 +1347,7 @@ func (aH *APIHandler) createRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("Error in getting req body for create rule API\n", err)
+ zap.L().Error("Error in getting req body for create rule API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1349,7 +1374,7 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request)
return
}
- // zap.S().Info(query, apiError)
+ // zap.L().Info(query, apiError)
ctx := r.Context()
if to := r.FormValue("timeout"); to != "" {
@@ -1371,7 +1396,7 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request)
}
if res.Err != nil {
- zap.S().Error(res.Err)
+ zap.L().Error("error in query range metrics", zap.Error(res.Err))
}
if res.Err != nil {
@@ -1404,7 +1429,7 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) {
return
}
- // zap.S().Info(query, apiError)
+ // zap.L().Info(query, apiError)
ctx := r.Context()
if to := r.FormValue("timeout"); to != "" {
@@ -1426,7 +1451,7 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) {
}
if res.Err != nil {
- zap.S().Error(res.Err)
+ zap.L().Error("error in query range metrics", zap.Error(res.Err))
}
if res.Err != nil {
@@ -1532,7 +1557,9 @@ func (aH *APIHandler) getServiceOverview(w http.ResponseWriter, r *http.Request)
func (aH *APIHandler) getServicesTopLevelOps(w http.ResponseWriter, r *http.Request) {
- result, apiErr := aH.reader.GetTopLevelOperations(r.Context(), aH.skipConfig)
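+ // start and end are left zero-valued, i.e. no explicit time range is passed to the reader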
+ var start, end time.Time
+
+ result, _, apiErr := aH.reader.GetTopLevelOperations(r.Context(), aH.skipConfig, start, end)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
@@ -2018,7 +2045,7 @@ func (aH *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) listUsers(w http.ResponseWriter, r *http.Request) {
users, err := dao.DB().GetUsers(context.Background())
if err != nil {
- zap.S().Debugf("[listUsers] Failed to query list of users, err: %v", err)
+ zap.L().Error("[listUsers] Failed to query list of users", zap.Error(err))
RespondError(w, err, nil)
return
}
@@ -2035,7 +2062,7 @@ func (aH *APIHandler) getUser(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
user, err := dao.DB().GetUser(ctx, id)
if err != nil {
- zap.S().Debugf("[getUser] Failed to query user, err: %v", err)
+ zap.L().Error("[getUser] Failed to query user", zap.Error(err))
RespondError(w, err, "Failed to get user")
return
}
@@ -2065,7 +2092,7 @@ func (aH *APIHandler) editUser(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
old, apiErr := dao.DB().GetUser(ctx, id)
if apiErr != nil {
- zap.S().Debugf("[editUser] Failed to query user, err: %v", err)
+ zap.L().Error("[editUser] Failed to query user", zap.Error(err))
RespondError(w, apiErr, nil)
return
}
@@ -2149,7 +2176,7 @@ func (aH *APIHandler) patchUserFlag(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
b, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("failed read user flags from http request for userId ", userId, "with error: ", err)
+ zap.L().Error("failed read user flags from http request for userId ", zap.String("userId", userId), zap.Error(err))
RespondError(w, model.BadRequestStr("received user flags in invalid format"), nil)
return
}
@@ -2157,7 +2184,7 @@ func (aH *APIHandler) patchUserFlag(w http.ResponseWriter, r *http.Request) {
err = json.Unmarshal(b, &flags)
if err != nil {
- zap.S().Errorf("failed parsing user flags for userId ", userId, "with error: ", err)
+ zap.L().Error("failed parsing user flags for userId ", zap.String("userId", userId), zap.Error(err))
RespondError(w, model.BadRequestStr("received user flags in invalid format"), nil)
return
}
@@ -2321,7 +2348,7 @@ func (aH *APIHandler) resetPassword(w http.ResponseWriter, r *http.Request) {
}
if err := auth.ResetPassword(context.Background(), req); err != nil {
- zap.S().Debugf("resetPassword failed, err: %v\n", err)
+ zap.L().Error("resetPassword failed", zap.Error(err))
if aH.HandleError(w, err, http.StatusInternalServerError) {
return
}
@@ -2336,10 +2363,9 @@ func (aH *APIHandler) changePassword(w http.ResponseWriter, r *http.Request) {
return
}
- if err := auth.ChangePassword(context.Background(), req); err != nil {
- if aH.HandleError(w, err, http.StatusInternalServerError) {
- return
- }
+ if apiErr := auth.ChangePassword(context.Background(), req); apiErr != nil {
+ RespondError(w, apiErr, nil)
+ return
}
aH.WriteJSON(w, r, map[string]string{"data": "password changed successfully"})
@@ -2365,7 +2391,7 @@ func (aH *APIHandler) HandleError(w http.ResponseWriter, err error, statusCode i
return false
}
if statusCode == http.StatusInternalServerError {
- zap.S().Error("HTTP handler, Internal Server Error", zap.Error(err))
+ zap.L().Error("HTTP handler, Internal Server Error", zap.Error(err))
}
structuredResp := structuredResponse{
Errors: []structuredError{
@@ -2392,6 +2418,299 @@ func (aH *APIHandler) WriteJSON(w http.ResponseWriter, r *http.Request, response
w.Write(resp)
}
+// Integrations
+func (ah *APIHandler) RegisterIntegrationRoutes(router *mux.Router, am *AuthMiddleware) {
+ subRouter := router.PathPrefix("/api/v1/integrations").Subrouter()
+
+ subRouter.HandleFunc(
+ "/install", am.ViewAccess(ah.InstallIntegration),
+ ).Methods(http.MethodPost)
+
+ subRouter.HandleFunc(
+ "/uninstall", am.ViewAccess(ah.UninstallIntegration),
+ ).Methods(http.MethodPost)
+
+ // Used for polling for status in v0
+ subRouter.HandleFunc(
+ "/{integrationId}/connection_status", am.ViewAccess(ah.GetIntegrationConnectionStatus),
+ ).Methods(http.MethodGet)
+
+ subRouter.HandleFunc(
+ "/{integrationId}", am.ViewAccess(ah.GetIntegration),
+ ).Methods(http.MethodGet)
+
+ subRouter.HandleFunc(
+ "", am.ViewAccess(ah.ListIntegrations),
+ ).Methods(http.MethodGet)
+}
+
+func (ah *APIHandler) ListIntegrations(
+ w http.ResponseWriter, r *http.Request,
+) {
+ params := map[string]string{}
+ for k, values := range r.URL.Query() {
+ params[k] = values[0]
+ }
+
+ resp, apiErr := ah.IntegrationsController.ListIntegrations(
+ r.Context(), params,
+ )
+ if apiErr != nil {
+ RespondError(w, apiErr, "Failed to fetch integrations")
+ return
+ }
+ ah.Respond(w, resp)
+}
+
+func (ah *APIHandler) GetIntegration(
+ w http.ResponseWriter, r *http.Request,
+) {
+ integrationId := mux.Vars(r)["integrationId"]
+ integration, apiErr := ah.IntegrationsController.GetIntegration(
+ r.Context(), integrationId,
+ )
+ if apiErr != nil {
+ RespondError(w, apiErr, "Failed to fetch integration details")
+ return
+ }
+
+ ah.Respond(w, integration)
+}
+
+func (ah *APIHandler) GetIntegrationConnectionStatus(
+ w http.ResponseWriter, r *http.Request,
+) {
+ integrationId := mux.Vars(r)["integrationId"]
+ isInstalled, apiErr := ah.IntegrationsController.IsIntegrationInstalled(
+ r.Context(), integrationId,
+ )
+ if apiErr != nil {
+ RespondError(w, apiErr, "failed to check if integration is installed")
+ return
+ }
+
+ // Do not spend resources calculating connection status unless installed.
+ if !isInstalled {
+ ah.Respond(w, &integrations.IntegrationConnectionStatus{})
+ return
+ }
+
+ connectionTests, apiErr := ah.IntegrationsController.GetIntegrationConnectionTests(
+ r.Context(), integrationId,
+ )
+ if apiErr != nil {
+ RespondError(w, apiErr, "failed to fetch integration connection tests")
+ return
+ }
+
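+ // Fall back to a 15 minute lookback window if the param is missing or invalid.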
+ lookbackSecondsStr := r.URL.Query().Get("lookback_seconds")
+ lookbackSeconds, err := strconv.ParseInt(lookbackSecondsStr, 10, 64)
+ if err != nil {
+ lookbackSeconds = 15 * 60
+ }
+
+ connectionStatus, apiErr := ah.calculateConnectionStatus(
+ r.Context(), connectionTests, lookbackSeconds,
+ )
+ if apiErr != nil {
+ RespondError(w, apiErr, "Failed to calculate integration connection status")
+ return
+ }
+
+ ah.Respond(w, connectionStatus)
+}
+
+func (ah *APIHandler) calculateConnectionStatus(
+ ctx context.Context,
+ connectionTests *integrations.IntegrationConnectionTests,
+ lookbackSeconds int64,
+) (*integrations.IntegrationConnectionStatus, *model.ApiError) {
+ // Calculate connection status for signals in parallel
+
+ result := &integrations.IntegrationConnectionStatus{}
+ errors := []*model.ApiError{}
+ var resultLock sync.Mutex
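+ // resultLock guards result and errors, both written from multiple goroutines below.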
+
+ var wg sync.WaitGroup
+
+ // Calculate logs connection status
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ logsConnStatus, apiErr := ah.calculateLogsConnectionStatus(
+ ctx, connectionTests.Logs, lookbackSeconds,
+ )
+
+ resultLock.Lock()
+ defer resultLock.Unlock()
+
+ if apiErr != nil {
+ errors = append(errors, apiErr)
+ } else {
+ result.Logs = logsConnStatus
+ }
+ }()
+
+ // Calculate metrics connection status
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ if connectionTests.Metrics == nil || len(connectionTests.Metrics) < 1 {
+ return
+ }
+
+ statusForLastReceivedMetric, apiErr := ah.reader.GetLatestReceivedMetric(
+ ctx, connectionTests.Metrics,
+ )
+
+ resultLock.Lock()
+ defer resultLock.Unlock()
+
+ if apiErr != nil {
+ errors = append(errors, apiErr)
+
+ } else if statusForLastReceivedMetric != nil {
+ interestingLabels := []string{
+ "container_name", "host_name", "node_name",
+ "pod_name", "deployment_name", "cluster_name",
+ "namespace_name", "job_name", "service_name",
+ }
+ resourceSummaryParts := []string{}
+ for k, v := range statusForLastReceivedMetric.LastReceivedLabels {
+ isInterestingKey := !strings.HasPrefix(k, "_") && slices.ContainsFunc(
+ interestingLabels, func(l string) bool { return strings.Contains(k, l) },
+ )
+ if isInterestingKey {
+ resourceSummaryParts = append(resourceSummaryParts, fmt.Sprintf(
+ "%s=%s", k, v,
+ ))
+ }
+ }
+
+ result.Metrics = &integrations.SignalConnectionStatus{
+ LastReceivedFrom: strings.Join(resourceSummaryParts, ", "),
+ LastReceivedTsMillis: statusForLastReceivedMetric.LastReceivedTsMillis,
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ if len(errors) > 0 {
+ return nil, errors[0]
+ }
+
+ return result, nil
+}
+
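+// calculateLogsConnectionStatus runs the integration's logs connection test
+// as a list query over the lookback window and summarizes the resource
+// attributes of the most recent matching log line, if any.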
+func (ah *APIHandler) calculateLogsConnectionStatus(
+ ctx context.Context,
+ logsConnectionTest *v3.FilterSet,
+ lookbackSeconds int64,
+) (*integrations.SignalConnectionStatus, *model.ApiError) {
+ if logsConnectionTest == nil {
+ return nil, nil
+ }
+
+ qrParams := &v3.QueryRangeParamsV3{
+ Start: time.Now().UnixMilli() - (lookbackSeconds * 1000),
+ End: time.Now().UnixMilli(),
+ CompositeQuery: &v3.CompositeQuery{
+ PanelType: v3.PanelTypeList,
+ QueryType: v3.QueryTypeBuilder,
+ BuilderQueries: map[string]*v3.BuilderQuery{
+ "A": {
+ PageSize: 1,
+ Filters: logsConnectionTest,
+ QueryName: "A",
+ DataSource: v3.DataSourceLogs,
+ Expression: "A",
+ AggregateOperator: v3.AggregateOperatorNoOp,
+ },
+ },
+ },
+ }
+ queryRes, err, _ := ah.querier.QueryRange(
+ ctx, qrParams, map[string]v3.AttributeKey{},
+ )
+ if err != nil {
+ return nil, model.InternalError(fmt.Errorf(
+ "could not query for integration connection status: %w", err,
+ ))
+ }
+ if len(queryRes) > 0 && queryRes[0].List != nil && len(queryRes[0].List) > 0 {
+ lastLog := queryRes[0].List[0]
+
+ resourceSummaryParts := []string{}
+ lastLogResourceAttribs := lastLog.Data["resources_string"]
+ if lastLogResourceAttribs != nil {
+ resourceAttribs, ok := lastLogResourceAttribs.(*map[string]string)
+ if !ok {
+ return nil, model.InternalError(fmt.Errorf(
+ "could not cast log resource attribs",
+ ))
+ }
+ for k, v := range *resourceAttribs {
+ resourceSummaryParts = append(resourceSummaryParts, fmt.Sprintf(
+ "%s=%s", k, v,
+ ))
+ }
+ }
+ lastLogResourceSummary := strings.Join(resourceSummaryParts, ", ")
+
+ return &integrations.SignalConnectionStatus{
+ LastReceivedTsMillis: lastLog.Timestamp.UnixMilli(),
+ LastReceivedFrom: lastLogResourceSummary,
+ }, nil
+ }
+
+ return nil, nil
+}
+
+func (ah *APIHandler) InstallIntegration(
+ w http.ResponseWriter, r *http.Request,
+) {
+ req := integrations.InstallIntegrationRequest{}
+
+ err := json.NewDecoder(r.Body).Decode(&req)
+ if err != nil {
+ RespondError(w, model.BadRequest(err), nil)
+ return
+ }
+
+ integration, apiErr := ah.IntegrationsController.Install(
+ r.Context(), &req,
+ )
+ if apiErr != nil {
+ RespondError(w, apiErr, nil)
+ return
+ }
+
+ ah.Respond(w, integration)
+}
+
+func (ah *APIHandler) UninstallIntegration(
+ w http.ResponseWriter, r *http.Request,
+) {
+ req := integrations.UninstallIntegrationRequest{}
+
+ err := json.NewDecoder(r.Body).Decode(&req)
+ if err != nil {
+ RespondError(w, model.BadRequest(err), nil)
+ return
+ }
+
+ apiErr := ah.IntegrationsController.Uninstall(r.Context(), &req)
+ if apiErr != nil {
+ RespondError(w, apiErr, nil)
+ return
+ }
+
+ ah.Respond(w, map[string]interface{}{})
+}
+
// logs
func (aH *APIHandler) RegisterLogsRoutes(router *mux.Router, am *AuthMiddleware) {
subRouter := router.PathPrefix("/api/v1/logs").Subrouter()
@@ -2490,10 +2809,10 @@ func (aH *APIHandler) tailLogs(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "data: %v\n\n", buf.String())
flusher.Flush()
case <-client.Done:
- zap.S().Debug("done!")
+ zap.L().Debug("done!")
return
case err := <-client.Error:
- zap.S().Error("error occured!", err)
+ zap.L().Error("error occured", zap.Error(err))
return
}
}
@@ -2585,16 +2904,17 @@ func (ah *APIHandler) listLogsPipelines(ctx context.Context) (
*logparsingpipeline.PipelinesResponse, *model.ApiError,
) {
// get latest agent config
+ latestVersion := -1
lastestConfig, err := agentConf.GetLatestVersion(ctx, logPipelines)
- if err != nil {
- if err.Type() != model.ErrorNotFound {
- return nil, model.WrapApiError(err, "failed to get latest agent config version")
- } else {
- return nil, nil
- }
+ if err != nil && err.Type() != model.ErrorNotFound {
+ return nil, model.WrapApiError(err, "failed to get latest agent config version")
}
- payload, err := ah.LogsParsingPipelineController.GetPipelinesByVersion(ctx, lastestConfig.Version)
+ if lastestConfig != nil {
+ latestVersion = lastestConfig.Version
+ }
+
+ payload, err := ah.LogsParsingPipelineController.GetPipelinesByVersion(ctx, latestVersion)
if err != nil {
return nil, model.WrapApiError(err, "failed to get pipelines")
}
@@ -2643,7 +2963,7 @@ func (ah *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
postable []logparsingpipeline.PostablePipeline,
) (*logparsingpipeline.PipelinesResponse, *model.ApiError) {
if len(postable) == 0 {
- zap.S().Warnf("found no pipelines in the http request, this will delete all the pipelines")
+ zap.L().Warn("found no pipelines in the http request, this will delete all the pipelines")
}
for _, p := range postable {
@@ -3083,7 +3403,7 @@ func (aH *APIHandler) QueryRangeV3Format(w http.ResponseWriter, r *http.Request)
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
- zap.S().Errorf(apiErrorObj.Err.Error())
+ zap.L().Error("error parsing query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@@ -3129,12 +3449,19 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
applyMetricLimit(result, queryRangeParams)
+ sendQueryResultEvents(r, result, queryRangeParams)
+ // only adding applyFunctions instead of postProcess since expressions
+ // are executed in clickhouse directly and we wanted to add support for timeshift
+ if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
+ applyFunctions(result, queryRangeParams)
+ }
+
resp := v3.QueryRangeResponse{
Result: result,
}
// This checks if the time for context to complete has exceeded.
- // it adds flag to notify the user of incomplete respone
+ // it adds flag to notify the user of incomplete response
select {
case <-ctx.Done():
resp.ContextTimeout = true
@@ -3146,11 +3473,93 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
aH.Respond(w, resp)
}
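+
+// sendQueryResultEvents emits telemetry events for successful dashboard panel
+// and alert queries over SigNoz signals, using the Referer header to tell the
+// two contexts apart.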
+func sendQueryResultEvents(r *http.Request, result []*v3.Result, queryRangeParams *v3.QueryRangeParamsV3) {
+ referrer := r.Header.Get("Referer")
+
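+ // e.g. a referrer like "https://<host>/dashboard/<uuid>/edit" indicates a dashboard panel query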
+ dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
+ if err != nil {
+ zap.L().Error("error while matching the referrer", zap.Error(err))
+ }
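+ // e.g. a referrer like "https://<host>/alerts/edit?ruleId=42" indicates an alert query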
+ alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
+ if err != nil {
+ zap.L().Error("error while matching the alert: ", zap.Error(err))
+ }
+
+ if alertMatched || dashboardMatched {
+
+ if len(result) > 0 && (len(result[0].Series) > 0 || len(result[0].List) > 0) {
+
+ userEmail, err := auth.GetEmailFromJwt(r.Context())
+ if err == nil {
+ signozLogsUsed, signozMetricsUsed, signozTracesUsed := telemetry.GetInstance().CheckSigNozSignals(queryRangeParams)
+ if signozLogsUsed || signozMetricsUsed || signozTracesUsed {
+
+ if dashboardMatched {
+ var dashboardID, widgetID string
+ var dashboardIDMatch, widgetIDMatch []string
+ dashboardIDRegex, err := regexp.Compile(`/dashboard/([a-f0-9\-]+)/`)
+ if err == nil {
+ dashboardIDMatch = dashboardIDRegex.FindStringSubmatch(referrer)
+ } else {
+ zap.S().Errorf("error while matching the dashboardIDRegex: %v", err)
+ }
+ widgetIDRegex, err := regexp.Compile(`widgetId=([a-f0-9\-]+)`)
+ if err == nil {
+ widgetIDMatch = widgetIDRegex.FindStringSubmatch(referrer)
+ } else {
+ zap.S().Errorf("error while matching the widgetIDRegex: %v", err)
+ }
+
+ if len(dashboardIDMatch) > 1 {
+ dashboardID = dashboardIDMatch[1]
+ }
+
+ if len(widgetIDMatch) > 1 {
+ widgetID = widgetIDMatch[1]
+ }
+ telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_SUCCESSFUL_DASHBOARD_PANEL_QUERY, map[string]interface{}{
+ "queryType": queryRangeParams.CompositeQuery.QueryType,
+ "panelType": queryRangeParams.CompositeQuery.PanelType,
+ "tracesUsed": signozTracesUsed,
+ "logsUsed": signozLogsUsed,
+ "metricsUsed": signozMetricsUsed,
+ "dashboardId": dashboardID,
+ "widgetId": widgetID,
+ }, userEmail)
+ }
+ if alertMatched {
+ var alertID string
+ var alertIDMatch []string
+ alertIDRegex, err := regexp.Compile(`ruleId=(\d+)`)
+ if err != nil {
+ zap.S().Errorf("error while matching the alertIDRegex: %v", err)
+ } else {
+ alertIDMatch = alertIDRegex.FindStringSubmatch(referrer)
+ }
+
+ if len(alertIDMatch) > 1 {
+ alertID = alertIDMatch[1]
+ }
+ telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_SUCCESSFUL_ALERT_QUERY, map[string]interface{}{
+ "queryType": queryRangeParams.CompositeQuery.QueryType,
+ "panelType": queryRangeParams.CompositeQuery.PanelType,
+ "tracesUsed": signozTracesUsed,
+ "logsUsed": signozLogsUsed,
+ "metricsUsed": signozMetricsUsed,
+ "alertId": alertID,
+ }, userEmail)
+ }
+ }
+ }
+ }
+ }
+}
+
func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
- zap.S().Errorf(apiErrorObj.Err.Error())
+ zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@@ -3159,7 +3568,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
temporalityErr := aH.addTemporality(r.Context(), queryRangeParams)
if temporalityErr != nil {
- zap.S().Errorf("Error while adding temporality for metrics: %v", temporalityErr)
+ zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
@@ -3175,7 +3584,7 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
- zap.S().Errorf(apiErrorObj.Err.Error())
+ zap.L().Error("error parsing query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@@ -3236,10 +3645,10 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "data: %v\n\n", buf.String())
flusher.Flush()
case <-client.Done:
- zap.S().Debug("done!")
+ zap.L().Debug("done!")
return
case err := <-client.Error:
- zap.S().Error("error occurred!", err)
+ zap.L().Error("error occurred", zap.Error(err))
fmt.Fprintf(w, "event: error\ndata: %v\n\n", err.Error())
flusher.Flush()
return
@@ -3304,7 +3713,7 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
RespondError(w, apiErrObj, errQuriesByName)
return
}
-
+ sendQueryResultEvents(r, result, queryRangeParams)
resp := v3.QueryRangeResponse{
Result: result,
}
@@ -3316,7 +3725,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
- zap.S().Errorf(apiErrorObj.Err.Error())
+ zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@@ -3324,7 +3733,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
// add temporality for each metric
temporalityErr := aH.populateTemporality(r.Context(), queryRangeParams)
if temporalityErr != nil {
- zap.S().Errorf("Error while adding temporality for metrics: %v", temporalityErr)
+ zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
@@ -3368,12 +3777,12 @@ func postProcessResult(result []*v3.Result, queryRangeParams *v3.QueryRangeParam
expression, err := govaluate.NewEvaluableExpressionWithFunctions(query.Expression, evalFuncs())
// This shouldn't happen here, because it should have been caught earlier in validation
if err != nil {
- zap.S().Errorf("error in expression: %s", err.Error())
+ zap.L().Error("error in expression", zap.Error(err))
return nil, err
}
formulaResult, err := processResults(result, expression)
if err != nil {
- zap.S().Errorf("error in expression: %s", err.Error())
+ zap.L().Error("error in expression", zap.Error(err))
return nil, err
}
formulaResult.QueryName = query.QueryName
@@ -3403,7 +3812,7 @@ func applyFunctions(results []*v3.Result, queryRangeParams *v3.QueryRangeParamsV
for idx, result := range results {
builderQueries := queryRangeParams.CompositeQuery.BuilderQueries
- if builderQueries != nil && (builderQueries[result.QueryName].DataSource == v3.DataSourceMetrics) {
+ if builderQueries != nil {
functions := builderQueries[result.QueryName].Functions
for _, function := range functions {
diff --git a/pkg/query-service/app/integrations/builtin.go b/pkg/query-service/app/integrations/builtin.go
new file mode 100644
index 0000000000..cf98b3ff9d
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin.go
@@ -0,0 +1,265 @@
+package integrations
+
+import (
+ "context"
+ "embed"
+ "strings"
+ "unicode"
+
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/fs"
+ "path"
+
+ koanfJson "github.com/knadh/koanf/parsers/json"
+ "go.signoz.io/signoz/pkg/query-service/model"
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
+)
+
+type BuiltInIntegrations struct{}
+
+var builtInIntegrations map[string]IntegrationDetails
+
+func (bi *BuiltInIntegrations) list(ctx context.Context) (
+ []IntegrationDetails, *model.ApiError,
+) {
+ integrations := maps.Values(builtInIntegrations)
+ slices.SortFunc(integrations, func(i1, i2 IntegrationDetails) bool {
+ return i1.Id < i2.Id
+ })
+ return integrations, nil
+}
+
+func (bi *BuiltInIntegrations) get(
+ ctx context.Context, integrationIds []string,
+) (
+ map[string]IntegrationDetails, *model.ApiError,
+) {
+ result := map[string]IntegrationDetails{}
+ for _, iid := range integrationIds {
+ i, exists := builtInIntegrations[iid]
+ if exists {
+ result[iid] = i
+ }
+ }
+ return result, nil
+}
+
+//go:embed builtin_integrations/*
+var integrationFiles embed.FS
+
+func init() {
+ err := readBuiltIns()
+ if err != nil {
+ panic(fmt.Errorf("couldn't read builtin integrations: %w", err))
+ }
+}
+
+func readBuiltIns() error {
+ rootDirName := "builtin_integrations"
+ builtinDirs, err := fs.ReadDir(integrationFiles, rootDirName)
+ if err != nil {
+ return fmt.Errorf("couldn't list integrations dirs: %w", err)
+ }
+
+ builtInIntegrations = map[string]IntegrationDetails{}
+ for _, d := range builtinDirs {
+ if !d.IsDir() {
+ continue
+ }
+
+ integrationDir := path.Join(rootDirName, d.Name())
+ i, err := readBuiltInIntegration(integrationDir)
+ if err != nil {
+ return fmt.Errorf("couldn't parse integration %s from files: %w", d.Name(), err)
+ }
+
+ _, exists := builtInIntegrations[i.Id]
+ if exists {
+ return fmt.Errorf(
+ "duplicate integration for id %s at %s", i.Id, d.Name(),
+ )
+ }
+ builtInIntegrations[i.Id] = *i
+ }
+ return nil
+}
+
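+// readBuiltInIntegration parses integration.json from dirpath, hydrates any
+// file:// references in the spec and validates the result.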
+func readBuiltInIntegration(dirpath string) (
+ *IntegrationDetails, error,
+) {
+ integrationJsonPath := path.Join(dirpath, "integration.json")
+
+ serializedSpec, err := integrationFiles.ReadFile(integrationJsonPath)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't find integration.json in %s: %w", dirpath, err)
+ }
+
+ integrationSpec, err := koanfJson.Parser().Unmarshal(serializedSpec)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "couldn't parse integration json from %s: %w", integrationJsonPath, err,
+ )
+ }
+
+ hydrated, err := hydrateFileUris(integrationSpec, dirpath)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "couldn't hydrate files referenced in integration %s: %w", integrationJsonPath, err,
+ )
+ }
+
+ hydratedSpec := hydrated.(map[string]interface{})
+ hydratedSpecJson, err := koanfJson.Parser().Marshal(hydratedSpec)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "couldn't serialize hydrated integration spec back to JSON %s: %w", integrationJsonPath, err,
+ )
+ }
+
+ var integration IntegrationDetails
+ err = json.Unmarshal(hydratedSpecJson, &integration)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "couldn't parse hydrated JSON spec read from %s: %w",
+ integrationJsonPath, err,
+ )
+ }
+
+ err = validateIntegration(integration)
+ if err != nil {
+ return nil, fmt.Errorf("invalid integration spec %s: %w", integration.Id, err)
+ }
+
+ integration.Id = "builtin-" + integration.Id
+ if len(integration.DataCollected.Metrics) > 0 {
+ metricsForConnTest := []string{}
+ for _, collectedMetric := range integration.DataCollected.Metrics {
+ promName := toPromMetricName(collectedMetric.Name)
+ metricsForConnTest = append(metricsForConnTest, promName)
+ }
+ integration.ConnectionTests.Metrics = metricsForConnTest
+ }
+
+ return &integration, nil
+}
+
+func validateIntegration(i IntegrationDetails) error {
+ // Validate dashboard data
+ seenDashboardIds := map[string]interface{}{}
+ for _, dd := range i.Assets.Dashboards {
+ did, exists := dd["id"]
+ if !exists {
+ return fmt.Errorf("id is required. not specified in dashboard titled %v", dd["title"])
+ }
+ dashboardId, ok := did.(string)
+ if !ok {
+ return fmt.Errorf("id must be string in dashboard titled %v", dd["title"])
+ }
+ if _, seen := seenDashboardIds[dashboardId]; seen {
+ return fmt.Errorf("multiple dashboards found with id %s", dashboardId)
+ }
+ seenDashboardIds[dashboardId] = nil
+ }
+
+ // TODO(Raj): Validate all parts of plugged in integrations
+
+ return nil
+}
+
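+// hydrateFileUris walks the parsed spec recursively, replacing string values
+// of the form "file://<relative-path>" with the contents of the referenced
+// file (markdown as text, JSON parsed, SVG as a base64 data URI).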
+func hydrateFileUris(spec interface{}, basedir string) (interface{}, error) {
+ if specMap, ok := spec.(map[string]interface{}); ok {
+ result := map[string]interface{}{}
+ for k, v := range specMap {
+ hydrated, err := hydrateFileUris(v, basedir)
+ if err != nil {
+ return nil, err
+ }
+ result[k] = hydrated
+ }
+ return result, nil
+
+ } else if specSlice, ok := spec.([]interface{}); ok {
+ result := []interface{}{}
+ for _, v := range specSlice {
+ hydrated, err := hydrateFileUris(v, basedir)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, hydrated)
+ }
+ return result, nil
+
+ } else if maybeFileUri, ok := spec.(string); ok {
+ return readFileIfUri(maybeFileUri, basedir)
+ }
+
+ return spec, nil
+
+}
+
+func readFileIfUri(maybeFileUri string, basedir string) (interface{}, error) {
+ fileUriPrefix := "file://"
+ if !strings.HasPrefix(maybeFileUri, fileUriPrefix) {
+ return maybeFileUri, nil
+ }
+
+ relativePath := maybeFileUri[len(fileUriPrefix):]
+ fullPath := path.Join(basedir, relativePath)
+
+ fileContents, err := integrationFiles.ReadFile(fullPath)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't read referenced file: %w", err)
+ }
+ if strings.HasSuffix(maybeFileUri, ".md") {
+ return string(fileContents), nil
+
+ } else if strings.HasSuffix(maybeFileUri, ".json") {
+ parsed, err := koanfJson.Parser().Unmarshal(fileContents)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse referenced JSON file: %w", err)
+ }
+ return parsed, nil
+
+ } else if strings.HasSuffix(maybeFileUri, ".svg") {
+ base64Svg := base64.StdEncoding.EncodeToString(fileContents)
+ dataUri := fmt.Sprintf("data:image/svg+xml;base64,%s", base64Svg)
+ return dataUri, nil
+
+ }
+
+ return nil, fmt.Errorf("unsupported file type %s", maybeFileUri)
+}
+
+// copied from signoz clickhouse exporter's `sanitize` which
+// in turn is copied from prometheus-go-metric-exporter
+//
+// replaces non-alphanumeric characters with underscores in s.
+func toPromMetricName(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+
+ // Note: No length limit for label keys because Prometheus doesn't
+ // define a length limit, thus we should NOT be truncating label keys.
+ // See https://github.com/orijtech/prometheus-go-metrics-exporter/issues/4.
+
+ s = strings.Map(func(r rune) rune {
+ // sanitizeRune converts anything that is not a letter or digit to an underscore
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ // Everything else turns into an underscore
+ return '_'
+ }, s)
+
+ if unicode.IsDigit(rune(s[0])) {
+ s = "key" + "_" + s
+ }
+ if s[0] == '_' {
+ s = "key" + s
+ }
+ return s
+}
diff --git a/pkg/query-service/app/integrations/builtin_integrations/mongo/assets/dashboards/overview.json b/pkg/query-service/app/integrations/builtin_integrations/mongo/assets/dashboards/overview.json
new file mode 100644
index 0000000000..5b993cb2ca
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/mongo/assets/dashboards/overview.json
@@ -0,0 +1,797 @@
+{
+ "id": "mongo-overview",
+ "description": "This dashboard provides a high-level overview of your MongoDB. It includes read/write performance, most-used replicas, collection metrics etc...",
+ "layout": [
+ {
+ "h": 3,
+ "i": "0c3d2b15-89be-4d62-a821-b26d93332ed3",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 3
+ },
+ {
+ "h": 3,
+ "i": "14504a3c-4a05-4d22-bab3-e22e94f51380",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 6
+ },
+ {
+ "h": 3,
+ "i": "dcfb3829-c3f2-44bb-907d-8dc8a6dc4aab",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 3
+ },
+ {
+ "h": 3,
+ "i": "bfc9e80b-02bf-4122-b3da-3dd943d35012",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 0
+ },
+ {
+ "h": 3,
+ "i": "4c07a7d2-893a-46c2-bcdb-a19b6efeac3a",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 0
+ },
+ {
+ "h": 3,
+ "i": "a5a64eec-1034-4aa6-8cb1-05673c4426c6",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 6
+ },
+ {
+ "h": 3,
+ "i": "503af589-ef4d-4fe3-8934-c8f7eb480d9a",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 9
+ }
+ ],
+ "name": "",
+ "tags": [
+ "mongo",
+ "database"
+ ],
+ "title": "Mongo overview",
+ "variables": {
+ "a2c21714-a814-4d31-9b56-7367c3208801": {
+ "allSelected": true,
+ "customValue": "",
+ "description": "List of hosts sending mongo metrics",
+ "id": "a2c21714-a814-4d31-9b56-7367c3208801",
+ "modificationUUID": "448e675a-4531-45b1-b434-a9ee809470d6",
+ "multiSelect": true,
+ "name": "host_name",
+ "order": 0,
+ "queryValue": "SELECT JSONExtractString(labels, 'host_name') AS host_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'mongodb_memory_usage'\nGROUP BY host_name",
+ "selectedValue": [
+ "Srikanths-MacBook-Pro.local"
+ ],
+ "showALLOption": true,
+ "sort": "ASC",
+ "textboxValue": "",
+ "type": "QUERY"
+ }
+ },
+ "widgets": [
+ {
+ "description": "Total number of operations",
+ "fillSpans": false,
+ "id": "4c07a7d2-893a-46c2-bcdb-a19b6efeac3a",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "mongodb_operation_count--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "mongodb_operation_count",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum_rate",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "a468a30b",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{operation}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "7da5d899-8b06-4139-9a89-47baf9551ff8",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Operations count",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "The total time spent performing operations.",
+ "fillSpans": false,
+ "id": "bfc9e80b-02bf-4122-b3da-3dd943d35012",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "mongodb_operation_time--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "mongodb_operation_time",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum_rate",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "31be3166",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{operation}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "2ca35957-894a-46ae-a2a6-95d7e400d8e1",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Total operations time",
+ "yAxisUnit": "ms"
+ },
+ {
+ "description": "The number of cache operations",
+ "fillSpans": false,
+ "id": "dcfb3829-c3f2-44bb-907d-8dc8a6dc4aab",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "mongodb_cache_operations--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "mongodb_cache_operations",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum_rate",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "01b45814",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "type--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "type",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{type}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "bb439198-dcf5-4767-b0d0-ab5785159b8d",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Cache operations",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "14504a3c-4a05-4d22-bab3-e22e94f51380",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "mongodb_operation_latency_time--float64--Gauge--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "mongodb_operation_latency_time",
+ "type": "Gauge"
+ },
+ "aggregateOperator": "max",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "2e165319",
+ "key": {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "read"
+ },
+ {
+ "id": "888e920b",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [],
+ "having": [],
+ "legend": "Latency",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "4a9cafe8-778b-476c-b825-c04e165bf285",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Read latency",
+ "yAxisUnit": "µs"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "a5a64eec-1034-4aa6-8cb1-05673c4426c6",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "mongodb_operation_latency_time--float64--Gauge--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "mongodb_operation_latency_time",
+ "type": "Gauge"
+ },
+ "aggregateOperator": "max",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "53b37ca7",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "9862c46c",
+ "key": {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "write"
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [],
+ "having": [],
+ "legend": "Latency",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "446827eb-a4f2-4ff3-966b-fb65288c983b",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Write latency",
+ "yAxisUnit": "µs"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "503af589-ef4d-4fe3-8934-c8f7eb480d9a",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "mongodb_operation_latency_time--float64--Gauge--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "mongodb_operation_latency_time",
+ "type": "Gauge"
+ },
+ "aggregateOperator": "max",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "c33ad4b6",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "c70ecfd0",
+ "key": {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "command"
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [],
+ "having": [],
+ "legend": "Latency",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "7b7b977d-0921-4552-8cfe-d82dfde63ef4",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Command latency",
+ "yAxisUnit": "µs"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "0c3d2b15-89be-4d62-a821-b26d93332ed3",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "mongodb_network_io_receive--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "mongodb_network_io_receive",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "5c9d7fe3",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Bytes received :: {{host_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ },
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "mongodb_network_io_transmit--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "mongodb_network_io_transmit",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "B",
+ "filters": {
+ "items": [
+ {
+ "id": "96520885",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Bytes transmitted :: {{host_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "B",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "41eea5bc-f9cf-45c2-92fb-ef226d6b540b",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Network IO",
+ "yAxisUnit": "bytes"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/mongo/config/collect-logs.md b/pkg/query-service/app/integrations/builtin_integrations/mongo/config/collect-logs.md
new file mode 100644
index 0000000000..fa55ca9a63
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/mongo/config/collect-logs.md
@@ -0,0 +1,117 @@
+### Collect MongoDB Logs
+
+You can configure MongoDB logs collection by providing the required collector config to your collector.
+
+#### Create collector config file
+
+Save the following config for collecting MongoDB logs in a file named `mongodb-logs-collection-config.yaml`
+
+```yaml
+receivers:
+ filelog/mongodb:
+ include: ["${env:MONGODB_LOG_FILE}"]
+ operators:
+ # Parse structured mongodb logs
+ # For more details, see https://www.mongodb.com/docs/manual/reference/log-messages/#structured-logging
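+      # Example of a structured log line this parser handles (illustrative values):
+      # {"t":{"$date":"2024-01-01T12:00:00.000+00:00"},"s":"I","c":"NETWORK","id":22943,"ctx":"listener","msg":"Connection accepted","attr":{"remote":"127.0.0.1:54321"}}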
+ - type: json_parser
+ if: body matches '^\\s*{\\s*".*}\\s*$'
+ parse_from: body
+ parse_to: attributes
+ timestamp:
+ parse_from: attributes.t.$$date
+ layout: '2006-01-02T15:04:05.000-07:00'
+ layout_type: gotime
+ severity:
+ parse_from: attributes.s
+ overwrite_text: true
+ mapping:
+ debug:
+ - D1
+ - D2
+ - D3
+ - D4
+ - D5
+ info: I
+ warn: W
+ error: E
+ fatal: F
+ - type: flatten
+ if: attributes.attr != nil
+ field: attributes.attr
+ - type: move
+ if: attributes.msg != nil
+ from: attributes.msg
+ to: body
+ - type: move
+ if: attributes.c != nil
+ from: attributes.c
+ to: attributes.component
+ - type: move
+ if: attributes.id != nil
+ from: attributes.id
+ to: attributes.mongo_log_id
+ - type: remove
+ if: attributes.t != nil
+ field: attributes.t
+ - type: remove
+ if: attributes.s != nil
+ field: attributes.s
+ - type: add
+ field: attributes.source
+ value: mongodb
+
+processors:
+ batch:
+ send_batch_size: 10000
+ send_batch_max_size: 11000
+ timeout: 10s
+
+exporters:
+ # export to SigNoz cloud
+ otlp/mongodb-logs:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/mongodb-logs:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
+
+
+service:
+ pipelines:
+ logs/mongodb:
+ receivers: [filelog/mongodb]
+ processors: [batch]
+ exporters: [otlp/mongodb-logs]
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# path of the MongoDB server log file; must be accessible by the otel collector
+export MONGODB_LOG_FILE=/var/log/mongodb.log
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
+```
+
+#### Use collector config file
+
+Make the collector config file available to your otel collector, and use it by adding the following flag to the command that runs your collector:
+```bash
+--config mongodb-logs-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
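+
+For example, if the collector runs as the `otelcol-contrib` binary with an existing base config (the binary name and base config path here are illustrative), the full command might look like:
+
+```bash
+otelcol-contrib --config /etc/otelcol-contrib/config.yaml --config mongodb-logs-collection-config.yaml
+```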
+
diff --git a/pkg/query-service/app/integrations/builtin_integrations/mongo/config/collect-metrics.md b/pkg/query-service/app/integrations/builtin_integrations/mongo/config/collect-metrics.md
new file mode 100644
index 0000000000..dcbc7dd582
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/mongo/config/collect-metrics.md
@@ -0,0 +1,104 @@
+### Collect MongoDB Metrics
+
+You can configure MongoDB metrics collection by providing the required collector config to your collector.
+
+#### Create collector config file
+
+Save the following config for collecting MongoDB metrics in a file named `mongodb-metrics-collection-config.yaml`
+
+```yaml
+receivers:
+ mongodb:
+ # - For standalone MongoDB deployments this is the hostname and port of the mongod instance
+    # - For replica sets, specify the hostnames and ports of the mongod instances that are in the replica set configuration. If the replica_set field is specified, nodes will be autodiscovered.
+ # - For a sharded MongoDB deployment, please specify a list of the mongos hosts.
+ hosts:
+ - endpoint: ${env:MONGODB_ENDPOINT}
+    # If authentication is required, a user with clusterMonitor permissions can be provided here.
+ username: ${env:MONGODB_USERNAME}
+ # If authentication is required, the password can be provided here.
+ password: ${env:MONGODB_PASSWORD}
+ collection_interval: 60s
+ # If TLS is enabled, the following fields can be used to configure the connection
+ tls:
+ insecure: true
+ insecure_skip_verify: true
+ # ca_file: /etc/ssl/certs/ca-certificates.crt
+ # cert_file: /etc/ssl/certs/mongodb.crt
+ # key_file: /etc/ssl/certs/mongodb.key
+ metrics:
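+      # The metrics below are optional in the mongodb receiver (disabled by default),
+      # so they are enabled explicitly here; default metrics need no extra configuration.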
+ mongodb.lock.acquire.count:
+ enabled: true
+ mongodb.lock.acquire.time:
+ enabled: true
+ mongodb.lock.acquire.wait_count:
+ enabled: true
+ mongodb.lock.deadlock.count:
+ enabled: true
+ mongodb.operation.latency.time:
+ enabled: true
+
+processors:
+ # enriches the data with additional host information
+ # see https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor
+ resourcedetection/system:
+ # add additional detectors if needed
+ detectors: ["system"]
+ system:
+ hostname_sources: ["os"]
+
+exporters:
+ # export to SigNoz cloud
+ otlp/mongodb:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/mongodb:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
+
+service:
+ pipelines:
+ metrics/mongodb:
+ receivers: [mongodb]
+ # note: remove this processor if the collector host is not running on the same host as the mongo instance
+ processors: [resourcedetection/system]
+ exporters: [otlp/mongodb]
+
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# MongoDB endpoint reachable from the otel collector
+export MONGODB_ENDPOINT="host:port"
+
+# username of the MongoDB monitoring user
+export MONGODB_USERNAME="monitoring"
+
+# password of the MongoDB monitoring user
+export MONGODB_PASSWORD=""
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
+```
+
+#### Use collector config file
+
+Make the collector config file available to your otel collector, and use it by adding the following flag to the command that runs your collector:
+```bash
+--config mongodb-metrics-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/mongo/config/prerequisites.md b/pkg/query-service/app/integrations/builtin_integrations/mongo/config/prerequisites.md
new file mode 100644
index 0000000000..5191bfb194
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/mongo/config/prerequisites.md
@@ -0,0 +1,41 @@
+## Before You Begin
+
+To configure metrics and logs collection for MongoDB, you need the following.
+
+### Ensure MongoDB server is prepared for monitoring
+
+- **Ensure that the MongoDB server is running a supported version**
+ MongoDB versions 4.4+ are supported.
+  You can use the following statement to determine the server version:
+ ```js
+ db.version()
+ ```
+
+- **If collecting metrics, ensure that there is a MongoDB user with required permissions**
+  MongoDB recommends setting up a least privilege user (LPU) with the clusterMonitor role in order to collect metrics.
+
+ To create a monitoring user, run:
+ ```js
+ use admin
+ db.createUser(
+ {
+ user: "monitoring",
+ pwd: "",
+ roles: ["clusterMonitor"]
+ }
+ );
+ ```
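+
+  As a quick sanity check, you can verify the user and its role afterwards (assuming the `mongosh` shell is available; add connection and auth options as needed):
+  ```bash
+  mongosh admin --eval 'db.getUser("monitoring")'
+  ```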
+
+
+### Ensure OTEL Collector is running with access to the MongoDB server
+
+- **Ensure that an OTEL collector is running in your deployment environment**
+  If needed, please [install an OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
+ If already installed, ensure that the collector version is v0.88.0 or newer.
+
+ Also ensure that you can provide config files to the collector and that you can set environment variables and command line flags used for running it.
+
+- **Ensure that the OTEL collector can access the MongoDB server**
+ In order to collect metrics, the collector must be able to access the MongoDB server as a client using the monitoring user.
+
+ In order to collect logs, the collector must be able to read the MongoDB server log file.
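+
+  A quick way to confirm read access from the collector host (the log file path below is illustrative; use your actual path):
+  ```bash
+  test -r /var/log/mongodb/mongod.log && echo "MongoDB log file is readable"
+  ```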
diff --git a/pkg/query-service/app/integrations/builtin_integrations/mongo/icon.svg b/pkg/query-service/app/integrations/builtin_integrations/mongo/icon.svg
new file mode 100644
index 0000000000..4ffedc6339
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/mongo/icon.svg
@@ -0,0 +1,28 @@
+[SVG icon markup omitted]
diff --git a/pkg/query-service/app/integrations/builtin_integrations/mongo/integration.json b/pkg/query-service/app/integrations/builtin_integrations/mongo/integration.json
new file mode 100644
index 0000000000..b9543e0757
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/mongo/integration.json
@@ -0,0 +1,260 @@
+{
+ "id": "mongo",
+  "title": "MongoDB",
+  "description": "Monitor MongoDB using logs and metrics.",
+ "author": {
+ "name": "SigNoz",
+ "email": "integrations@signoz.io",
+ "homepage": "https://signoz.io"
+ },
+ "icon": "file://icon.svg",
+ "categories": [
+ "Database"
+ ],
+ "overview": "file://overview.md",
+ "configuration": [
+ {
+ "title": "Prerequisites",
+ "instructions": "file://config/prerequisites.md"
+ },
+ {
+ "title": "Collect Metrics",
+ "instructions": "file://config/collect-metrics.md"
+ },
+ {
+ "title": "Collect Logs",
+ "instructions": "file://config/collect-logs.md"
+ }
+ ],
+ "assets": {
+ "logs": {
+ "pipelines": []
+ },
+ "dashboards": [
+ "file://assets/dashboards/overview.json"
+ ],
+ "alerts": []
+ },
+ "connection_tests": {
+ "logs": {
+ "op": "AND",
+ "items": [
+ {
+ "key": {
+ "type": "tag",
+ "key": "source",
+ "dataType": "string"
+ },
+ "op": "=",
+          "value": "mongodb"
+ }
+ ]
+ }
+ },
+ "data_collected": {
+ "logs": [
+ {
+ "name": "Timestamp",
+ "path": "timestamp",
+ "type": "timestamp"
+ },
+ {
+ "name": "Severity Text",
+ "path": "severity_text",
+ "type": "string"
+ },
+ {
+ "name": "Severity Number",
+ "path": "severity_number",
+ "type": "number"
+ },
+ {
+ "name": "MongoDB Component",
+ "path": "attributes.component",
+ "type": "string"
+ }
+ ],
+ "metrics": [
+ {
+ "description": "The number of cache operations of the instance.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_cache_operations"
+ },
+ {
+ "description": "The number of collections.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_collection_count"
+ },
+ {
+ "description": "The size of the collection. Data compression does not affect this value.",
+ "unit": "Bytes",
+ "type": "Sum",
+ "name": "mongodb_data_size"
+ },
+ {
+ "description": "The number of connections.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_connection_count"
+ },
+ {
+ "description": "The number of extents.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_extent_count"
+ },
+ {
+ "description": "The time the global lock has been held.",
+ "unit": "ms",
+ "type": "Sum",
+ "name": "mongodb_global_lock_time"
+ },
+ {
+ "description": "The number of indexes.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_index_count"
+ },
+ {
+ "description": "Sum of the space allocated to all indexes in the database, including free index space.",
+ "unit": "Bytes",
+ "type": "Sum",
+ "name": "mongodb_index_size"
+ },
+ {
+ "description": "The amount of memory used.",
+ "unit": "Bytes",
+ "type": "Sum",
+ "name": "mongodb_memory_usage"
+ },
+ {
+ "description": "The number of objects.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_object_count"
+ },
+ {
+ "description": "The latency of operations.",
+ "unit": "us",
+ "type": "Gauge",
+ "name": "mongodb_operation_latency_time"
+ },
+ {
+ "description": "The number of operations executed.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_operation_count"
+ },
+ {
+ "description": "The number of replicated operations executed.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_operation_repl_count"
+ },
+ {
+ "description": "The total amount of storage allocated to this collection.",
+ "unit": "Bytes",
+ "type": "Sum",
+ "name": "mongodb_storage_size"
+ },
+ {
+ "description": "The number of existing databases.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_database_count"
+ },
+ {
+ "description": "The number of times an index has been accessed.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_index_access_count"
+ },
+ {
+ "description": "The number of document operations executed.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_document_operation_count"
+ },
+ {
+ "description": "The number of bytes received.",
+ "unit": "Bytes",
+ "type": "Sum",
+ "name": "mongodb_network_io_receive"
+ },
+ {
+        "description": "The number of bytes transmitted.",
+ "unit": "Bytes",
+ "type": "Sum",
+ "name": "mongodb_network_io_transmit"
+ },
+ {
+ "description": "The number of requests received by the server.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_network_request_count"
+ },
+ {
+ "description": "The total time spent performing operations.",
+ "unit": "ms",
+ "type": "Sum",
+ "name": "mongodb_operation_time"
+ },
+ {
+ "description": "The total number of active sessions.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_session_count"
+ },
+ {
+ "description": "The number of open cursors maintained for clients.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_cursor_count"
+ },
+ {
+ "description": "The number of cursors that have timed out.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_cursor_timeout_count"
+ },
+ {
+ "description": "Number of times the lock was acquired in the specified mode.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_lock_acquire_count"
+ },
+ {
+ "description": "Number of times the lock acquisitions encountered waits because the locks were held in a conflicting mode.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_lock_acquire_wait_count"
+ },
+ {
+ "description": "Cumulative wait time for the lock acquisitions.",
+ "unit": "microseconds",
+ "type": "Sum",
+ "name": "mongodb_lock_acquire_time"
+ },
+ {
+ "description": "Number of times the lock acquisitions encountered deadlocks.",
+ "unit": "number",
+ "type": "Sum",
+ "name": "mongodb_lock_deadlock_count"
+ },
+ {
+ "description": "The health status of the server.",
+ "unit": "number",
+ "type": "Gauge",
+ "name": "mongodb_health"
+ },
+ {
+ "description": "The amount of time that the server has been running.",
+ "unit": "ms",
+ "type": "Sum",
+ "name": "mongodb_uptime"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/mongo/overview.md b/pkg/query-service/app/integrations/builtin_integrations/mongo/overview.md
new file mode 100644
index 0000000000..c088a9aa44
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/mongo/overview.md
@@ -0,0 +1,6 @@
+### Monitor MongoDB with SigNoz
+
+Collect key MongoDB metrics and view them with an out-of-the-box dashboard.
+
+Collect and parse MongoDB logs to populate timestamp, severity, and other log attributes for better querying and aggregation.
+
diff --git a/pkg/query-service/app/integrations/builtin_integrations/nginx/config/collect-logs.md b/pkg/query-service/app/integrations/builtin_integrations/nginx/config/collect-logs.md
new file mode 100644
index 0000000000..b421478ab9
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/nginx/config/collect-logs.md
@@ -0,0 +1,139 @@
+### Collect Nginx Logs
+
+You can configure Nginx logs collection by providing the required collector config to your collector.
+
+#### Create collector config file
+
+Save the following config for collecting Nginx logs in a file named `nginx-logs-collection-config.yaml`
+
+```yaml
+receivers:
+ filelog/nginx-access-logs:
+ include: ["${env:NGINX_ACCESS_LOG_FILE}"]
+ operators:
+ # Parse the default nginx access log format. Nginx defaults to the "combined" log format
+ # $remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"
+ # For more details, see https://nginx.org/en/docs/http/ngx_http_log_module.html
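+      # Example log line in this format (illustrative):
+      # 192.0.2.1 - alice [10/Oct/2023:13:55:36 -0700] "GET /index.html HTTP/1.1" 200 2326 "http://example.com/" "Mozilla/5.0"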
+ - type: regex_parser
+        if: body matches '^(?P<remote_addr>[0-9\\.]+) - (?P<remote_user>[^\\s]+) \\[(?P<ts>.+)\\] "(?P<request_method>\\w+?) (?P<request_path>.+?)" (?P<status>[0-9]+) (?P<body_bytes_sent>[0-9]+) "(?P<http_referrer>.+?)" "(?P<http_user_agent>.+?)"$'
+ parse_from: body
+ parse_to: attributes
+        regex: '^(?P<remote_addr>[0-9\.]+) - (?P<remote_user>[^\s]+) \[(?P<ts>.+)\] "(?P<request_method>\w+?) (?P<request_path>.+?)" (?P<status>[0-9]+) (?P<body_bytes_sent>[0-9]+) "(?P<http_referrer>.+?)" "(?P<http_user_agent>.+?)"$'
+ timestamp:
+ parse_from: attributes.ts
+ layout: "02/Jan/2006:15:04:05 -0700"
+ layout_type: gotime
+ severity:
+ parse_from: attributes.status
+ overwrite_text: true
+ mapping:
+ debug: "1xx"
+ info:
+ - "2xx"
+ - "3xx"
+ warn: "4xx"
+ error: "5xx"
+ - type: remove
+ if: attributes.ts != nil
+ field: attributes.ts
+ - type: add
+ field: attributes.source
+ value: nginx
+
+ filelog/nginx-error-logs:
+ include: ["${env:NGINX_ERROR_LOG_FILE}"]
+ operators:
+ # Parse the default nginx error log format.
+ # YYYY/MM/DD HH:MM:SS [LEVEL] PID#TID: *CID MESSAGE
+ # For more details, see https://github.com/phusion/nginx/blob/master/src/core/ngx_log.c
+ - type: regex_parser
+        if: body matches '^(?P<ts>.+?) \\[(?P<log_level>\\w+)\\] (?P<pid>\\d+)#(?P<tid>\\d+). \\*(?P<cid>\\d+) (?P<message>.+)$'
+ parse_from: body
+ parse_to: attributes
+        regex: '^(?P<ts>.+?) \[(?P<log_level>\w+)\] (?P<pid>\d+)#(?P<tid>\d+). \*(?P<cid>\d+) (?P<message>.+)$'
+ timestamp:
+ parse_from: attributes.ts
+ layout: "2006/01/02 15:04:05"
+ layout_type: gotime
+ severity:
+ parse_from: attributes.log_level
+ overwrite_text: true
+ mapping:
+ debug: "debug"
+ info:
+ - "info"
+ - "notice"
+ warn: "warn"
+ error:
+ - "error"
+ - "crit"
+ - "alert"
+ fatal: "emerg"
+ - type: remove
+ if: attributes.ts != nil
+ field: attributes.ts
+ - type: move
+ if: attributes.message != nil
+ from: attributes.message
+ to: body
+ - type: add
+ field: attributes.source
+ value: nginx
+
+processors:
+ batch:
+ send_batch_size: 10000
+ send_batch_max_size: 11000
+ timeout: 10s
+
+exporters:
+ # export to SigNoz cloud
+ otlp/nginx-logs:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/nginx-logs:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
+
+service:
+ pipelines:
+ logs/nginx:
+ receivers: [filelog/nginx-access-logs, filelog/nginx-error-logs]
+ processors: [batch]
+ exporters: [otlp/nginx-logs]
+
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# path of the Nginx access log file; must be accessible by the otel collector
+export NGINX_ACCESS_LOG_FILE=/var/log/nginx/access.log
+
+# path of the Nginx error log file; must be accessible by the otel collector
+export NGINX_ERROR_LOG_FILE=/var/log/nginx/error.log
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
+```
+
+#### Use collector config file
+
+Make the collector config file available to your otel collector, and use it by adding the following flag to the command that runs your collector:
+```bash
+--config nginx-logs-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
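+
+Optionally, you can check that the config parses cleanly before restarting the collector (the `validate` subcommand is available in recent collector releases; the binary name is illustrative):
+
+```bash
+otelcol-contrib validate --config nginx-logs-collection-config.yaml
+```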
diff --git a/pkg/query-service/app/integrations/builtin_integrations/nginx/config/prerequisites.md b/pkg/query-service/app/integrations/builtin_integrations/nginx/config/prerequisites.md
new file mode 100644
index 0000000000..f2ce762f21
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/nginx/config/prerequisites.md
@@ -0,0 +1,19 @@
+## Before You Begin
+
+To configure logs collection for Nginx, you need the following.
+
+### Ensure Nginx server is running a supported version
+
+Ensure that your Nginx server is running a version newer than 1.0.0.
+
+
+### Ensure OTEL Collector is running with access to the Nginx server
+
+- **Ensure that an OTEL collector is running in your deployment environment**
+  If needed, please [install an OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
+ If already installed, ensure that the collector version is v0.88.0 or newer.
+
+ Also ensure that you can provide config files to the collector and that you can set environment variables and command line flags used for running it.
+
+- **Ensure that the OTEL collector can access the Nginx server**
+ In order to collect logs, the collector must be able to read Nginx server log files.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/nginx/icon.svg b/pkg/query-service/app/integrations/builtin_integrations/nginx/icon.svg
new file mode 100644
index 0000000000..5687615020
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/nginx/icon.svg
@@ -0,0 +1,8 @@
+[SVG icon markup omitted]
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/nginx/integration.json b/pkg/query-service/app/integrations/builtin_integrations/nginx/integration.json
new file mode 100644
index 0000000000..16f03bbed3
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/nginx/integration.json
@@ -0,0 +1,104 @@
+{
+ "id": "nginx",
+ "title": "Nginx",
+  "description": "Monitor Nginx using logs.",
+ "author": {
+ "name": "SigNoz",
+ "email": "integrations@signoz.io",
+ "homepage": "https://signoz.io"
+ },
+ "icon": "file://icon.svg",
+ "categories": [
+ "Ingress",
+ "HTTP"
+ ],
+ "overview": "file://overview.md",
+ "configuration": [
+ {
+ "title": "Prerequisites",
+ "instructions": "file://config/prerequisites.md"
+ },
+ {
+ "title": "Collect Logs",
+ "instructions": "file://config/collect-logs.md"
+ }
+ ],
+ "assets": {
+ "logs": {
+ "pipelines": []
+ },
+ "dashboards": null,
+ "alerts": null
+ },
+ "connection_tests": {
+ "logs": {
+ "op": "AND",
+ "items": [
+ {
+ "key": {
+ "type": "tag",
+ "key": "source",
+ "dataType": "string"
+ },
+ "op": "=",
+ "value": "nginx"
+ }
+ ]
+ }
+ },
+ "data_collected": {
+ "logs": [
+ {
+ "name": "Timestamp",
+ "path": "timestamp",
+ "type": "timestamp"
+ },
+ {
+ "name": "Severity Text",
+ "path": "severity_text",
+ "type": "string"
+ },
+ {
+ "name": "Severity Number",
+ "path": "severity_number",
+ "type": "number"
+ },
+ {
+ "name": "Body Bytes Sent",
+ "path": "attributes.body_bytes_sent",
+ "type": "string"
+ },
+ {
+ "name": "Referrer",
+ "path": "attributes.http_referrer",
+ "type": "string"
+ },
+ {
+ "name": "User Agent",
+ "path": "attributes.http_user_agent",
+ "type": "string"
+ },
+ {
+ "name": "Request Method",
+ "path": "attributes.request_method",
+ "type": "string"
+ },
+ {
+ "name": "Request Path",
+ "path": "attributes.request_path",
+ "type": "string"
+ },
+ {
+ "name": "Response Status Code",
+ "path": "attributes.status",
+ "type": "string"
+ },
+ {
+ "name": "Remote Address",
+ "path": "attributes.remote_addr",
+ "type": "string"
+ }
+ ],
+ "metrics": []
+ }
+}
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/nginx/overview.md b/pkg/query-service/app/integrations/builtin_integrations/nginx/overview.md
new file mode 100644
index 0000000000..8c17af806c
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/nginx/overview.md
@@ -0,0 +1,3 @@
+### Monitor Nginx with SigNoz
+
+Collect and parse Nginx logs to populate timestamp, severity, and other log attributes for better querying and aggregation.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/assets/dashboards/overview.json b/pkg/query-service/app/integrations/builtin_integrations/postgres/assets/dashboards/overview.json
new file mode 100644
index 0000000000..944e06b03f
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/assets/dashboards/overview.json
@@ -0,0 +1,1869 @@
+{
+ "id": "postgres-overview",
+  "description": "This dashboard provides a high-level overview of your PostgreSQL databases. It includes metrics such as replication, locks, and throughput.",
+ "layout": [
+ {
+ "h": 3,
+ "i": "9552123d-6265-48a7-8624-3f4a3fc3c9c0",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 18
+ },
+ {
+ "h": 3,
+ "i": "d7838815-4f5b-4454-86fd-f658b201f3a9",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 15
+ },
+ {
+ "h": 3,
+ "i": "f9a6f683-7455-4643-acc8-467cc5ea52cf",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 15
+ },
+ {
+ "h": 3,
+ "i": "8638a199-20a0-4255-b0a2-3b1ba06c485b",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 3,
+ "y": 12
+ },
+ {
+ "h": 3,
+ "i": "e9341e70-ccb3-47fc-af95-56ba8942c4f2",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 9
+ },
+ {
+ "h": 3,
+ "i": "6b700035-e3c2-4c48-99fa-ebfd6202eed3",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 9
+ },
+ {
+ "h": 3,
+ "i": "bada7864-1d23-4d49-a868-c6b8a93c738f",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 3,
+ "y": 6
+ },
+ {
+ "h": 3,
+ "i": "191d09a6-40b0-4de8-a5b0-aa4254454b99",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 0
+ },
+ {
+ "h": 3,
+ "i": "fa941c00-ce19-49cc-baf2-c38598767dee",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 0
+ },
+ {
+ "h": 3,
+ "i": "114fcf80-e1de-4716-b1aa-0e0738dba10e",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 3
+ },
+ {
+ "h": 3,
+ "i": "667428ef-9b9a-4e91-bd1e-938e0dc1ff32",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 3
+ }
+ ],
+ "name": "",
+ "tags": [
+ "postgres",
+ "database"
+ ],
+ "title": "Postgres overview",
+ "variables": {
+ "4250ef7b-8f42-4a24-902a-a764d070b92d": {
+ "allSelected": true,
+ "customValue": "",
+ "description": "List of hosts sending Postgres metrics",
+ "id": "4250ef7b-8f42-4a24-902a-a764d070b92d",
+ "key": "4250ef7b-8f42-4a24-902a-a764d070b92d",
+ "modificationUUID": "4427b655-c8d2-40ce-84ed-7cb058bd3041",
+ "multiSelect": true,
+ "name": "host_name",
+ "order": 0,
+ "queryValue": "SELECT JSONExtractString(labels, 'host_name') AS host_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'postgresql_operations'\nGROUP BY host_name",
+ "selectedValue": [
+ "Srikanths-MacBook-Pro.local"
+ ],
+ "showALLOption": true,
+ "sort": "ASC",
+ "textboxValue": "",
+ "type": "QUERY"
+ },
+ "8ecaee70-640f-46fd-83d9-a4fd18bc66e6": {
+ "customValue": "",
+ "description": "List of tables",
+ "id": "8ecaee70-640f-46fd-83d9-a4fd18bc66e6",
+ "modificationUUID": "a51321cd-47a2-470a-8df4-372e5bb36f2c",
+ "multiSelect": true,
+ "name": "table_name",
+ "order": 0,
+ "queryValue": "SELECT JSONExtractString(labels, 'postgresql_table_name') AS table_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'postgresql_operations' AND JSONExtractString(labels, 'postgresql_database_name') IN {{.db_name}}\nGROUP BY table_name",
+ "showALLOption": true,
+ "sort": "ASC",
+ "textboxValue": "",
+ "type": "QUERY",
+ "selectedValue": [
+ "public.activations",
+ "public.licenses",
+ "public.plans",
+ "public.subscription_items",
+ "public.subscriptions",
+ "public.trials",
+ "public.usage"
+ ],
+ "allSelected": true
+ },
+ "c66d1581-e5e1-440d-8ff6-ebcf078ab6dd": {
+ "allSelected": true,
+ "customValue": "",
+ "description": "List of databases",
+ "id": "c66d1581-e5e1-440d-8ff6-ebcf078ab6dd",
+ "key": "c66d1581-e5e1-440d-8ff6-ebcf078ab6dd",
+ "modificationUUID": "564a3f43-98f8-4189-b5e4-dcb518d73852",
+ "multiSelect": true,
+ "name": "db_name",
+ "order": 0,
+ "queryValue": "SELECT JSONExtractString(labels, 'postgresql_database_name') AS db_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'postgresql_operations'\nGROUP BY db_name",
+ "selectedValue": [
+ "postgres"
+ ],
+ "showALLOption": true,
+ "sort": "DISABLED",
+ "textboxValue": "",
+ "type": "QUERY"
+ }
+ },
+ "widgets": [
+ {
+ "description": "The average number of db insert operations.",
+ "fillSpans": false,
+ "id": "191d09a6-40b0-4de8-a5b0-aa4254454b99",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_operations--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_operations",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "c1dff946",
+ "key": {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "ins"
+ },
+ {
+ "id": "0cd6dc8f",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "2e60e171",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{postgresql_database_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "bf48ac4c-bc0c-41a0-87f4-6f8ae7888d1f",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Inserts",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "The average number of db update operations.",
+ "fillSpans": false,
+ "id": "fa941c00-ce19-49cc-baf2-c38598767dee",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_operations--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_operations",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "98463ec9",
+ "key": {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "upd"
+ },
+ {
+ "id": "47db4e8e",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "64020332",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{postgresql_database_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "34a6ac3a-b7f6-4b5f-a084-a44378033d82",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Updates",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "The average number of db delete operations.",
+ "fillSpans": false,
+ "id": "114fcf80-e1de-4716-b1aa-0e0738dba10e",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_operations--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_operations",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "62738de4",
+ "key": {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "del"
+ },
+ {
+ "id": "d18471e2",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "9d153899",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{postgresql_database_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "734393d1-76ed-4f4f-bef8-0a91d27ebec4",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+      "title": "Deletes",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "The average number of db heap-only update operations.",
+ "fillSpans": false,
+ "id": "667428ef-9b9a-4e91-bd1e-938e0dc1ff32",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_operations--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_operations",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "a91e35c4",
+ "key": {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "hot_upd"
+ },
+ {
+ "id": "2b419378",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "7b4a29a2",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{postgresql_database_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "f43c2d19-4abc-4f5e-881b-db7add4a870a",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Heap updates",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "bada7864-1d23-4d49-a868-c6b8a93c738f",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "table",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_operations--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_operations",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "d6aeccf7",
+ "key": {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "ins"
+ },
+ {
+ "id": "4004a127",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "ee4e9344",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Inserted",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ },
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_operations--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_operations",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "B",
+ "filters": {
+ "items": [
+ {
+ "id": "a12cceed",
+ "key": {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "upd"
+ },
+ {
+ "id": "11735104",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "2d542482",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Updated",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "B",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ },
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_operations--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_operations",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "C",
+ "filters": {
+ "items": [
+ {
+ "id": "1bca3e46",
+ "key": {
+ "dataType": "string",
+ "id": "operation--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "operation",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "del"
+ },
+ {
+ "id": "3631755d",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "44ffc874",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Deleted",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "C",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "5056105b-1c30-4d27-8187-64457f2a1ec6",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+      "title": "Operations by database",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "The number of database locks.",
+ "fillSpans": false,
+ "id": "6b700035-e3c2-4c48-99fa-ebfd6202eed3",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_database_locks--float64--Gauge--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_database_locks",
+ "type": "Gauge"
+ },
+ "aggregateOperator": "sum",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "527a3124",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "mode--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "mode",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{mode}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "877b0df3-9ae3-455e-ad27-bc3aa40b3f4c",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Locks by lock mode",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "e9341e70-ccb3-47fc-af95-56ba8942c4f2",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_deadlocks--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_deadlocks",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum_rate",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "ff14f172",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "efb83717",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{postgresql_database_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "5056105b-1c30-4d27-8187-64457f2a1ec6",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Deadlocks count",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "8638a199-20a0-4255-b0a2-3b1ba06c485b",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_backends--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_backends",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "ed335b00",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "20d2a4c5",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{postgresql_database_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "205b99a0-2f1c-4bd2-9ba0-cc2da6ef247a",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Connections per db",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "f9a6f683-7455-4643-acc8-467cc5ea52cf",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_rows--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_rows",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "70786905",
+ "key": {
+ "dataType": "string",
+ "id": "state--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "state",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "dead"
+ },
+ {
+ "id": "810e39a9",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "3e5ef839",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ },
+ {
+ "id": "9e913563",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_table_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_table_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.table_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [],
+ "having": [],
+ "legend": "Dead rows",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "cc7452c8-118b-4676-959e-7062bafc41ee",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Dead rows",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "d7838815-4f5b-4454-86fd-f658b201f3a9",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_index_scans--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_index_scans",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum_rate",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "da04d826",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "590332a7",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ },
+ {
+ "id": "171b9516",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_table_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_table_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.table_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_index_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_index_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "{{postgresql_index_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "2c6b630b-8bd9-4001-815b-f2b1f439a9dd",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Index scans by index",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "9552123d-6265-48a7-8624-3f4a3fc3c9c0",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "table",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_rows--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_rows",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "83f9cab9",
+ "key": {
+ "dataType": "string",
+ "id": "state--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "state",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "dead"
+ },
+ {
+ "id": "2a0284c2",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "c2aaf758",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ },
+ {
+ "id": "a603fda9",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_table_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_table_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.table_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_table_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_table_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Dead rows",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ },
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_rows--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_rows",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "B",
+ "filters": {
+ "items": [
+ {
+ "id": "82f1f0f5",
+ "key": {
+ "dataType": "string",
+ "id": "state--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "state",
+ "type": "tag"
+ },
+ "op": "=",
+ "value": "live"
+ },
+ {
+ "id": "14de7a06",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "0a88a27a",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ },
+ {
+ "id": "4417218d",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_table_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_table_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.table_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_table_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_table_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Live rows",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "B",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ },
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_index_scans--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_index_scans",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum_rate",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "C",
+ "filters": {
+ "items": [
+ {
+ "id": "22795c15",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "d7e7c193",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ },
+ {
+ "id": "d3ae1dbe",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_table_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_table_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.table_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_table_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_table_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Index scans",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "C",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ },
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "postgresql_table_size--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "postgresql_table_size",
+ "type": "Sum"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "D",
+ "filters": {
+ "items": [
+ {
+ "id": "48c436ab",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ },
+ {
+ "id": "cc617789",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_database_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_database_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.db_name}}"
+ ]
+ },
+ {
+ "id": "b4029d50",
+ "key": {
+ "dataType": "string",
+ "id": "postgresql_table_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_table_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.table_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "postgresql_table_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "postgresql_table_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Table size",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "D",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "26a9dcbf-4fc7-4ddd-b786-2078def1f462",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Table stats",
+ "yAxisUnit": "none"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-logs.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-logs.md
new file mode 100644
index 0000000000..0c199061a7
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-logs.md
@@ -0,0 +1,111 @@
+### Collect Postgres Logs
+
+You can configure Postgres logs collection by providing the required collector config to your collector.
+
+#### Create collector config file
+
+Save the following config for collecting Postgres logs in a file named `postgres-logs-collection-config.yaml`.
+
+```yaml
+receivers:
+ filelog/postgresql:
+ include: ["${env:POSTGRESQL_LOG_FILE}"]
+ operators:
+ # Parse default postgresql text log format.
+ # `log_line_prefix` postgres setting defaults to '%m [%p] ' which logs the timestamp and the process ID
+ # See https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-LOG-LINE-PREFIX for more details
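+ # e.g. with the default prefix, a matching line looks like (hypothetical message):
+ # 2024-02-15 10:23:45.123 UTC [12345] LOG: database system is ready to accept connections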
+ - type: regex_parser
+ if: body matches '^(?P<ts>\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.?[0-9]*? [A-Z]*) \\[(?P<pid>[0-9]+)\\] (?P<log_level>[A-Z]*). (?P<message>.*)$'
+ parse_from: body
+ regex: '^(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.?[0-9]*? [A-Z]*) \[(?P<pid>[0-9]+)\] (?P<log_level>[A-Z]*). (?P<message>.*)$'
+ timestamp:
+ parse_from: attributes.ts
+ layout: '%Y-%m-%d %H:%M:%S %Z'
+ severity:
+ parse_from: attributes.log_level
+ mapping:
+ debug:
+ - DEBUG1
+ - DEBUG2
+ - DEBUG3
+ - DEBUG4
+ - DEBUG5
+ info:
+ - INFO
+ - LOG
+ - NOTICE
+ - DETAIL
+ warn: WARNING
+ error: ERROR
+ fatal:
+ - FATAL
+ - PANIC
+ on_error: send
+ - type: move
+ if: attributes.message != nil
+ from: attributes.message
+ to: body
+ - type: remove
+ if: attributes.log_level != nil
+ field: attributes.log_level
+ - type: remove
+ if: attributes.ts != nil
+ field: attributes.ts
+ - type: add
+ field: attributes.source
+ value: postgres
+
+processors:
+ batch:
+ send_batch_size: 10000
+ send_batch_max_size: 11000
+ timeout: 10s
+
+exporters:
+ # export to SigNoz cloud
+ otlp/postgres-logs:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/postgres-logs:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
+
+service:
+ pipelines:
+ logs/postgresql:
+ receivers: [filelog/postgresql]
+ processors: [batch]
+ exporters: [otlp/postgres-logs]
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# path of the Postgres server log file; must be accessible by the otel collector
+export POSTGRESQL_LOG_FILE=/usr/local/var/log/postgres.log
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
+```
+
+#### Use collector config file
+
+Make the collector config file available to your otel collector and use it by adding the following flag to the command for running your collector:
+```bash
+--config postgres-logs-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
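+
+For example, assuming the contrib distribution binary `otelcol-contrib` and a hypothetical base config file, the full run command could look like this:
+
+```bash
+otelcol-contrib --config ./base-config.yaml --config ./postgres-logs-collection-config.yaml
+```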
+
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-metrics.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-metrics.md
new file mode 100644
index 0000000000..94a6fc7609
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-metrics.md
@@ -0,0 +1,101 @@
+### Collect Postgres Metrics
+
+You can configure Postgres metrics collection by providing the required collector config to your collector.
+
+#### Create collector config file
+
+Save the following config for collecting Postgres metrics in a file named `postgres-metrics-collection-config.yaml`.
+
+```yaml
+receivers:
+ postgresql:
+ # The endpoint of the postgresql server. Whether using TCP or Unix sockets, this value should be host:port. If transport is set to unix, the endpoint will internally be translated from host:port to /host.s.PGSQL.port
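+ # e.g. "localhost:5432" for a typical local TCP setup (placeholder; adjust for your deployment)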
+ endpoint: ${env:POSTGRESQL_ENDPOINT}
+ # The frequency at which to collect metrics from the Postgres instance.
+ collection_interval: 60s
+ # The username used to access the postgres instance
+ username: ${env:POSTGRESQL_USERNAME}
+ # The password used to access the postgres instance
+ password: ${env:POSTGRESQL_PASSWORD}
+ # The list of databases for which the receiver will attempt to collect statistics. If an empty list is provided, the receiver will attempt to collect statistics for all non-template databases
+ databases: []
+ # # Defines the network to use for connecting to the server. Valid values are `tcp` or `unix`
+ # transport: tcp
+ tls:
+ # set to false if SSL is enabled on the server
+ insecure: true
+ # ca_file: /etc/ssl/certs/ca-certificates.crt
+ # cert_file: /etc/ssl/certs/postgres.crt
+ # key_file: /etc/ssl/certs/postgres.key
+ metrics:
+ postgresql.database.locks:
+ enabled: true
+ postgresql.deadlocks:
+ enabled: true
+ postgresql.sequential_scans:
+ enabled: true
+
+processors:
+ # enriches the data with additional host information
+ # see https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor
+ resourcedetection/system:
+ # add additional detectors if needed
+ detectors: ["system"]
+ system:
+ hostname_sources: ["os"]
+
+exporters:
+ # export to SigNoz cloud
+ otlp/postgres:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/postgres:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
+
+service:
+ pipelines:
+ metrics/postgresql:
+ receivers: [postgresql]
+ # note: remove this processor if the collector is not running on the same host as the postgres instance
+ processors: [resourcedetection/system]
+ exporters: [otlp/postgres]
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# username of the Postgres monitoring user
+export POSTGRESQL_USERNAME="monitoring"
+
+# password of the Postgres monitoring user
+export POSTGRESQL_PASSWORD="<PASSWORD>"
+
+# Postgres endpoint reachable from the otel collector
+export POSTGRESQL_ENDPOINT="host:port"
+
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
+```
+
+#### Use collector config file
+
+Make the collector config file available to your otel collector and use it by adding the following flag to the command for running your collector:
+```bash
+--config postgres-metrics-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md
new file mode 100644
index 0000000000..e50282d2a8
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md
@@ -0,0 +1,40 @@
+## Before You Begin
+
+To configure metrics and logs collection for a Postgres server, you need the following.
+
+### Ensure Postgres server is prepared for monitoring
+
+- **Ensure that the Postgres server is running a supported version**
+ Postgres versions 9.6+ are supported.
+ You can use the following SQL statement to determine the server version:
+ ```SQL
+ SELECT version();
+ ```
+
+- **If collecting metrics, ensure that there is a Postgres user with the required permissions**
+ To create a monitoring user for Postgres versions 10+, run:
+ ```SQL
+ create user monitoring with password '<PASSWORD>';
+ grant pg_monitor to monitoring;
+ grant SELECT ON pg_stat_database to monitoring;
+ ```
+
+ To create a monitoring user for Postgres versions >= 9.6 and <10, run:
+ ```SQL
+ create user monitoring with password '<PASSWORD>';
+ grant SELECT ON pg_stat_database to monitoring;
+ ```
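+
+ To verify the grants, you can try reading statistics as the monitoring user; host and password below are placeholders:
+ ```bash
+ psql "host=localhost user=monitoring password=<PASSWORD> dbname=postgres" \
+ -c "SELECT datname, numbackends FROM pg_stat_database;"
+ ```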
+
+
+### Ensure OTEL Collector is running with access to the Postgres server
+
+- **Ensure that an OTEL collector is running in your deployment environment**
+ If needed, please [install an OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
+ If already installed, ensure that the collector version is v0.88.0 or newer.
+
+ Also ensure that you can provide config files to the collector and that you can set environment variables and command line flags used for running it.
+
+- **Ensure that the OTEL collector can access the Postgres server**
+ In order to collect metrics, the collector must be able to access the Postgres server as a client using the monitoring user.
+
+ In order to collect logs, the collector must be able to read the Postgres server log file.
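+
+ As a quick sanity check, assuming default paths and ports (placeholders; adjust for your setup):
+ ```bash
+ pg_isready -h localhost -p 5432 # checks that the server accepts connections
+ tail -n 5 /usr/local/var/log/postgres.log # checks that the log file is readable
+ ```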
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/icon.svg b/pkg/query-service/app/integrations/builtin_integrations/postgres/icon.svg
new file mode 100644
index 0000000000..32bcf493ef
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/icon.svg
@@ -0,0 +1,13 @@
+<!-- PostgreSQL icon SVG markup (not preserved) -->
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json b/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json
new file mode 100644
index 0000000000..823ba61223
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json
@@ -0,0 +1,242 @@
+{
+ "id": "postgres",
+ "title": "PostgreSQL",
+ "description": "Monitor Postgres with metrics and logs",
+ "author": {
+ "name": "SigNoz",
+ "email": "integrations@signoz.io",
+ "homepage": "https://signoz.io"
+ },
+ "icon": "file://icon.svg",
+ "categories": [
+ "Database"
+ ],
+ "overview": "file://overview.md",
+ "configuration": [
+ {
+ "title": "Prerequisites",
+ "instructions": "file://config/prerequisites.md"
+ },
+ {
+ "title": "Collect Metrics",
+ "instructions": "file://config/collect-metrics.md"
+ },
+ {
+ "title": "Collect Logs",
+ "instructions": "file://config/collect-logs.md"
+ }
+ ],
+ "assets": {
+ "logs": {
+ "pipelines": []
+ },
+ "dashboards": [
+ "file://assets/dashboards/overview.json"
+ ],
+ "alerts": []
+ },
+ "connection_tests": {
+ "logs": {
+ "op": "AND",
+ "items": [
+ {
+ "key": {
+ "type": "tag",
+ "key": "source",
+ "dataType": "string"
+ },
+ "op": "=",
+ "value": "postgres"
+ }
+ ]
+ }
+ },
+ "data_collected": {
+ "logs": [
+ {
+ "name": "Process ID",
+ "path": "attributes.pid",
+ "type": "string"
+ },
+ {
+ "name": "Timestamp",
+ "path": "timestamp",
+ "type": "timestamp"
+ },
+ {
+ "name": "Severity Text",
+ "path": "severity_text",
+ "type": "string"
+ },
+ {
+ "name": "Severity Number",
+ "path": "severity_number",
+ "type": "number"
+ }
+ ],
+ "metrics": [
+ {
+ "name": "postgresql_backends",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of backends."
+ },
+ {
+ "name": "postgresql_bgwriter_buffers_allocated",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of buffers allocated."
+ },
+ {
+ "name": "postgresql_bgwriter_buffers_writes",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of buffers written."
+ },
+ {
+ "name": "postgresql_bgwriter_checkpoint_count",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of checkpoints performed."
+ },
+ {
+ "name": "postgresql_bgwriter_duration",
+ "type": "sum",
+ "unit": "ms",
+ "description": "Total time spent writing and syncing files to disk by checkpoints."
+ },
+ {
+ "name": "postgresql_bgwriter_maxwritten",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of times the background writer stopped a cleaning scan because it had written too many buffers."
+ },
+ {
+ "name": "postgresql_blocks_read",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of blocks read."
+ },
+ {
+ "name": "postgresql_commits",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of commits."
+ },
+ {
+ "name": "postgresql_connection_max",
+ "type": "gauge",
+ "unit": "number",
+ "description": "Configured maximum number of client connections allowed"
+ },
+ {
+ "name": "postgresql_database_count",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of user databases."
+ },
+ {
+ "name": "postgresql_database_locks",
+ "type": "gauge",
+ "unit": "number",
+ "description": "The number of database locks."
+ },
+ {
+ "name": "postgresql_db_size",
+ "type": "sum",
+ "unit": "Bytes",
+ "description": "The database disk usage."
+ },
+ {
+ "name": "postgresql_deadlocks",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of deadlocks."
+ },
+ {
+ "name": "postgresql_index_scans",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of index scans on a table."
+ },
+ {
+ "name": "postgresql_index_size",
+ "type": "gauge",
+ "unit": "Bytes",
+ "description": "The size of the index on disk."
+ },
+ {
+ "name": "postgresql_operations",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of db row operations."
+ },
+ {
+ "name": "postgresql_replication_data_delay",
+ "type": "gauge",
+ "unit": "Bytes",
+ "description": "The amount of data delayed in replication."
+ },
+ {
+ "name": "postgresql_rollbacks",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of rollbacks."
+ },
+ {
+ "name": "postgresql_rows",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of rows in the database."
+ },
+ {
+ "name": "postgresql_sequential_scans",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of sequential scans."
+ },
+ {
+ "name": "postgresql_table_count",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of user tables in a database."
+ },
+ {
+ "name": "postgresql_table_size",
+ "type": "sum",
+ "unit": "Bytes",
+ "description": "Disk space used by a table."
+ },
+ {
+ "name": "postgresql_table_vacuum_count",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of times a table has manually been vacuumed."
+ },
+ {
+ "name": "postgresql_temp_files",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of temp files."
+ },
+ {
+ "name": "postgresql_wal_age",
+ "type": "gauge",
+ "unit": "seconds",
+ "description": "Age of the oldest WAL file."
+ },
+ {
+ "name": "postgresql_wal_delay",
+ "type": "gauge",
+ "unit": "seconds",
+ "description": "Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it."
+ },
+ {
+ "name": "postgresql_wal_lag",
+ "type": "gauge",
+ "unit": "seconds",
+ "description": "Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it."
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md
new file mode 100644
index 0000000000..ac6e061eca
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md
@@ -0,0 +1,5 @@
+### Monitor Postgres with SigNoz
+
+Collect key Postgres metrics and view them with an out of the box dashboard.
+
+Collect and parse Postgres logs to populate timestamp, severity, and other log attributes for better querying and aggregation.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/assets/dashboards/overview.json b/pkg/query-service/app/integrations/builtin_integrations/redis/assets/dashboards/overview.json
new file mode 100644
index 0000000000..3fd2c255ce
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/assets/dashboards/overview.json
@@ -0,0 +1,924 @@
+{
+ "id": "redis-overview",
+ "description": "This dashboard shows the Redis instance overview. It includes latency, hit/miss rate, connections, and memory information.\n",
+ "layout": [
+ {
+ "h": 3,
+ "i": "d4c164bc-8fc2-4dbc-aadd-8d17479ca649",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 9
+ },
+ {
+ "h": 3,
+ "i": "2fbaef0d-3cdb-4ce3-aa3c-9bbbb41786d9",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 3,
+ "y": 6
+ },
+ {
+ "h": 3,
+ "i": "f5ee1511-0d2b-4404-9ce0-e991837decc2",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 3
+ },
+ {
+ "h": 3,
+ "i": "b19c7058-b806-4ea2-974a-ca555b168991",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 3
+ },
+ {
+ "h": 3,
+ "i": "bf0deeeb-e926-4234-944c-82bacd96af47",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 0
+ },
+ {
+ "h": 3,
+ "i": "a77227c7-16f5-4353-952e-b183c715a61c",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 0
+ },
+ {
+ "h": 3,
+ "i": "9698cee2-b1f3-4c0b-8c9f-3da4f0e05f17",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 9
+ },
+ {
+ "h": 3,
+ "i": "64a5f303-d7db-44ff-9a0e-948e5c653320",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 0,
+ "y": 12
+ },
+ {
+ "h": 3,
+ "i": "3e80a918-69af-4c9a-bc57-a94e1d41b05c",
+ "moved": false,
+ "static": false,
+ "w": 6,
+ "x": 6,
+ "y": 12
+ }
+ ],
+ "name": "",
+ "tags": [
+ "redis",
+ "database"
+ ],
+ "title": "Redis overview",
+ "variables": {
+ "94f19b3c-ad9f-4b47-a9b2-f312c09fa965": {
+ "allSelected": true,
+ "customValue": "",
+ "description": "List of hosts sending Redis metrics",
+ "id": "94f19b3c-ad9f-4b47-a9b2-f312c09fa965",
+ "key": "94f19b3c-ad9f-4b47-a9b2-f312c09fa965",
+ "modificationUUID": "4c5b0c03-9cbc-425b-8d8e-7152e5c39ba8",
+ "multiSelect": true,
+ "name": "host_name",
+ "order": 0,
+ "queryValue": "SELECT JSONExtractString(labels, 'host_name') AS host_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'redis_cpu_time'\nGROUP BY host_name",
+ "selectedValue": [
+ "Srikanths-MacBook-Pro.local"
+ ],
+ "showALLOption": true,
+ "sort": "ASC",
+ "textboxValue": "",
+ "type": "QUERY"
+ }
+ },
+ "widgets": [
+ {
+ "description": "Rate successful lookup of keys in the main dictionary",
+ "fillSpans": false,
+ "id": "a77227c7-16f5-4353-952e-b183c715a61c",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "redis_keyspace_hits--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "redis_keyspace_hits",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum_rate",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "e99669ea",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [],
+ "having": [],
+ "legend": "Hit/s across all hosts",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "42c9c117-bfaf-49f7-b528-aad099392295",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Hits/s",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "Number of clients pending on a blocking call",
+ "fillSpans": false,
+ "id": "bf0deeeb-e926-4234-944c-82bacd96af47",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "redis_clients_blocked--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "redis_clients_blocked",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "97247f25",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [],
+ "having": [],
+ "legend": "Blocked clients across all hosts",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "b77a9e11-fb98-4a95-88a8-c3ad25c14369",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Clients blocked",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "b19c7058-b806-4ea2-974a-ca555b168991",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "",
+ "id": "redis_db_keys------false",
+ "isColumn": false,
+ "key": "redis_db_keys",
+ "type": ""
+ },
+ "aggregateOperator": "sum",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [],
+ "op": "AND"
+ },
+ "groupBy": [],
+ "having": [],
+ "legend": "",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "b77a9e11-fb98-4a95-88a8-c3ad25c14369",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Keyspace Keys",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "Number of changes since the last dump",
+ "fillSpans": false,
+ "id": "f5ee1511-0d2b-4404-9ce0-e991837decc2",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "redis_rdb_changes_since_last_save--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "redis_rdb_changes_since_last_save",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "d4aef346",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [],
+ "having": [],
+ "legend": "Number of unsaved changes",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "32cedddf-606d-4de1-8c1d-4b7049e6430c",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Unsaved changes",
+ "yAxisUnit": "none"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "2fbaef0d-3cdb-4ce3-aa3c-9bbbb41786d9",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "redis_commands--float64--Gauge--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "redis_commands",
+ "type": "Gauge"
+ },
+ "aggregateOperator": "sum",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "458dc402",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [],
+ "having": [],
+ "legend": "ops/s",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "c70de4dd-a68a-42df-a249-6610c296709c",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Command/s",
+ "yAxisUnit": "ops"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "d4c164bc-8fc2-4dbc-aadd-8d17479ca649",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "redis_memory_used--float64--Gauge--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "redis_memory_used",
+ "type": "Gauge"
+ },
+ "aggregateOperator": "sum",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "394a537e",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Used::{{host_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ },
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "redis_maxmemory--float64--Gauge--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "redis_maxmemory",
+ "type": "Gauge"
+ },
+ "aggregateOperator": "max",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "B",
+ "filters": {
+ "items": [
+ {
+ "id": "0c0754da",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Max::{{host_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "B",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "2f47df76-f09e-4152-8623-971f0fe66bfe",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Memory usage",
+ "yAxisUnit": "bytes"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "9698cee2-b1f3-4c0b-8c9f-3da4f0e05f17",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "redis_memory_rss--float64--Gauge--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "redis_memory_rss",
+ "type": "Gauge"
+ },
+ "aggregateOperator": "sum",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "4dc9ae49",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Rss::{{host_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "fddd043c-1385-481c-9f4c-381f261e1dd9",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "RSS Memory",
+ "yAxisUnit": "bytes"
+ },
+ {
+ "description": "",
+ "fillSpans": false,
+ "id": "64a5f303-d7db-44ff-9a0e-948e5c653320",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "redis_memory_fragmentation_ratio--float64--Gauge--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "redis_memory_fragmentation_ratio",
+ "type": "Gauge"
+ },
+ "aggregateOperator": "avg",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "79dc25f3",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Rss::{{host_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "3e802b07-0249-4d79-a5c7-6580ab535ad0",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Fragmentation ratio",
+ "yAxisUnit": "short"
+ },
+ {
+ "description": "Number of evicted keys due to maxmemory limit",
+ "fillSpans": false,
+ "id": "3e80a918-69af-4c9a-bc57-a94e1d41b05c",
+ "isStacked": false,
+ "nullZeroValues": "zero",
+ "opacity": "1",
+ "panelTypes": "graph",
+ "query": {
+ "builder": {
+ "queryData": [
+ {
+ "aggregateAttribute": {
+ "dataType": "float64",
+ "id": "redis_keys_evicted--float64--Sum--true",
+ "isColumn": true,
+ "isJSON": false,
+ "key": "redis_keys_evicted",
+ "type": "Sum"
+ },
+ "aggregateOperator": "sum_rate",
+ "dataSource": "metrics",
+ "disabled": false,
+ "expression": "A",
+ "filters": {
+ "items": [
+ {
+ "id": "53d189ac",
+ "key": {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ },
+ "op": "in",
+ "value": [
+ "{{.host_name}}"
+ ]
+ }
+ ],
+ "op": "AND"
+ },
+ "groupBy": [
+ {
+ "dataType": "string",
+ "id": "host_name--string--tag--false",
+ "isColumn": false,
+ "isJSON": false,
+ "key": "host_name",
+ "type": "tag"
+ }
+ ],
+ "having": [],
+ "legend": "Rss::{{host_name}}",
+ "limit": null,
+ "orderBy": [],
+ "queryName": "A",
+ "reduceTo": "sum",
+ "stepInterval": 60
+ }
+ ],
+ "queryFormulas": []
+ },
+ "clickhouse_sql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "id": "15d1d9d7-eb10-464b-aa7b-33ff211996f7",
+ "promql": [
+ {
+ "disabled": false,
+ "legend": "",
+ "name": "A",
+ "query": ""
+ }
+ ],
+ "queryType": "builder"
+ },
+ "softMax": null,
+ "softMin": null,
+ "thresholds": [],
+ "timePreferance": "GLOBAL_TIME",
+ "title": "Eviction rate",
+ "yAxisUnit": "short"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-logs.md b/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-logs.md
new file mode 100644
index 0000000000..e8b26ef710
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-logs.md
@@ -0,0 +1,102 @@
+### Collect Redis Logs
+
+You can configure Redis logs collection by providing the required collector config to your collector.
+
+#### Create collector config file
+
+Save the following config for collecting Redis logs in a file named `redis-logs-collection-config.yaml`.
+
+```yaml
+receivers:
+ filelog/redis:
+ include: ["${env:REDIS_LOG_FILE}"]
+ operators:
+ # Parse default redis log format
+ # pid:role timestamp log_level message
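+ # e.g. a matching line looks like (hypothetical message):
+ # 12345:M 15 Feb 2024 10:23:45.123 * Ready to accept connections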
+ - type: regex_parser
+ if: body matches '^(?P<pid>\\d+):(?P<role>\\w+) (?P<ts>\\d{2} \\w+ \\d{4} \\d{2}:\\d{2}:\\d{2}\\.\\d+) (?P<log_level>[.\\-*#]) (?P<message>.*)$'
+ parse_from: body
+ regex: '^(?P<pid>\d+):(?P<role>\w+) (?P<ts>\d{2} \w+ \d{4} \d{2}:\d{2}:\d{2}\.\d+) (?P<log_level>[.\-*#]) (?P<message>.*)$'
+ timestamp:
+ parse_from: attributes.ts
+ layout: '02 Jan 2006 15:04:05.000'
+ layout_type: gotime
+ severity:
+ parse_from: attributes.log_level
+ overwrite_text: true
+ mapping:
+ debug: '.'
+ info:
+ - '-'
+ - '*'
+ warn: '#'
+ on_error: send
+ - type: move
+ if: attributes.message != nil
+ from: attributes.message
+ to: body
+ - type: remove
+ if: attributes.log_level != nil
+ field: attributes.log_level
+ - type: remove
+ if: attributes.ts != nil
+ field: attributes.ts
+ - type: add
+ field: attributes.source
+ value: redis
+
+processors:
+ batch:
+ send_batch_size: 10000
+ send_batch_max_size: 11000
+ timeout: 10s
+
+exporters:
+ # export to SigNoz cloud
+ otlp/redis-logs:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/redis-logs:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
+
+
+service:
+ pipelines:
+ logs/redis:
+ receivers: [filelog/redis]
+ processors: [batch]
+ exporters: [otlp/redis-logs]
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# path of the Redis server log file; must be accessible by the otel collector
+export REDIS_LOG_FILE=/var/log/redis.log
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
+```
+
+#### Use collector config file
+
+Make the collector config file available to your otel collector and use it by adding the following flag to the command for running your collector:
+```bash
+--config redis-logs-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
+
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-metrics.md b/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-metrics.md
new file mode 100644
index 0000000000..1b6e4259b7
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-metrics.md
@@ -0,0 +1,93 @@
+### Collect Redis Metrics
+
+You can configure Redis metrics collection by providing the required collector config to your collector.
+
+#### Create collector config file
+
+Save the following config for collecting Redis metrics in a file named `redis-metrics-collection-config.yaml`.
+
+
+```yaml
+receivers:
+ redis:
+ # The hostname and port of the Redis instance, separated by a colon.
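+ # e.g. "localhost:6379" (placeholder; adjust for your deployment)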
+ endpoint: ${env:REDIS_ENDPOINT}
+ # The frequency at which to collect metrics from the Redis instance.
+ collection_interval: 60s
+ # The password used to access the Redis instance; must match the password specified in the requirepass server configuration option.
+ password: ${env:REDIS_PASSWORD}
+ # # Defines the network to use for connecting to the server. Valid values are `tcp` or `unix`
+ # transport: tcp
+ # tls:
+ # insecure: false
+ # ca_file: /etc/ssl/certs/ca-certificates.crt
+ # cert_file: /etc/ssl/certs/redis.crt
+ # key_file: /etc/ssl/certs/redis.key
+ metrics:
+ redis.maxmemory:
+ enabled: true
+ redis.cmd.latency:
+ enabled: true
+
+processors:
+ # enriches the data with additional host information
+ # see https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor
+ resourcedetection/system:
+ # add additional detectors if needed
+ detectors: ["system"]
+ system:
+ hostname_sources: ["os"]
+
+exporters:
+ # export to SigNoz cloud
+ otlp/redis:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/redis:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
+
+service:
+ pipelines:
+ metrics/redis:
+ receivers: [redis]
+ # note: remove this processor if the collector is not running on the same host as the redis instance
+ processors: [resourcedetection/system]
+ exporters: [otlp/redis]
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# Redis endpoint reachable from the otel collector
+export REDIS_ENDPOINT="localhost:6379"
+
+# password used to access the Redis instance.
+# must match the password specified in the requirepass server configuration option.
+# can be left empty if the redis server is not configured to require a password.
+export REDIS_PASSWORD=""
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
+```
+
+#### Use collector config file
+
+Make the collector config file available to your otel collector and use it by adding the following flag to the command for running your collector:
+```bash
+--config redis-metrics-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/config/prerequisites.md b/pkg/query-service/app/integrations/builtin_integrations/redis/config/prerequisites.md
new file mode 100644
index 0000000000..ea0b553abc
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/config/prerequisites.md
@@ -0,0 +1,20 @@
+## Before You Begin
+
+To configure metrics and logs collection for a Redis server, you need the following.
+
+### Ensure Redis server is running a supported version
+
+Redis server versions newer than 3.0 are supported.
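+
+You can check the server version with `redis-cli`, assuming the server is reachable on the default localhost:6379:
+
+```bash
+redis-cli -h localhost -p 6379 INFO server | grep redis_version
+```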
+
+### Ensure OTEL Collector is running with access to the Redis server
+
+#### Ensure that an OTEL collector is running in your deployment environment
+If needed, please [install an OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
+If already installed, ensure that the collector version is v0.88.0 or newer.
+
+Also ensure that you can provide config files to the collector and that you can set environment variables and command line flags used for running it.
+
+#### Ensure that the OTEL collector can access the Redis server
+In order to collect metrics, the collector must be able to access the Redis server as a client.
+
+In order to collect logs, the collector must be able to read the Redis server log file.
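+
+As a quick sanity check, assuming a local server with no password and a placeholder log path:
+
+```bash
+redis-cli -h localhost -p 6379 PING # expect: PONG
+tail -n 5 /var/log/redis.log # confirms the log file is readable
+```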
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/icon.svg b/pkg/query-service/app/integrations/builtin_integrations/redis/icon.svg
new file mode 100644
index 0000000000..63e5dfd2e4
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/icon.svg
@@ -0,0 +1,11 @@
+<!-- Redis icon SVG markup (not preserved) -->
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json b/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json
new file mode 100644
index 0000000000..e3f5ef2e3c
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json
@@ -0,0 +1,271 @@
+{
+ "id": "redis",
+ "title": "Redis",
+ "description": "Monitor redis with metrics and logs",
+ "author": {
+ "name": "SigNoz",
+ "email": "integrations@signoz.io",
+ "homepage": "https://signoz.io"
+ },
+ "icon": "file://icon.svg",
+ "categories": [
+ "Database"
+ ],
+ "overview": "file://overview.md",
+ "configuration": [
+ {
+ "title": "Prerequisites",
+ "instructions": "file://config/prerequisites.md"
+ },
+ {
+ "title": "Collect Metrics",
+ "instructions": "file://config/collect-metrics.md"
+ },
+ {
+ "title": "Collect Logs",
+ "instructions": "file://config/collect-logs.md"
+ }
+ ],
+ "assets": {
+ "logs": {
+ "pipelines": []
+ },
+ "dashboards": [
+ "file://assets/dashboards/overview.json"
+ ],
+ "alerts": []
+ },
+ "connection_tests": {
+ "logs": {
+ "op": "AND",
+ "items": [
+ {
+ "key": {
+ "type": "tag",
+ "key": "source",
+ "dataType": "string"
+ },
+ "op": "=",
+ "value": "redis"
+ }
+ ]
+ }
+ },
+ "data_collected": {
+ "logs": [
+ {
+ "name": "Process ID",
+ "path": "attributes.pid",
+ "type": "string"
+ },
+ {
+ "name": "Process Role",
+ "path": "attributes.role",
+ "type": "string"
+ },
+ {
+ "name": "Timestamp",
+ "path": "timestamp",
+ "type": "timestamp"
+ },
+ {
+ "name": "Severity Text",
+ "path": "severity_text",
+ "type": "string"
+ },
+ {
+ "name": "Severity Number",
+ "path": "severity_number",
+ "type": "number"
+ }
+ ],
+ "metrics": [
+ {
+ "name": "redis_commands_processed",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Total number of commands processed by the server"
+ },
+ {
+ "name": "redis_cpu_time",
+ "type": "Sum",
+ "unit": "s",
+ "description": "System CPU consumed by the Redis server in seconds since server start"
+ },
+ {
+ "name": "redis_keys_expired",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Total number of key expiration events"
+ },
+ {
+ "name": "redis_db_expires",
+ "type": "Gauge",
+ "unit": "number",
+ "description": "Number of keyspace keys with an expiration"
+ },
+ {
+ "name": "redis_commands",
+ "type": "Gauge",
+ "unit": "ops/s",
+ "description": "Number of commands processed per second"
+ },
+ {
+ "name": "redis_replication_offset",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "The server's current replication offset"
+ },
+ {
+ "name": "redis_net_input",
+ "type": "Sum",
+ "unit": "Bytes",
+ "description": "The total number of bytes read from the network"
+ },
+ {
+ "name": "redis_clients_connected",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of client connections (excluding connections from replicas)"
+ },
+ {
+ "name": "redis_keys_evicted",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of evicted keys due to maxmemory limit"
+ },
+ {
+ "name": "redis_maxmemory",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "The value of the maxmemory configuration directive"
+ },
+ {
+ "name": "redis_clients_max_input_buffer",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Biggest input buffer among current client connections"
+ },
+ {
+ "name": "redis_cmd_latency",
+ "type": "Gauge",
+ "unit": "s",
+ "description": "Command execution latency"
+ },
+ {
+ "name": "redis_memory_lua",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Number of bytes used by the Lua engine"
+ },
+ {
+ "name": "redis_replication_backlog_first_byte_offset",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "The master offset of the replication backlog buffer"
+ },
+ {
+ "name": "redis_keyspace_hits",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of successful lookup of keys in the main dictionary"
+ },
+ {
+ "name": "redis_clients_blocked",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of clients pending on a blocking call"
+ },
+ {
+ "name": "redis_connections_rejected",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of connections rejected because of maxclients limit"
+ },
+ {
+ "name": "redis_latest_fork",
+ "type": "Gauge",
+ "unit": "us",
+ "description": "Duration of the latest fork operation in microseconds"
+ },
+ {
+ "name": "redis_clients_max_output_buffer",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Longest output list among current client connections"
+ },
+ {
+ "name": "redis_slaves_connected",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of connected replicas"
+ },
+ {
+ "name": "redis_db_keys",
+ "type": "Gauge",
+ "unit": "number",
+ "description": "Number of keyspace keys"
+ },
+ {
+ "name": "redis_keyspace_misses",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of failed lookup of keys in the main dictionary"
+ },
+ {
+ "name": "redis_uptime",
+ "type": "Sum",
+ "unit": "s",
+ "description": "Number of seconds since Redis server start"
+ },
+ {
+ "name": "redis_memory_used",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Total number of bytes allocated by Redis using its allocator"
+ },
+ {
+ "name": "redis_net_output",
+ "type": "Sum",
+ "unit": "Bytes",
+ "description": "The total number of bytes written to the network"
+ },
+ {
+ "name": "redis_connections_received",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Total number of connections accepted by the server"
+ },
+ {
+ "name": "redis_rdb_changes_since_last_save",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of changes since the last dump"
+ },
+ {
+ "name": "redis_memory_rss",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Number of bytes that Redis allocated as seen by the operating system"
+ },
+ {
+ "name": "redis_db_avg_ttl",
+ "type": "Gauge",
+ "unit": "ms",
+ "description": "Average keyspace keys TTL"
+ },
+ {
+ "name": "redis_memory_peak",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Peak memory consumed by Redis (in bytes)"
+ },
+ {
+ "name": "redis_memory_fragmentation_ratio",
+ "type": "Gauge",
+ "unit": "number",
+ "description": "Ratio between used_memory_rss and used_memory"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/overview.md b/pkg/query-service/app/integrations/builtin_integrations/redis/overview.md
new file mode 100644
index 0000000000..8e5d517da2
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/overview.md
@@ -0,0 +1,5 @@
+### Monitor Redis with SigNoz
+
+Collect key Redis metrics and view them with an out-of-the-box dashboard.
+
+Collect and parse Redis logs to populate timestamp, severity, and other log attributes for better querying and aggregation.
diff --git a/pkg/query-service/app/integrations/builtin_test.go b/pkg/query-service/app/integrations/builtin_test.go
new file mode 100644
index 0000000000..cb72d5dcba
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_test.go
@@ -0,0 +1,32 @@
+package integrations
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestBuiltinIntegrations(t *testing.T) {
+ require := require.New(t)
+
+ repo := BuiltInIntegrations{}
+
+ builtins, apiErr := repo.list(context.Background())
+ require.Nil(apiErr)
+ require.Greater(
+ len(builtins), 0,
+ "some built in integrations are expected to be bundled.",
+ )
+
+ nginxIntegrationId := "builtin-nginx"
+ res, apiErr := repo.get(context.Background(), []string{
+ nginxIntegrationId,
+ })
+ require.Nil(apiErr)
+
+ nginxIntegration, exists := res[nginxIntegrationId]
+ require.True(exists)
+ require.False(strings.HasPrefix(nginxIntegration.Overview, "file://"))
+}
diff --git a/pkg/query-service/app/integrations/controller.go b/pkg/query-service/app/integrations/controller.go
new file mode 100644
index 0000000000..8695c4b1cd
--- /dev/null
+++ b/pkg/query-service/app/integrations/controller.go
@@ -0,0 +1,141 @@
+package integrations
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/jmoiron/sqlx"
+ "go.signoz.io/signoz/pkg/query-service/agentConf"
+ "go.signoz.io/signoz/pkg/query-service/app/dashboards"
+ "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
+ "go.signoz.io/signoz/pkg/query-service/model"
+)
+
+type Controller struct {
+ mgr *Manager
+}
+
+func NewController(db *sqlx.DB) (
+ *Controller, error,
+) {
+ mgr, err := NewManager(db)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't create integrations manager: %w", err)
+ }
+
+ return &Controller{
+ mgr: mgr,
+ }, nil
+}
+
+type IntegrationsListResponse struct {
+ Integrations []IntegrationsListItem `json:"integrations"`
+
+ // Pagination details to come later
+}
+
+func (c *Controller) ListIntegrations(
+ ctx context.Context, params map[string]string,
+) (
+ *IntegrationsListResponse, *model.ApiError,
+) {
+ var filters *IntegrationsFilter
+ if isInstalledFilter, exists := params["is_installed"]; exists {
+ isInstalled := isInstalledFilter != "false"
+ filters = &IntegrationsFilter{
+ IsInstalled: &isInstalled,
+ }
+ }
+
+ integrations, apiErr := c.mgr.ListIntegrations(ctx, filters)
+ if apiErr != nil {
+ return nil, apiErr
+ }
+
+ return &IntegrationsListResponse{
+ Integrations: integrations,
+ }, nil
+}
+
+func (c *Controller) GetIntegration(
+ ctx context.Context, integrationId string,
+) (*Integration, *model.ApiError) {
+ return c.mgr.GetIntegration(ctx, integrationId)
+}
+
+func (c *Controller) IsIntegrationInstalled(
+ ctx context.Context,
+ integrationId string,
+) (bool, *model.ApiError) {
+ installation, apiErr := c.mgr.getInstalledIntegration(ctx, integrationId)
+ if apiErr != nil {
+ return false, apiErr
+ }
+ isInstalled := installation != nil
+ return isInstalled, nil
+}
+
+func (c *Controller) GetIntegrationConnectionTests(
+ ctx context.Context, integrationId string,
+) (*IntegrationConnectionTests, *model.ApiError) {
+ return c.mgr.GetIntegrationConnectionTests(ctx, integrationId)
+}
+
+type InstallIntegrationRequest struct {
+ IntegrationId string `json:"integration_id"`
+ Config map[string]interface{} `json:"config"`
+}
+
+func (c *Controller) Install(
+ ctx context.Context, req *InstallIntegrationRequest,
+) (*IntegrationsListItem, *model.ApiError) {
+ res, apiErr := c.mgr.InstallIntegration(
+ ctx, req.IntegrationId, req.Config,
+ )
+ if apiErr != nil {
+ return nil, apiErr
+ }
+ agentConf.NotifyConfigUpdate(ctx)
+ return res, nil
+}
+
+type UninstallIntegrationRequest struct {
+ IntegrationId string `json:"integration_id"`
+}
+
+func (c *Controller) Uninstall(
+ ctx context.Context, req *UninstallIntegrationRequest,
+) *model.ApiError {
+ if len(req.IntegrationId) < 1 {
+ return model.BadRequest(fmt.Errorf(
+ "integration_id is required.",
+ ))
+ }
+
+ apiErr := c.mgr.UninstallIntegration(
+ ctx, req.IntegrationId,
+ )
+ if apiErr != nil {
+ return apiErr
+ }
+ agentConf.NotifyConfigUpdate(ctx)
+ return nil
+}
+
+func (c *Controller) GetPipelinesForInstalledIntegrations(
+ ctx context.Context,
+) ([]logparsingpipeline.Pipeline, *model.ApiError) {
+ return c.mgr.GetPipelinesForInstalledIntegrations(ctx)
+}
+
+func (c *Controller) GetDashboardsForInstalledIntegrations(
+ ctx context.Context,
+) ([]dashboards.Dashboard, *model.ApiError) {
+ return c.mgr.GetDashboardsForInstalledIntegrations(ctx)
+}
+
+func (c *Controller) GetInstalledIntegrationDashboardById(
+ ctx context.Context, dashboardUuid string,
+) (*dashboards.Dashboard, *model.ApiError) {
+ return c.mgr.GetInstalledIntegrationDashboardById(ctx, dashboardUuid)
+}
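For reference, a minimal sketch of wiring and querying the controller above. The sqlite file path and driver are assumptions for this sketch; note that `ListIntegrations` treats any `is_installed` value other than the literal `"false"` as true.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
	"go.signoz.io/signoz/pkg/query-service/app/integrations"
)

func main() {
	// Assumption: an existing query-service sqlite DB file; any *sqlx.DB
	// that NewManager can initialize its tables on would do.
	db, err := sqlx.Open("sqlite3", "signoz.db")
	if err != nil {
		log.Fatal(err)
	}

	ctrl, err := integrations.NewController(db)
	if err != nil {
		log.Fatal(err)
	}

	// Per ListIntegrations above, any is_installed value other than the
	// literal "false" is treated as true.
	resp, apiErr := ctrl.ListIntegrations(
		context.Background(), map[string]string{"is_installed": "true"},
	)
	if apiErr != nil {
		log.Fatal(apiErr)
	}
	for _, i := range resp.Integrations {
		fmt.Println(i.Id, i.IsInstalled)
	}
}
```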
diff --git a/pkg/query-service/app/integrations/manager.go b/pkg/query-service/app/integrations/manager.go
index 3caf352172..c3ebd21cc2 100644
--- a/pkg/query-service/app/integrations/manager.go
+++ b/pkg/query-service/app/integrations/manager.go
@@ -4,53 +4,112 @@ import (
"context"
"fmt"
"slices"
+ "strings"
"time"
+ "github.com/google/uuid"
+ "github.com/jmoiron/sqlx"
+ "go.signoz.io/signoz/pkg/query-service/app/dashboards"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/model"
+ v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+ "go.signoz.io/signoz/pkg/query-service/rules"
+ "go.signoz.io/signoz/pkg/query-service/utils"
)
type IntegrationAuthor struct {
- Name string
- Email string
- HomePage string
+ Name string `json:"name"`
+ Email string `json:"email"`
+ HomePage string `json:"homepage"`
}
type IntegrationSummary struct {
- Id string
- Title string
- Description string // A short description
+ Id string `json:"id"`
+ Title string `json:"title"`
+ Description string `json:"description"` // A short description
- Author IntegrationAuthor
+ Author IntegrationAuthor `json:"author"`
+
+ Icon string `json:"icon"`
}
type IntegrationAssets struct {
- // Each integration is expected to specify all log transformations
- // in a single pipeline with a source based filter
- LogPipeline *logparsingpipeline.PostablePipeline
+ Logs LogsAssets `json:"logs"`
+ Dashboards []dashboards.Data `json:"dashboards"`
- // TBD: Dashboards, alerts, saved views, facets (indexed attribs)...
+ Alerts []rules.PostableRule `json:"alerts"`
+}
+
+type LogsAssets struct {
+ Pipelines []logparsingpipeline.PostablePipeline `json:"pipelines"`
+}
+
+type IntegrationConfigStep struct {
+ Title string `json:"title"`
+ Instructions string `json:"instructions"`
+}
+
+type DataCollectedForIntegration struct {
+ Logs []CollectedLogAttribute `json:"logs"`
+ Metrics []CollectedMetric `json:"metrics"`
+}
+
+type CollectedLogAttribute struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ Type string `json:"type"`
+}
+
+type CollectedMetric struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Unit string `json:"unit"`
+}
+
+type SignalConnectionStatus struct {
+ LastReceivedTsMillis int64 `json:"last_received_ts_ms"` // epoch milliseconds
+ LastReceivedFrom string `json:"last_received_from"` // resource identifier
+}
+
+type IntegrationConnectionStatus struct {
+ Logs *SignalConnectionStatus `json:"logs"`
+ Metrics *SignalConnectionStatus `json:"metrics"`
+}
+
+type IntegrationConnectionTests struct {
+ // Filter to use for finding logs for the integration.
+ Logs *v3.FilterSet `json:"logs"`
+
+ // Metric names expected to have been received for the integration.
+ Metrics []string `json:"metrics"`
}
type IntegrationDetails struct {
IntegrationSummary
- IntegrationAssets
+
+ Categories []string `json:"categories"`
+ Overview string `json:"overview"` // markdown
+ Configuration []IntegrationConfigStep `json:"configuration"`
+ DataCollected DataCollectedForIntegration `json:"data_collected"`
+ Assets IntegrationAssets `json:"assets"`
+
+ ConnectionTests *IntegrationConnectionTests `json:"connection_tests"`
}
type IntegrationsListItem struct {
IntegrationSummary
- IsInstalled bool
+ IsInstalled bool `json:"is_installed"`
}
type InstalledIntegration struct {
- IntegrationId string `db:"integration_id"`
- Config InstalledIntegrationConfig `db:"config_json"`
- InstalledAt time.Time `db:"installed_at"`
+ IntegrationId string `json:"integration_id" db:"integration_id"`
+ Config InstalledIntegrationConfig `json:"config_json" db:"config_json"`
+ InstalledAt time.Time `json:"installed_at" db:"installed_at"`
}
type InstalledIntegrationConfig map[string]interface{}
type Integration struct {
IntegrationDetails
- Installation *InstalledIntegration
+ Installation *InstalledIntegration `json:"installation"`
}
type Manager struct {
@@ -58,6 +117,20 @@ type Manager struct {
installedIntegrationsRepo InstalledIntegrationsRepo
}
+func NewManager(db *sqlx.DB) (*Manager, error) {
+ iiRepo, err := NewInstalledIntegrationsSqliteRepo(db)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "could not init sqlite DB for installed integrations: %w", err,
+ )
+ }
+
+ return &Manager{
+ availableIntegrationsRepo: &BuiltInIntegrations{},
+ installedIntegrationsRepo: iiRepo,
+ }, nil
+}
+
type IntegrationsFilter struct {
IsInstalled *bool
}
@@ -132,6 +205,19 @@ func (m *Manager) GetIntegration(
}, nil
}
+func (m *Manager) GetIntegrationConnectionTests(
+ ctx context.Context,
+ integrationId string,
+) (*IntegrationConnectionTests, *model.ApiError) {
+ integrationDetails, apiErr := m.getIntegrationDetails(
+ ctx, integrationId,
+ )
+ if apiErr != nil {
+ return nil, apiErr
+ }
+ return integrationDetails.ConnectionTests, nil
+}
+
func (m *Manager) InstallIntegration(
ctx context.Context,
integrationId string,
@@ -164,11 +250,131 @@ func (m *Manager) UninstallIntegration(
return m.installedIntegrationsRepo.delete(ctx, integrationId)
}
+func (m *Manager) GetPipelinesForInstalledIntegrations(
+ ctx context.Context,
+) ([]logparsingpipeline.Pipeline, *model.ApiError) {
+ installedIntegrations, apiErr := m.getDetailsForInstalledIntegrations(ctx)
+ if apiErr != nil {
+ return nil, apiErr
+ }
+
+ pipelines := []logparsingpipeline.Pipeline{}
+ for _, ii := range installedIntegrations {
+ for _, p := range ii.Assets.Logs.Pipelines {
+ pp := logparsingpipeline.Pipeline{
+ // Alias is used for identifying integration pipelines. Id can't be used for this
+ // since pipelines are saved with a fresh id on every version, so that editing
+ // or reordering pipelines doesn't alter historical versions.
+ Alias: AliasForIntegrationPipeline(ii.Id, p.Alias),
+ Id: uuid.NewString(),
+ OrderId: p.OrderId,
+ Enabled: p.Enabled,
+ Name: p.Name,
+ Description: &p.Description,
+ Filter: p.Filter,
+ Config: p.Config,
+ }
+ pipelines = append(pipelines, pp)
+ }
+ }
+
+ return pipelines, nil
+}
+
+func (m *Manager) dashboardUuid(integrationId string, dashboardId string) string {
+ return strings.Join([]string{"integration", integrationId, dashboardId}, "--")
+}
+
+func (m *Manager) parseDashboardUuid(dashboardUuid string) (
+ integrationId string, dashboardId string, err *model.ApiError,
+) {
+ parts := strings.SplitN(dashboardUuid, "--", 3)
+ if len(parts) != 3 || parts[0] != "integration" {
+ return "", "", model.BadRequest(fmt.Errorf(
+ "invalid installed integration dashboard id",
+ ))
+ }
+
+ return parts[1], parts[2], nil
+}
+
+func (m *Manager) GetInstalledIntegrationDashboardById(
+ ctx context.Context,
+ dashboardUuid string,
+) (*dashboards.Dashboard, *model.ApiError) {
+ integrationId, dashboardId, apiErr := m.parseDashboardUuid(dashboardUuid)
+ if apiErr != nil {
+ return nil, apiErr
+ }
+
+ integration, apiErr := m.GetIntegration(ctx, integrationId)
+ if apiErr != nil {
+ return nil, apiErr
+ }
+
+ if integration.Installation == nil {
+ return nil, model.BadRequest(fmt.Errorf(
+ "integration with id %s is not installed", integrationId,
+ ))
+ }
+
+ for _, dd := range integration.IntegrationDetails.Assets.Dashboards {
+ if dId, exists := dd["id"]; exists {
+ if id, ok := dId.(string); ok && id == dashboardId {
+ isLocked := 1
+ return &dashboards.Dashboard{
+ Uuid: m.dashboardUuid(integrationId, dashboardId),
+ Locked: &isLocked,
+ Data: dd,
+ }, nil
+ }
+ }
+ }
+
+ return nil, model.NotFoundError(fmt.Errorf(
+ "integration dashboard with id %s not found", dashboardUuid,
+ ))
+}
+
+func (m *Manager) GetDashboardsForInstalledIntegrations(
+ ctx context.Context,
+) ([]dashboards.Dashboard, *model.ApiError) {
+ installedIntegrations, apiErr := m.getDetailsForInstalledIntegrations(ctx)
+ if apiErr != nil {
+ return nil, apiErr
+ }
+
+ result := []dashboards.Dashboard{}
+
+ for _, ii := range installedIntegrations {
+ for _, dd := range ii.Assets.Dashboards {
+ if dId, exists := dd["id"]; exists {
+ if dashboardId, ok := dId.(string); ok {
+ isLocked := 1
+ result = append(result, dashboards.Dashboard{
+ Uuid: m.dashboardUuid(ii.IntegrationSummary.Id, dashboardId),
+ Locked: &isLocked,
+ Data: dd,
+ })
+ }
+ }
+ }
+ }
+
+ return result, nil
+}
+
// Helpers.
func (m *Manager) getIntegrationDetails(
ctx context.Context,
integrationId string,
) (*IntegrationDetails, *model.ApiError) {
+ if len(strings.TrimSpace(integrationId)) < 1 {
+ return nil, model.BadRequest(fmt.Errorf(
+ "integrationId is required",
+ ))
+ }
+
ais, apiErr := m.availableIntegrationsRepo.get(
ctx, []string{integrationId},
)
@@ -206,3 +412,19 @@ func (m *Manager) getInstalledIntegration(
}
return &installation, nil
}
+
+func (m *Manager) getDetailsForInstalledIntegrations(
+ ctx context.Context,
+) (
+ map[string]IntegrationDetails, *model.ApiError,
+) {
+ installations, apiErr := m.installedIntegrationsRepo.list(ctx)
+ if apiErr != nil {
+ return nil, apiErr
+ }
+
+ installedIds := utils.MapSlice(installations, func(i InstalledIntegration) string {
+ return i.IntegrationId
+ })
+ return m.availableIntegrationsRepo.get(ctx, installedIds)
+}
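The dashboard UUID scheme above joins the literal `integration`, the integration id, and the dashboard id with `--`, and `parseDashboardUuid` splits with `SplitN(..., 3)`. A standalone sketch of the round trip, using hypothetical copies of the two unexported methods:

```go
package main

import (
	"fmt"
	"strings"
)

// Hypothetical standalone copies of the unexported Manager methods above.
func dashboardUuid(integrationId, dashboardId string) string {
	return strings.Join([]string{"integration", integrationId, dashboardId}, "--")
}

func parseDashboardUuid(uuid string) (string, string, error) {
	parts := strings.SplitN(uuid, "--", 3)
	if len(parts) != 3 || parts[0] != "integration" {
		return "", "", fmt.Errorf("invalid installed integration dashboard id")
	}
	return parts[1], parts[2], nil
}

func main() {
	u := dashboardUuid("builtin-redis", "overview")
	fmt.Println(u) // integration--builtin-redis--overview

	id, dash, err := parseDashboardUuid(u)
	fmt.Println(id, dash, err) // builtin-redis overview <nil>

	// Because SplitN stops at 3 parts, a dashboard id may itself contain
	// "--", but an integration id must not.
}
```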
diff --git a/pkg/query-service/app/integrations/pipeline_utils.go b/pkg/query-service/app/integrations/pipeline_utils.go
new file mode 100644
index 0000000000..49ab5dd82a
--- /dev/null
+++ b/pkg/query-service/app/integrations/pipeline_utils.go
@@ -0,0 +1,33 @@
+package integrations
+
+import (
+ "strings"
+
+ "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
+ "go.signoz.io/signoz/pkg/query-service/constants"
+)
+
+const IntegrationPipelineIdSeparator string = "--"
+
+func AliasForIntegrationPipeline(
+ integrationId string, pipelineName string,
+) string {
+ return strings.Join(
+ []string{constants.IntegrationPipelineIdPrefix, integrationId, pipelineName},
+ IntegrationPipelineIdSeparator,
+ )
+}
+
+// Returns a pointer to the integration_id if `p` is a pipeline for an installed integration.
+// Returns nil otherwise.
+func IntegrationIdForPipeline(p logparsingpipeline.Pipeline) *string {
+ if strings.HasPrefix(p.Alias, constants.IntegrationPipelineIdPrefix) {
+ parts := strings.Split(p.Alias, IntegrationPipelineIdSeparator)
+ if len(parts) < 2 {
+ return nil
+ }
+ integrationId := parts[1]
+ return &integrationId
+ }
+ return nil
+}
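A sketch of the alias round trip, assuming `constants.IntegrationPipelineIdPrefix` resolves to `integration` (the actual value lives in the constants package). The alias, not the pipeline id, is what survives across saved pipeline versions:

```go
package main

import (
	"fmt"
	"strings"
)

// Assumption: the prefix constant resolves to "integration"; the real value
// lives in the query-service constants package.
const (
	prefix = "integration"
	sep    = "--"
)

func aliasFor(integrationId, pipelineName string) string {
	return strings.Join([]string{prefix, integrationId, pipelineName}, sep)
}

func integrationIdFor(alias string) *string {
	if !strings.HasPrefix(alias, prefix) {
		return nil
	}
	parts := strings.Split(alias, sep)
	if len(parts) < 2 {
		return nil
	}
	return &parts[1]
}

func main() {
	alias := aliasFor("builtin-nginx", "nginx-access-logs")
	fmt.Println(alias) // integration--builtin-nginx--nginx-access-logs

	if id := integrationIdFor(alias); id != nil {
		fmt.Println("belongs to integration:", *id)
	}
}
```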
diff --git a/pkg/query-service/app/integrations/sqlite_repo.go b/pkg/query-service/app/integrations/sqlite_repo.go
index 94e9c4d51d..2c3e9fc699 100644
--- a/pkg/query-service/app/integrations/sqlite_repo.go
+++ b/pkg/query-service/app/integrations/sqlite_repo.go
@@ -62,6 +62,7 @@ func (r *InstalledIntegrationsSqliteRepo) list(
config_json,
installed_at
from integrations_installed
+ order by installed_at
`,
)
if err != nil {
diff --git a/pkg/query-service/app/integrations/test_utils.go b/pkg/query-service/app/integrations/test_utils.go
index 6dcb9ec355..1ff964b3e6 100644
--- a/pkg/query-service/app/integrations/test_utils.go
+++ b/pkg/query-service/app/integrations/test_utils.go
@@ -2,37 +2,19 @@ package integrations
import (
"context"
- "os"
"slices"
"testing"
- "github.com/jmoiron/sqlx"
+ "go.signoz.io/signoz/pkg/query-service/app/dashboards"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+ "go.signoz.io/signoz/pkg/query-service/rules"
+ "go.signoz.io/signoz/pkg/query-service/utils"
)
-func NewTestSqliteDB(t *testing.T) (
- db *sqlx.DB, dbFilePath string,
-) {
- testDBFile, err := os.CreateTemp("", "test-signoz-db-*")
- if err != nil {
- t.Fatalf("could not create temp file for test db: %v", err)
- }
- testDBFilePath := testDBFile.Name()
- t.Cleanup(func() { os.Remove(testDBFilePath) })
- testDBFile.Close()
-
- testDB, err := sqlx.Open("sqlite3", testDBFilePath)
- if err != nil {
- t.Fatalf("could not open test db sqlite file: %v", err)
- }
-
- return testDB, testDBFilePath
-}
-
func NewTestIntegrationsManager(t *testing.T) *Manager {
- testDB, _ := NewTestSqliteDB(t)
+ testDB := utils.NewQueryServiceDBForTests(t)
installedIntegrationsRepo, err := NewInstalledIntegrationsSqliteRepo(testDB)
if err != nil {
@@ -61,35 +43,70 @@ func (t *TestAvailableIntegrationsRepo) list(
Email: "integrations@signoz.io",
HomePage: "https://signoz.io",
},
+ Icon: `data:image/svg+xml;utf8, ... `,
},
- IntegrationAssets: IntegrationAssets{
- LogPipeline: &logparsingpipeline.PostablePipeline{
- Name: "pipeline1",
- Alias: "pipeline1",
- Enabled: true,
- Filter: &v3.FilterSet{
- Operator: "AND",
- Items: []v3.FilterItem{
- {
- Key: v3.AttributeKey{
- Key: "method",
- DataType: v3.AttributeKeyDataTypeString,
- Type: v3.AttributeKeyTypeTag,
+ Categories: []string{"testcat1", "testcat2"},
+ Overview: "test integration overview",
+ Configuration: []IntegrationConfigStep{
+ {
+ Title: "Step 1",
+ Instructions: "Set source attrib on your signals",
+ },
+ },
+ DataCollected: DataCollectedForIntegration{
+ Logs: []CollectedLogAttribute{},
+ Metrics: []CollectedMetric{},
+ },
+ Assets: IntegrationAssets{
+ Logs: LogsAssets{
+ Pipelines: []logparsingpipeline.PostablePipeline{
+ {
+ Name: "pipeline1",
+ Alias: "pipeline1",
+ Enabled: true,
+ Filter: &v3.FilterSet{
+ Operator: "AND",
+ Items: []v3.FilterItem{
+ {
+ Key: v3.AttributeKey{
+ Key: "source",
+ DataType: v3.AttributeKeyDataTypeString,
+ Type: v3.AttributeKeyTypeTag,
+ },
+ Operator: "=",
+ Value: "nginx",
+ },
+ },
+ },
+ Config: []logparsingpipeline.PipelineOperator{
+ {
+ OrderId: 1,
+ ID: "add",
+ Type: "add",
+ Field: "attributes.test",
+ Value: "val",
+ Enabled: true,
+ Name: "test add",
},
- Operator: "=",
- Value: "GET",
},
},
},
- Config: []logparsingpipeline.PipelineOperator{
+ },
+ Dashboards: []dashboards.Data{},
+ Alerts: []rules.PostableRule{},
+ },
+ ConnectionTests: &IntegrationConnectionTests{
+ Logs: &v3.FilterSet{
+ Operator: "AND",
+ Items: []v3.FilterItem{
{
- OrderId: 1,
- ID: "add",
- Type: "add",
- Field: "attributes.test",
- Value: "val",
- Enabled: true,
- Name: "test add",
+ Key: v3.AttributeKey{
+ Key: "source",
+ DataType: v3.AttributeKeyDataTypeString,
+ Type: v3.AttributeKeyTypeTag,
+ },
+ Operator: "=",
+ Value: "nginx",
},
},
},
@@ -104,35 +121,70 @@ func (t *TestAvailableIntegrationsRepo) list(
Email: "integrations@signoz.io",
HomePage: "https://signoz.io",
},
+ Icon: `data:image/svg+xml;utf8, ... `,
},
- IntegrationAssets: IntegrationAssets{
- LogPipeline: &logparsingpipeline.PostablePipeline{
- Name: "pipeline2",
- Alias: "pipeline2",
- Enabled: true,
- Filter: &v3.FilterSet{
- Operator: "AND",
- Items: []v3.FilterItem{
- {
- Key: v3.AttributeKey{
- Key: "method",
- DataType: v3.AttributeKeyDataTypeString,
- Type: v3.AttributeKeyTypeTag,
+ Categories: []string{"testcat1", "testcat2"},
+ Overview: "test integration overview",
+ Configuration: []IntegrationConfigStep{
+ {
+ Title: "Step 1",
+ Instructions: "Set source attrib on your signals",
+ },
+ },
+ DataCollected: DataCollectedForIntegration{
+ Logs: []CollectedLogAttribute{},
+ Metrics: []CollectedMetric{},
+ },
+ Assets: IntegrationAssets{
+ Logs: LogsAssets{
+ Pipelines: []logparsingpipeline.PostablePipeline{
+ {
+ Name: "pipeline2",
+ Alias: "pipeline2",
+ Enabled: true,
+ Filter: &v3.FilterSet{
+ Operator: "AND",
+ Items: []v3.FilterItem{
+ {
+ Key: v3.AttributeKey{
+ Key: "source",
+ DataType: v3.AttributeKeyDataTypeString,
+ Type: v3.AttributeKeyTypeTag,
+ },
+ Operator: "=",
+ Value: "redis",
+ },
+ },
+ },
+ Config: []logparsingpipeline.PipelineOperator{
+ {
+ OrderId: 1,
+ ID: "add",
+ Type: "add",
+ Field: "attributes.test",
+ Value: "val",
+ Enabled: true,
+ Name: "test add",
},
- Operator: "=",
- Value: "GET",
},
},
},
- Config: []logparsingpipeline.PipelineOperator{
+ },
+ Dashboards: []dashboards.Data{},
+ Alerts: []rules.PostableRule{},
+ },
+ ConnectionTests: &IntegrationConnectionTests{
+ Logs: &v3.FilterSet{
+ Operator: "AND",
+ Items: []v3.FilterItem{
{
- OrderId: 1,
- ID: "add",
- Type: "add",
- Field: "attributes.test",
- Value: "val",
- Enabled: true,
- Name: "test add",
+ Key: v3.AttributeKey{
+ Key: "source",
+ DataType: v3.AttributeKeyDataTypeString,
+ Type: v3.AttributeKeyTypeTag,
+ },
+ Operator: "=",
+ Value: "nginx",
},
},
},
diff --git a/pkg/query-service/app/limit.go b/pkg/query-service/app/limit.go
index 7b6d728dd0..ab68ae5ac4 100644
--- a/pkg/query-service/app/limit.go
+++ b/pkg/query-service/app/limit.go
@@ -20,7 +20,7 @@ func applyMetricLimit(results []*v3.Result, queryRangeParams *v3.QueryRangeParam
limit := builderQueries[result.QueryName].Limit
orderByList := builderQueries[result.QueryName].OrderBy
- if limit > 0 {
+ {
if len(orderByList) == 0 {
// If no orderBy is specified, sort by value in descending order
orderByList = []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "desc"}}
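The hunk above drops the `limit > 0` guard, so ordering (defaulting to value descending) is now applied even when no limit is set. A simplified sketch of the resulting sort-then-truncate behavior, not the actual `applyMetricLimit` implementation:

```go
package main

import (
	"fmt"
	"sort"
)

type series struct {
	name  string
	value float64
}

// Sorting is always applied (value descending by default); truncation still
// only happens for a positive limit.
func applyOrderAndLimit(ss []series, limit int) []series {
	sort.Slice(ss, func(i, j int) bool { return ss[i].value > ss[j].value })
	if limit > 0 && len(ss) > limit {
		ss = ss[:limit]
	}
	return ss
}

func main() {
	fmt.Println(applyOrderAndLimit([]series{{"a", 1}, {"b", 3}, {"c", 2}}, 0)) // sorted, untruncated
	fmt.Println(applyOrderAndLimit([]series{{"a", 1}, {"b", 3}, {"c", 2}}, 2)) // top 2 by value
}
```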
diff --git a/pkg/query-service/app/logparsingpipeline/collector_config.go b/pkg/query-service/app/logparsingpipeline/collector_config.go
index c370441210..17b8d96c1e 100644
--- a/pkg/query-service/app/logparsingpipeline/collector_config.go
+++ b/pkg/query-service/app/logparsingpipeline/collector_config.go
@@ -138,7 +138,7 @@ func buildLogsProcessors(current []string, logsParserPipeline []string) ([]strin
func checkDuplicateString(pipeline []string) bool {
exists := make(map[string]bool, len(pipeline))
- zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline)
+ zap.L().Debug("checking duplicate processors in the pipeline:", zap.Any("pipeline", pipeline))
for _, processor := range pipeline {
name := processor
if _, ok := exists[name]; ok {
diff --git a/pkg/query-service/app/logparsingpipeline/controller.go b/pkg/query-service/app/logparsingpipeline/controller.go
index eed3befec5..2e6b0ba4d3 100644
--- a/pkg/query-service/app/logparsingpipeline/controller.go
+++ b/pkg/query-service/app/logparsingpipeline/controller.go
@@ -4,25 +4,38 @@ import (
"context"
"encoding/json"
"fmt"
+ "slices"
+ "strings"
+ "github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"go.signoz.io/signoz/pkg/query-service/agentConf"
"go.signoz.io/signoz/pkg/query-service/auth"
+ "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/model"
- "go.uber.org/multierr"
+ "go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
// Controller takes care of deployment cycle of log parsing pipelines.
type LogParsingPipelineController struct {
Repo
+
+ GetIntegrationPipelines func(context.Context) ([]Pipeline, *model.ApiError)
}
-func NewLogParsingPipelinesController(db *sqlx.DB, engine string) (*LogParsingPipelineController, error) {
+func NewLogParsingPipelinesController(
+ db *sqlx.DB,
+ engine string,
+ getIntegrationPipelines func(context.Context) ([]Pipeline, *model.ApiError),
+) (*LogParsingPipelineController, error) {
repo := NewRepo(db)
err := repo.InitDB(engine)
- return &LogParsingPipelineController{Repo: repo}, err
+ return &LogParsingPipelineController{
+ Repo: repo,
+ GetIntegrationPipelines: getIntegrationPipelines,
+ }, err
}
// PipelinesResponse is used to prepare http response for pipelines config related requests
@@ -47,29 +60,22 @@ func (ic *LogParsingPipelineController) ApplyPipelines(
var pipelines []Pipeline
// scan through postable pipelines, to select the existing pipelines or insert missing ones
- for _, r := range postable {
+ for idx, r := range postable {
// note: we process only new and changed pipelines here, deleted pipelines are not expected
// from client. if the user deletes a pipeline, the client should not send that pipeline in the update.
// in effect, the new config version will not have that pipeline.
- if r.Id == "" {
- // looks like a new or changed pipeline, store it first
- inserted, err := ic.insertPipeline(ctx, &r)
- if err != nil {
- zap.S().Errorf("failed to insert edited pipeline %s", err.Error())
- return nil, model.WrapApiError(err, "failed to insert edited pipeline")
- } else {
- pipelines = append(pipelines, *inserted)
- }
- } else {
- selected, err := ic.GetPipeline(ctx, r.Id)
- if err != nil {
- zap.S().Errorf("failed to find edited pipeline %s", err.Error())
- return nil, model.WrapApiError(err, "failed to find edited pipeline")
- }
- pipelines = append(pipelines, *selected)
+ // For versioning, pipelines get stored with unique ids each time they are saved.
+ // This ensures updating a pipeline doesn't alter historical versions that referenced
+ // the same pipeline id.
+ r.Id = uuid.NewString()
+ r.OrderId = idx + 1
+ pipeline, apiErr := ic.insertPipeline(ctx, &r)
+ if apiErr != nil {
+ return nil, model.WrapApiError(apiErr, "failed to insert pipeline")
}
+ pipelines = append(pipelines, *pipeline)
}
@@ -85,34 +91,85 @@ func (ic *LogParsingPipelineController) ApplyPipelines(
return nil, err
}
- history, _ := agentConf.GetConfigHistory(ctx, agentConf.ElementTypeLogPipelines, 10)
- insertedCfg, _ := agentConf.GetConfigVersion(ctx, agentConf.ElementTypeLogPipelines, cfg.Version)
+ return ic.GetPipelinesByVersion(ctx, cfg.Version)
+}
- response := &PipelinesResponse{
- ConfigVersion: insertedCfg,
- Pipelines: pipelines,
- History: history,
+// Returns effective list of pipelines including user created
+// pipelines and pipelines for installed integrations
+func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
+ ctx context.Context, version int,
+) ([]Pipeline, *model.ApiError) {
+ result := []Pipeline{}
+
+ if version >= 0 {
+ savedPipelines, errors := ic.getPipelinesByVersion(ctx, version)
+ if errors != nil {
+ zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Errors("errors", errors))
+ return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version"))
+ }
+ result = savedPipelines
}
- if err != nil {
- return response, model.WrapApiError(err, "failed to apply pipelines")
+ integrationPipelines, apiErr := ic.GetIntegrationPipelines(ctx)
+ if apiErr != nil {
+ return nil, model.WrapApiError(
+ apiErr, "could not get pipelines for installed integrations",
+ )
}
- return response, nil
+
+ // Filter out any integration pipelines included in pipelines saved by user
+ // if the corresponding integration is no longer installed.
+ ipAliases := utils.MapSlice(integrationPipelines, func(p Pipeline) string {
+ return p.Alias
+ })
+ result = utils.FilterSlice(result, func(p Pipeline) bool {
+ if !strings.HasPrefix(p.Alias, constants.IntegrationPipelineIdPrefix) {
+ return true
+ }
+ return slices.Contains(ipAliases, p.Alias)
+ })
+
+ // Add installed integration pipelines to the list of pipelines saved by user.
+ // Users are allowed to enable/disable and reorder integration pipelines while
+ // saving the pipeline list.
+ for _, ip := range integrationPipelines {
+ userPipelineIdx := slices.IndexFunc(result, func(p Pipeline) bool {
+ return p.Alias == ip.Alias
+ })
+ if userPipelineIdx >= 0 {
+ ip.Enabled = result[userPipelineIdx].Enabled
+ result[userPipelineIdx] = ip
+ } else {
+ // installed integration pipelines get added to the end of the list by default.
+ result = append(result, ip)
+ }
+ }
+
+ for idx := range result {
+ result[idx].OrderId = idx + 1
+ }
+
+ return result, nil
}
// GetPipelinesByVersion responds with version info and associated pipelines
func (ic *LogParsingPipelineController) GetPipelinesByVersion(
ctx context.Context, version int,
) (*PipelinesResponse, *model.ApiError) {
- pipelines, errors := ic.getPipelinesByVersion(ctx, version)
+ pipelines, errors := ic.getEffectivePipelinesByVersion(ctx, version)
if errors != nil {
- zap.S().Errorf("failed to get pipelines for version %d, %w", version, errors)
+ zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Error(errors))
return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version"))
}
- configVersion, err := agentConf.GetConfigVersion(ctx, agentConf.ElementTypeLogPipelines, version)
- if err != nil {
- zap.S().Errorf("failed to get config for version %d, %s", version, err.Error())
- return nil, model.WrapApiError(err, "failed to get config for given version")
+
+ var configVersion *agentConf.ConfigVersion
+ if version >= 0 {
+ cv, err := agentConf.GetConfigVersion(ctx, agentConf.ElementTypeLogPipelines, version)
+ if err != nil {
+ zap.L().Error("failed to get config for version", zap.Int("version", version), zap.Error(err))
+ return nil, model.WrapApiError(err, "failed to get config for given version")
+ }
+ configVersion = cv
}
return &PipelinesResponse{
@@ -163,26 +220,29 @@ func (pc *LogParsingPipelineController) RecommendAgentConfig(
serializedSettingsUsed string,
apiErr *model.ApiError,
) {
+ pipelinesVersion := -1
+ if configVersion != nil {
+ pipelinesVersion = configVersion.Version
+ }
- pipelines, errs := pc.getPipelinesByVersion(
- context.Background(), configVersion.Version,
+ pipelinesResp, apiErr := pc.GetPipelinesByVersion(
+ context.Background(), pipelinesVersion,
)
- if len(errs) > 0 {
- return nil, "", model.InternalError(multierr.Combine(errs...))
+ if apiErr != nil {
+ return nil, "", apiErr
}
updatedConf, apiErr := GenerateCollectorConfigWithPipelines(
- currentConfYaml, pipelines,
+ currentConfYaml, pipelinesResp.Pipelines,
)
if apiErr != nil {
return nil, "", model.WrapApiError(apiErr, "could not marshal yaml for updated conf")
}
- rawPipelineData, err := json.Marshal(pipelines)
+ rawPipelineData, err := json.Marshal(pipelinesResp.Pipelines)
if err != nil {
return nil, "", model.BadRequest(errors.Wrap(err, "could not serialize pipelines to JSON"))
}
return updatedConf, string(rawPipelineData), nil
-
}
diff --git a/pkg/query-service/app/logparsingpipeline/db.go b/pkg/query-service/app/logparsingpipeline/db.go
index df187f0de3..618060d105 100644
--- a/pkg/query-service/app/logparsingpipeline/db.go
+++ b/pkg/query-service/app/logparsingpipeline/db.go
@@ -99,7 +99,7 @@ func (r *Repo) insertPipeline(
insertRow.RawConfig)
if err != nil {
- zap.S().Errorf("error in inserting pipeline data: ", zap.Error(err))
+ zap.L().Error("error in inserting pipeline data", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to insert pipeline"))
}
@@ -171,19 +171,19 @@ func (r *Repo) GetPipeline(
err := r.db.SelectContext(ctx, &pipelines, pipelineQuery, id)
if err != nil {
- zap.S().Errorf("failed to get ingestion pipeline from db", err)
+ zap.L().Error("failed to get ingestion pipeline from db", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to get ingestion pipeline from db"))
}
if len(pipelines) == 0 {
- zap.S().Warnf("No row found for ingestion pipeline id", id)
+ zap.L().Warn("No row found for ingestion pipeline id", zap.String("id", id))
return nil, model.NotFoundError(fmt.Errorf("No row found for ingestion pipeline id %v", id))
}
if len(pipelines) == 1 {
err := pipelines[0].ParseRawConfig()
if err != nil {
- zap.S().Errorf("invalid pipeline config found", id, err)
+ zap.L().Error("invalid pipeline config found", zap.String("id", id), zap.Error(err))
return nil, model.InternalError(
errors.Wrap(err, "found an invalid pipeline config"),
)
diff --git a/pkg/query-service/app/metrics/v4/query_builder.go b/pkg/query-service/app/metrics/v4/query_builder.go
index ae9ee9b69a..c58c98c93f 100644
--- a/pkg/query-service/app/metrics/v4/query_builder.go
+++ b/pkg/query-service/app/metrics/v4/query_builder.go
@@ -21,14 +21,10 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P
start, end = common.AdjustedMetricTimeRange(start, end, mq.StepInterval, mq.TimeAggregation)
- if mq.ShiftBy != 0 {
- start = start - mq.ShiftBy*1000
- end = end - mq.ShiftBy*1000
- }
-
var quantile float64
- if v3.IsPercentileOperator(mq.SpaceAggregation) {
+ if v3.IsPercentileOperator(mq.SpaceAggregation) &&
+ mq.AggregateAttribute.Type != v3.AttributeKeyType(v3.MetricTypeExponentialHistogram) {
quantile = v3.GetPercentileFromOperator(mq.SpaceAggregation)
// If quantile is set, we need to group by le
// and set the space aggregation to sum
@@ -81,7 +77,8 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P
groupBy := helpers.GroupByAttributeKeyTags(groupByWithoutLe...)
orderBy := helpers.OrderByAttributeKeyTags(mq.OrderBy, groupByWithoutLe)
- if quantile != 0 {
+ // fixed-bucket histogram quantiles are calculated with UDF
+ if quantile != 0 && mq.AggregateAttribute.Type != v3.AttributeKeyType(v3.MetricTypeExponentialHistogram) {
query = fmt.Sprintf(`SELECT %s, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) as value FROM (%s) GROUP BY %s ORDER BY %s`, groupBy, quantile, query, groupBy, orderBy)
}
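For illustration, rendering the wrapper emitted above for a fixed-bucket histogram; the `groupBy`, `orderBy`, and inner query strings are stand-ins for what the builder produces, and exponential histograms skip this path entirely:

```go
package main

import "fmt"

func main() {
	// Illustrative values; these stand in for the strings built by the
	// query builder above.
	groupBy := "service_name"
	orderBy := "service_name ASC"
	inner := "SELECT service_name, le, ts, sum(value) as value FROM ..."
	quantile := 0.950

	// Same wrapper the hunk above emits for fixed-bucket histograms.
	query := fmt.Sprintf(
		`SELECT %s, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) as value FROM (%s) GROUP BY %s ORDER BY %s`,
		groupBy, quantile, inner, groupBy, orderBy,
	)
	fmt.Println(query)
}
```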
diff --git a/pkg/query-service/app/opamp/config_provider_test.go b/pkg/query-service/app/opamp/config_provider_test.go
index 6718ff1581..1a6efe122a 100644
--- a/pkg/query-service/app/opamp/config_provider_test.go
+++ b/pkg/query-service/app/opamp/config_provider_test.go
@@ -2,7 +2,6 @@ package opamp
import (
"fmt"
- "os"
"testing"
"github.com/knadh/koanf"
@@ -13,6 +12,7 @@ import (
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/app/opamp/model"
+ "go.signoz.io/signoz/pkg/query-service/utils"
"golang.org/x/exp/maps"
)
@@ -165,16 +165,8 @@ type testbed struct {
}
func newTestbed(t *testing.T) *testbed {
- // Init opamp model.
- testDBFile, err := os.CreateTemp("", "test-signoz-db-*")
- if err != nil {
- t.Fatalf("could not create temp file for test db: %v", err)
- }
- testDBFilePath := testDBFile.Name()
- t.Cleanup(func() { os.Remove(testDBFilePath) })
- testDBFile.Close()
-
- _, err = model.InitDB(testDBFilePath)
+ testDB := utils.NewQueryServiceDBForTests(t)
+ _, err := model.InitDB(testDB)
if err != nil {
t.Fatalf("could not init opamp model: %v", err)
}
diff --git a/pkg/query-service/app/opamp/configure_ingestionRules.go b/pkg/query-service/app/opamp/configure_ingestionRules.go
index bd71aa38b0..ec9c9e5b88 100644
--- a/pkg/query-service/app/opamp/configure_ingestionRules.go
+++ b/pkg/query-service/app/opamp/configure_ingestionRules.go
@@ -27,10 +27,10 @@ func UpsertControlProcessors(
// AddToTracePipeline() or RemoveFromTracesPipeline() prior to calling
// this method
- zap.S().Debug("initiating ingestion rules deployment config", signal, processors)
+ zap.L().Debug("initiating ingestion rules deployment config", zap.String("signal", signal), zap.Any("processors", processors))
if signal != string(Metrics) && signal != string(Traces) {
- zap.S().Error("received invalid signal int UpsertControlProcessors", signal)
+ zap.L().Error("received invalid signal int UpsertControlProcessors", zap.String("signal", signal))
fnerr = coreModel.BadRequest(fmt.Errorf(
"signal not supported in ingestion rules: %s", signal,
))
@@ -51,7 +51,7 @@ func UpsertControlProcessors(
}
if len(agents) > 1 && signal == string(Traces) {
- zap.S().Debug("found multiple agents. this feature is not supported for traces pipeline (sampling rules)")
+ zap.L().Debug("found multiple agents. this feature is not supported for traces pipeline (sampling rules)")
fnerr = coreModel.BadRequest(fmt.Errorf("multiple agents not supported in sampling rules"))
return
}
@@ -60,7 +60,7 @@ func UpsertControlProcessors(
agenthash, err := addIngestionControlToAgent(agent, signal, processors, false)
if err != nil {
- zap.S().Error("failed to push ingestion rules config to agent", agent.ID, err)
+ zap.L().Error("failed to push ingestion rules config to agent", zap.String("agentID", agent.ID), zap.Error(err))
continue
}
@@ -89,7 +89,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma
// add ingestion control spec
err = makeIngestionControlSpec(agentConf, Signal(signal), processors)
if err != nil {
- zap.S().Error("failed to prepare ingestion control processors for agent ", agent.ID, err)
+ zap.L().Error("failed to prepare ingestion control processors for agent", zap.String("agentID", agent.ID), zap.Error(err))
return confHash, err
}
@@ -99,7 +99,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma
return confHash, err
}
- zap.S().Debugf("sending new config", string(configR))
+ zap.L().Debug("sending new config", zap.String("config", string(configR)))
hash := sha256.New()
_, err = hash.Write(configR)
if err != nil {
@@ -140,7 +140,7 @@ func makeIngestionControlSpec(agentConf *confmap.Conf, signal Signal, processors
// merge tracesPipelinePlan with current pipeline
mergedPipeline, err := buildPipeline(signal, currentPipeline)
if err != nil {
- zap.S().Error("failed to build pipeline", signal, err)
+ zap.L().Error("failed to build pipeline", zap.String("signal", string(signal)), zap.Error(err))
return err
}
diff --git a/pkg/query-service/app/opamp/model/agent.go b/pkg/query-service/app/opamp/model/agent.go
index 1eef7bb4cf..5751bd255b 100644
--- a/pkg/query-service/app/opamp/model/agent.go
+++ b/pkg/query-service/app/opamp/model/agent.go
@@ -276,7 +276,7 @@ func (agent *Agent) processStatusUpdate(
func (agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool {
recommendedConfig, confId, err := configProvider.RecommendAgentConfig([]byte(agent.EffectiveConfig))
if err != nil {
- zap.S().Error("could not generate config recommendation for agent:", agent.ID, err)
+ zap.L().Error("could not generate config recommendation for agent", zap.String("agentID", agent.ID), zap.Error(err))
return false
}
@@ -293,7 +293,7 @@ func (agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool
if len(confId) < 1 {
// Should never happen. Handle gracefully if it does by some chance.
- zap.S().Errorf("config provider recommended a config with empty confId. Using content hash for configId")
+ zap.L().Error("config provider recommended a config with empty confId. Using content hash for configId")
hash := sha256.New()
for k, v := range cfg.Config.ConfigMap {
diff --git a/pkg/query-service/app/opamp/model/agents.go b/pkg/query-service/app/opamp/model/agents.go
index 50a554b957..e984cafce2 100644
--- a/pkg/query-service/app/opamp/model/agents.go
+++ b/pkg/query-service/app/opamp/model/agents.go
@@ -29,14 +29,9 @@ func (a *Agents) Count() int {
return len(a.connections)
}
-// InitDB initializes the database and creates the agents table.
-func InitDB(dataSourceName string) (*sqlx.DB, error) {
- var err error
-
- db, err = sqlx.Open("sqlite3", dataSourceName)
- if err != nil {
- return nil, err
- }
+// Initialize the database and create schema if needed
+func InitDB(qsDB *sqlx.DB) (*sqlx.DB, error) {
+ db = qsDB
tableSchema := `CREATE TABLE IF NOT EXISTS agents (
agent_id TEXT PRIMARY KEY UNIQUE,
@@ -46,7 +41,7 @@ func InitDB(dataSourceName string) (*sqlx.DB, error) {
effective_config TEXT NOT NULL
);`
- _, err = db.Exec(tableSchema)
+ _, err := db.Exec(tableSchema)
if err != nil {
return nil, fmt.Errorf("Error in creating agents table: %s", err.Error())
}
@@ -136,8 +131,8 @@ func (agents *Agents) RecommendLatestConfigToAll(
// Recommendation is same as current config
if string(newConfig) == agent.EffectiveConfig {
- zap.S().Infof(
- "Recommended config same as current effective config for agent %s", agent.ID,
+ zap.L().Info(
+ "Recommended config same as current effective config for agent", zap.String("agentID", agent.ID),
)
return nil
}
diff --git a/pkg/query-service/app/opamp/opamp_server.go b/pkg/query-service/app/opamp/opamp_server.go
index 2a7ba4c6fa..75d8d877be 100644
--- a/pkg/query-service/app/opamp/opamp_server.go
+++ b/pkg/query-service/app/opamp/opamp_server.go
@@ -40,7 +40,7 @@ func InitializeServer(
agents: agents,
agentConfigProvider: agentConfigProvider,
}
- opAmpServer.server = server.New(zap.S())
+ opAmpServer.server = server.New(zap.L().Sugar())
return opAmpServer
}
@@ -58,8 +58,8 @@ func (srv *Server) Start(listener string) error {
unsubscribe := srv.agentConfigProvider.SubscribeToConfigUpdates(func() {
err := srv.agents.RecommendLatestConfigToAll(srv.agentConfigProvider)
if err != nil {
- zap.S().Errorf(
- "could not roll out latest config recommendation to connected agents: %w", err,
+ zap.L().Error(
+ "could not roll out latest config recommendation to connected agents", zap.Error(err),
)
}
})
@@ -85,15 +85,14 @@ func (srv *Server) OnMessage(conn types.Connection, msg *protobufs.AgentToServer
agent, created, err := srv.agents.FindOrCreateAgent(agentID, conn)
if err != nil {
- zap.S().Error("Failed to find or create agent %q: %v", agentID, err)
+ zap.L().Error("Failed to find or create agent", zap.String("agentID", agentID), zap.Error(err))
// TODO: handle error
}
if created {
agent.CanLB = model.ExtractLbFlag(msg.AgentDescription)
- zap.S().Debugf(
- "New agent added:",
- zap.Bool("canLb", agent.CanLB),
+ zap.L().Debug(
+ "New agent added", zap.Bool("canLb", agent.CanLB),
zap.String("ID", agent.ID),
zap.Any("status", agent.CurrentStatus),
)
@@ -117,7 +116,7 @@ func Ready() bool {
return false
}
if opAmpServer.agents.Count() == 0 {
- zap.S().Warnf("no agents available, all agent config requests will be rejected")
+ zap.L().Warn("no agents available, all agent config requests will be rejected")
return false
}
return true
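The recurring change across these opamp hunks replaces the sugared logger with the structured one. A minimal before/after sketch of the two call styles:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	agentID := "agent-1"
	var err error

	// Before: sugared logger with printf-style formatting. Misuse (extra
	// args without format verbs, as in several removed call sites above)
	// produces garbled output.
	zap.S().Errorf("failed to push config to agent %s: %v", agentID, err)

	// After: structured logger with typed fields, as adopted throughout
	// this patch. Fields render consistently and are queryable.
	zap.L().Error("failed to push config to agent",
		zap.String("agentID", agentID), zap.Error(err))
}
```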
diff --git a/pkg/query-service/app/opamp/pipeline_builder.go b/pkg/query-service/app/opamp/pipeline_builder.go
index 841a9ce5c6..7654fe8c4f 100644
--- a/pkg/query-service/app/opamp/pipeline_builder.go
+++ b/pkg/query-service/app/opamp/pipeline_builder.go
@@ -89,7 +89,7 @@ func RemoveFromMetricsPipelineSpec(name string) {
func checkDuplicates(pipeline []interface{}) bool {
exists := make(map[string]bool, len(pipeline))
- zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline)
+ zap.L().Debug("checking duplicate processors in the pipeline", zap.Any("pipeline", pipeline))
for _, processor := range pipeline {
name := processor.(string)
if _, ok := exists[name]; ok {
@@ -149,7 +149,7 @@ func buildPipeline(signal Signal, current []interface{}) ([]interface{}, error)
currentPos := loc + inserts
// if disabled then remove from the pipeline
if !m.Enabled {
- zap.S().Debugf("build_pipeline: found a disabled item, removing from pipeline at position", currentPos-1, " ", m.Name)
+ zap.L().Debug("build_pipeline: found a disabled item, removing from pipeline at position", zap.Int("position", currentPos-1), zap.String("processor", m.Name))
if currentPos-1 <= 0 {
pipeline = pipeline[currentPos+1:]
} else {
@@ -170,10 +170,10 @@ func buildPipeline(signal Signal, current []interface{}) ([]interface{}, error)
// right after last matched processor (e.g. insert filters after tail_sampling for existing list of [batch, tail_sampling])
if lastMatched <= 0 {
- zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position 0:", m.Name)
+ zap.L().Debug("build_pipeline: found a new item to be inserted, inserting at position 0", zap.String("processor", m.Name))
pipeline = append([]interface{}{m.Name}, pipeline[lastMatched+1:]...)
} else {
- zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position :", lastMatched, " ", m.Name)
+ zap.L().Debug("build_pipeline: found a new item to be inserted, inserting at position", zap.Int("position", lastMatched), zap.String("processor", m.Name))
prior := make([]interface{}, len(pipeline[:lastMatched]))
next := make([]interface{}, len(pipeline[lastMatched:]))
copy(prior, pipeline[:lastMatched])
diff --git a/pkg/query-service/app/parser.go b/pkg/query-service/app/parser.go
index ad2a9fd8de..9a9f388ab5 100644
--- a/pkg/query-service/app/parser.go
+++ b/pkg/query-service/app/parser.go
@@ -829,8 +829,10 @@ func parseAggregateAttributeRequest(r *http.Request) (*v3.AggregateAttributeRequ
limit = 50
}
- if err := aggregateOperator.Validate(); err != nil {
- return nil, err
+ if dataSource != v3.DataSourceMetrics {
+ if err := aggregateOperator.Validate(); err != nil {
+ return nil, err
+ }
}
if err := dataSource.Validate(); err != nil {
@@ -861,8 +863,10 @@ func parseFilterAttributeKeyRequest(r *http.Request) (*v3.FilterAttributeKeyRequ
return nil, err
}
- if err := aggregateOperator.Validate(); err != nil {
- return nil, err
+ if dataSource != v3.DataSourceMetrics {
+ if err := aggregateOperator.Validate(); err != nil {
+ return nil, err
+ }
}
req = v3.FilterAttributeKeyRequest{
@@ -894,8 +898,10 @@ func parseFilterAttributeValueRequest(r *http.Request) (*v3.FilterAttributeValue
return nil, err
}
- if err := aggregateOperator.Validate(); err != nil {
- return nil, err
+ if dataSource != v3.DataSourceMetrics {
+ if err := aggregateOperator.Validate(); err != nil {
+ return nil, err
+ }
}
req = v3.FilterAttributeValueRequest{
@@ -1019,6 +1025,25 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
}
}
+ var timeShiftBy int64
+ if len(query.Functions) > 0 {
+ for idx := range query.Functions {
+ function := &query.Functions[idx]
+ if function.Name == v3.FunctionNameTimeShift {
+ // move the function to the beginning of the list
+ // so any other function can use the shifted time
+ var fns []v3.Function
+ fns = append(fns, *function)
+ fns = append(fns, query.Functions[:idx]...)
+ fns = append(fns, query.Functions[idx+1:]...)
+ query.Functions = fns
+ timeShiftBy = int64(function.Args[0].(float64))
+ break
+ }
+ }
+ }
+ query.ShiftBy = timeShiftBy
+
if query.Filters == nil || len(query.Filters.Items) == 0 {
continue
}
@@ -1045,25 +1070,6 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
}
}
}
-
- var timeShiftBy int64
- if len(query.Functions) > 0 {
- for idx := range query.Functions {
- function := &query.Functions[idx]
- if function.Name == v3.FunctionNameTimeShift {
- // move the function to the beginning of the list
- // so any other function can use the shifted time
- var fns []v3.Function
- fns = append(fns, *function)
- fns = append(fns, query.Functions[:idx]...)
- fns = append(fns, query.Functions[idx+1:]...)
- query.Functions = fns
- timeShiftBy = int64(function.Args[0].(float64))
- break
- }
- }
- }
- query.ShiftBy = timeShiftBy
}
}
queryRangeParams.Variables = formattedVars
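A sketch of the hoisting above: `timeShift` is moved to the front of the function list so later functions operate on shifted data, and its first argument becomes the query's `ShiftBy` (in seconds). The `timeShift` literal stands in for `v3.FunctionNameTimeShift`:

```go
package main

import "fmt"

type function struct {
	Name string
	Args []interface{}
}

// Hoist timeShift to the front and extract its shift argument.
func hoistTimeShift(fns []function) ([]function, int64) {
	for idx, fn := range fns {
		if fn.Name == "timeShift" {
			// Full-slice expression prevents the append from clobbering
			// the tail of the original slice.
			rest := append(fns[:idx:idx], fns[idx+1:]...)
			return append([]function{fn}, rest...), int64(fn.Args[0].(float64))
		}
	}
	return fns, 0
}

func main() {
	fns := []function{
		{Name: "cutOffMin", Args: []interface{}{0.5}},
		{Name: "timeShift", Args: []interface{}{86400.0}},
	}
	out, shiftBy := hoistTimeShift(fns)
	fmt.Println(out, shiftBy) // timeShift first, shiftBy = 86400
}
```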
diff --git a/pkg/query-service/app/parser_test.go b/pkg/query-service/app/parser_test.go
index 8b172027a4..5b9e776486 100644
--- a/pkg/query-service/app/parser_test.go
+++ b/pkg/query-service/app/parser_test.go
@@ -492,23 +492,23 @@ func TestParseQueryRangeParamsCompositeQuery(t *testing.T) {
expectErr: true,
errMsg: "data source is invalid",
},
- {
- desc: "invalid aggregate operator for builder query",
- compositeQuery: v3.CompositeQuery{
- PanelType: v3.PanelTypeGraph,
- QueryType: v3.QueryTypeBuilder,
- BuilderQueries: map[string]*v3.BuilderQuery{
- "A": {
- QueryName: "A",
- DataSource: "metrics",
- AggregateOperator: "invalid",
- Expression: "A",
- },
- },
- },
- expectErr: true,
- errMsg: "aggregate operator is invalid",
- },
+ // {
+ // desc: "invalid aggregate operator for builder query",
+ // compositeQuery: v3.CompositeQuery{
+ // PanelType: v3.PanelTypeGraph,
+ // QueryType: v3.QueryTypeBuilder,
+ // BuilderQueries: map[string]*v3.BuilderQuery{
+ // "A": {
+ // QueryName: "A",
+ // DataSource: "metrics",
+ // AggregateOperator: "invalid",
+ // Expression: "A",
+ // },
+ // },
+ // },
+ // expectErr: true,
+ // errMsg: "aggregate operator is invalid",
+ // },
{
desc: "invalid aggregate attribute for builder query",
compositeQuery: v3.CompositeQuery{
diff --git a/pkg/query-service/app/querier/helper.go b/pkg/query-service/app/querier/helper.go
index addd9744e3..71ee5da72d 100644
--- a/pkg/query-service/app/querier/helper.go
+++ b/pkg/query-service/app/querier/helper.go
@@ -90,11 +90,18 @@ func (q *querier) runBuilderQuery(
preferRPM = q.featureLookUp.CheckFeature(constants.PreferRPM) == nil
}
+ start := params.Start
+ end := params.End
+ if builderQuery.ShiftBy != 0 {
+ start = start - builderQuery.ShiftBy*1000
+ end = end - builderQuery.ShiftBy*1000
+ }
+
if builderQuery.DataSource == v3.DataSourceLogs {
var query string
var err error
if _, ok := cacheKeys[queryName]; !ok {
- query, err = prepareLogsQuery(ctx, params.Start, params.End, builderQuery, params, preferRPM)
+ query, err = prepareLogsQuery(ctx, start, end, builderQuery, params, preferRPM)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -109,12 +116,12 @@ func (q *querier) runBuilderQuery(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
}
- misses := q.findMissingTimeRanges(params.Start, params.End, params.Step, cachedData)
+ misses := q.findMissingTimeRanges(start, end, params.Step, cachedData)
missedSeries := make([]*v3.Series, 0)
cachedSeries := make([]*v3.Series, 0)
for _, miss := range misses {
@@ -136,7 +143,7 @@ func (q *querier) runBuilderQuery(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -147,12 +154,12 @@ func (q *querier) runBuilderQuery(
// caching the data
mergedSeriesData, marshallingErr = json.Marshal(mergedSeries)
if marshallingErr != nil {
- zap.S().Error("error marshalling merged series", zap.Error(marshallingErr))
+ zap.L().Error("error marshalling merged series", zap.Error(marshallingErr))
}
}
// response doesn't need everything
- filterCachedPoints(mergedSeries, params.Start, params.End)
+ filterCachedPoints(mergedSeries, start, end)
ch <- channelResult{
Err: nil,
@@ -165,7 +172,7 @@ func (q *querier) runBuilderQuery(
// caching the data
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
@@ -181,8 +188,8 @@ func (q *querier) runBuilderQuery(
// for ts query with group by and limit form two queries
if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
limitQuery, err := tracesV3.PrepareTracesQuery(
- params.Start,
- params.End,
+ start,
+ end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
@@ -193,8 +200,8 @@ func (q *querier) runBuilderQuery(
return
}
placeholderQuery, err := tracesV3.PrepareTracesQuery(
- params.Start,
- params.End,
+ start,
+ end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
@@ -207,8 +214,8 @@ func (q *querier) runBuilderQuery(
query = fmt.Sprintf(placeholderQuery, limitQuery)
} else {
query, err = tracesV3.PrepareTracesQuery(
- params.Start,
- params.End,
+ start,
+ end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
@@ -229,7 +236,7 @@ func (q *querier) runBuilderQuery(
// We are only caching the graph panel queries. A non-existant cache key means that the query is not cached.
// If the query is not cached, we execute the query and return the result without caching it.
if _, ok := cacheKeys[queryName]; !ok {
- query, err := metricsV3.PrepareMetricQuery(params.Start, params.End, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, metricsV3.Options{PreferRPM: preferRPM})
+ query, err := metricsV3.PrepareMetricQuery(start, end, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, metricsV3.Options{PreferRPM: preferRPM})
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -244,12 +251,12 @@ func (q *querier) runBuilderQuery(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
}
- misses := q.findMissingTimeRanges(params.Start, params.End, params.Step, cachedData)
+ misses := q.findMissingTimeRanges(start, end, params.Step, cachedData)
missedSeries := make([]*v3.Series, 0)
cachedSeries := make([]*v3.Series, 0)
for _, miss := range misses {
@@ -283,7 +290,7 @@ func (q *querier) runBuilderQuery(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
var mergedSeriesData []byte
@@ -293,12 +300,12 @@ func (q *querier) runBuilderQuery(
// caching the data
mergedSeriesData, marshallingErr = json.Marshal(mergedSeries)
if marshallingErr != nil {
- zap.S().Error("error marshalling merged series", zap.Error(marshallingErr))
+ zap.L().Error("error marshalling merged series", zap.Error(marshallingErr))
}
}
// response doesn't need everything
- filterCachedPoints(mergedSeries, params.Start, params.End)
+ filterCachedPoints(mergedSeries, start, end)
ch <- channelResult{
Err: nil,
Name: queryName,
@@ -309,7 +316,7 @@ func (q *querier) runBuilderQuery(
if missedSeriesLen > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil {
err := q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
@@ -346,7 +353,7 @@ func (q *querier) runBuilderExpression(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -372,7 +379,7 @@ func (q *querier) runBuilderExpression(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -383,7 +390,7 @@ func (q *querier) runBuilderExpression(
// caching the data
mergedSeriesData, marshallingErr = json.Marshal(mergedSeries)
if marshallingErr != nil {
- zap.S().Error("error marshalling merged series", zap.Error(marshallingErr))
+ zap.L().Error("error marshalling merged series", zap.Error(marshallingErr))
}
}
@@ -399,7 +406,7 @@ func (q *querier) runBuilderExpression(
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil {
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
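The shift applied in `runBuilderQuery` above works in milliseconds (`ShiftBy` is in seconds, `start`/`end` are epoch ms), and logs queries then compare nanosecond timestamps, which is what the test further below asserts. The arithmetic, in isolation:

```go
package main

import "fmt"

func main() {
	start := int64(1675115596722) // epoch ms
	end := start + 120*60*1000    // +2h
	shiftBy := int64(86400)       // seconds; 1 day

	shiftedStart := start - shiftBy*1000 // ms
	shiftedEnd := end - shiftBy*1000

	// Logs filters compare nanosecond timestamps, hence the extra *1e6.
	fmt.Printf("timestamp >= %d AND timestamp <= %d\n",
		shiftedStart*1000000, shiftedEnd*1000000)
}
```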
diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go
index 103660f8bc..d735e00a1f 100644
--- a/pkg/query-service/app/querier/querier.go
+++ b/pkg/query-service/app/querier/querier.go
@@ -108,7 +108,7 @@ func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3.
series.Points = points
}
if pointsWithNegativeTimestamps > 0 {
- zap.S().Errorf("found points with negative timestamps for query %s", query)
+ zap.L().Error("found points with negative timestamps for query", zap.String("query", query))
}
return result, err
}
@@ -346,7 +346,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
// Ensure NoCache is not set and cache is not nil
if !params.NoCache && q.cache != nil {
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -365,7 +365,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
// ideally we should not be getting an error here
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -375,12 +375,12 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
- zap.S().Error("error marshalling merged series", zap.Error(err))
+ zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
diff --git a/pkg/query-service/app/querier/querier_test.go b/pkg/query-service/app/querier/querier_test.go
index 605d2f5180..37514b6f23 100644
--- a/pkg/query-service/app/querier/querier_test.go
+++ b/pkg/query-service/app/querier/querier_test.go
@@ -701,3 +701,253 @@ func TestQueryRangeValueType(t *testing.T) {
}
}
}
+
+// TestQueryRangeTimeShift verifies that ShiftBy (seconds) moves the queried time window back by the same amount
+func TestQueryRangeTimeShift(t *testing.T) {
+ params := []*v3.QueryRangeParamsV3{
+ {
+ Start: 1675115596722, //31, 3:23
+ End: 1675115596722 + 120*60*1000, //31, 5:23
+ Step: 5 * time.Minute.Milliseconds(),
+ CompositeQuery: &v3.CompositeQuery{
+ QueryType: v3.QueryTypeBuilder,
+ PanelType: v3.PanelTypeGraph,
+ BuilderQueries: map[string]*v3.BuilderQuery{
+ "A": {
+ QueryName: "A",
+ StepInterval: 60,
+ DataSource: v3.DataSourceLogs,
+ AggregateAttribute: v3.AttributeKey{},
+ Filters: &v3.FilterSet{
+ Operator: "AND",
+ Items: []v3.FilterItem{},
+ },
+ AggregateOperator: v3.AggregateOperatorCount,
+ Expression: "A",
+ ShiftBy: 86400,
+ },
+ },
+ },
+ },
+ }
+ opts := QuerierOptions{
+ Reader: nil,
+ FluxInterval: 5 * time.Minute,
+ KeyGenerator: queryBuilder.NewKeyGenerator(),
+ TestingMode: true,
+ }
+ q := NewQuerier(opts)
+	// logs queries are generated with timestamps in ns
+ expectedTimeRangeInQueryString := fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000)
+
+ for i, param := range params {
+ _, err, errByName := q.QueryRange(context.Background(), param, nil)
+ if err != nil {
+ t.Errorf("expected no error, got %s", err)
+ }
+ if len(errByName) > 0 {
+ t.Errorf("expected no error, got %v", errByName)
+ }
+ if !strings.Contains(q.QueriesExecuted()[i], expectedTimeRangeInQueryString) {
+ t.Errorf("expected query to contain %s, got %s", expectedTimeRangeInQueryString, q.QueriesExecuted()[i])
+ }
+ }
+}
+
+// TestQueryRangeTimeShiftWithCache verifies that time-shifted queries merge correctly with previously cached data
+func TestQueryRangeTimeShiftWithCache(t *testing.T) {
+ params := []*v3.QueryRangeParamsV3{
+ {
+ Start: 1675115596722 + 60*60*1000 - 86400*1000, //30, 4:23
+ End: 1675115596722 + 120*60*1000 - 86400*1000, //30, 5:23
+ Step: 5 * time.Minute.Milliseconds(),
+ CompositeQuery: &v3.CompositeQuery{
+ QueryType: v3.QueryTypeBuilder,
+ PanelType: v3.PanelTypeGraph,
+ BuilderQueries: map[string]*v3.BuilderQuery{
+ "A": {
+ QueryName: "A",
+ StepInterval: 60,
+ DataSource: v3.DataSourceLogs,
+ AggregateAttribute: v3.AttributeKey{},
+ Filters: &v3.FilterSet{
+ Operator: "AND",
+ Items: []v3.FilterItem{},
+ },
+ AggregateOperator: v3.AggregateOperatorCount,
+ Expression: "A",
+ GroupBy: []v3.AttributeKey{
+ {Key: "service_name", IsColumn: false},
+ {Key: "method", IsColumn: false},
+ },
+ },
+ },
+ },
+ },
+ {
+ Start: 1675115596722, //31, 3:23
+ End: 1675115596722 + 120*60*1000, //31, 5:23
+ Step: 5 * time.Minute.Milliseconds(),
+ CompositeQuery: &v3.CompositeQuery{
+ QueryType: v3.QueryTypeBuilder,
+ PanelType: v3.PanelTypeGraph,
+ BuilderQueries: map[string]*v3.BuilderQuery{
+ "A": {
+ QueryName: "A",
+ StepInterval: 60,
+ DataSource: v3.DataSourceLogs,
+ AggregateAttribute: v3.AttributeKey{},
+ Filters: &v3.FilterSet{
+ Operator: "AND",
+ Items: []v3.FilterItem{},
+ },
+ AggregateOperator: v3.AggregateOperatorCount,
+ Expression: "A",
+ ShiftBy: 86400,
+ GroupBy: []v3.AttributeKey{
+ {Key: "service_name", IsColumn: false},
+ {Key: "method", IsColumn: false},
+ },
+ },
+ },
+ },
+ },
+ }
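+	// both runs share one in-memory cache: the first run populates it, so the second should only query the uncached remainder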
+ cache := inmemory.New(&inmemory.Options{TTL: 60 * time.Minute, CleanupInterval: 10 * time.Minute})
+ opts := QuerierOptions{
+ Cache: cache,
+ Reader: nil,
+ FluxInterval: 5 * time.Minute,
+ KeyGenerator: queryBuilder.NewKeyGenerator(),
+ TestingMode: true,
+ ReturnedSeries: []*v3.Series{
+ {
+ Labels: map[string]string{},
+ Points: []v3.Point{
+ {Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1},
+ {Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2},
+ },
+ },
+ },
+ }
+ q := NewQuerier(opts)
+
+	// logs queries are generated with timestamps in ns
+ expectedTimeRangeInQueryString := []string{
+ fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000),
+ fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+60*60*1000)-86400*1000-1)*1000000),
+ }
+
+ for i, param := range params {
+ _, err, errByName := q.QueryRange(context.Background(), param, nil)
+ if err != nil {
+ t.Errorf("expected no error, got %s", err)
+ }
+ if len(errByName) > 0 {
+ t.Errorf("expected no error, got %v", errByName)
+ }
+ if !strings.Contains(q.QueriesExecuted()[i], expectedTimeRangeInQueryString[i]) {
+ t.Errorf("expected query to contain %s, got %s", expectedTimeRangeInQueryString[i], q.QueriesExecuted()[i])
+ }
+ }
+}
+
+// TestQueryRangeTimeShiftWithLimitAndCache verifies time shift for limit queries in combination with the cache
+func TestQueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
+ params := []*v3.QueryRangeParamsV3{
+ {
+ Start: 1675115596722 + 60*60*1000 - 86400*1000, //30, 4:23
+ End: 1675115596722 + 120*60*1000 - 86400*1000, //30, 5:23
+ Step: 5 * time.Minute.Milliseconds(),
+ CompositeQuery: &v3.CompositeQuery{
+ QueryType: v3.QueryTypeBuilder,
+ PanelType: v3.PanelTypeGraph,
+ BuilderQueries: map[string]*v3.BuilderQuery{
+ "A": {
+ QueryName: "A",
+ StepInterval: 60,
+ DataSource: v3.DataSourceLogs,
+ AggregateAttribute: v3.AttributeKey{},
+ Filters: &v3.FilterSet{
+ Operator: "AND",
+ Items: []v3.FilterItem{},
+ },
+ AggregateOperator: v3.AggregateOperatorCount,
+ Expression: "A",
+ GroupBy: []v3.AttributeKey{
+ {Key: "service_name", IsColumn: false},
+ {Key: "method", IsColumn: false},
+ },
+ Limit: 5,
+ },
+ },
+ },
+ },
+ {
+ Start: 1675115596722, //31, 3:23
+ End: 1675115596722 + 120*60*1000, //31, 5:23
+ Step: 5 * time.Minute.Milliseconds(),
+ CompositeQuery: &v3.CompositeQuery{
+ QueryType: v3.QueryTypeBuilder,
+ PanelType: v3.PanelTypeGraph,
+ BuilderQueries: map[string]*v3.BuilderQuery{
+ "A": {
+ QueryName: "A",
+ StepInterval: 60,
+ DataSource: v3.DataSourceLogs,
+ AggregateAttribute: v3.AttributeKey{},
+ Filters: &v3.FilterSet{
+ Operator: "AND",
+ Items: []v3.FilterItem{},
+ },
+ AggregateOperator: v3.AggregateOperatorCount,
+ Expression: "A",
+ ShiftBy: 86400,
+ GroupBy: []v3.AttributeKey{
+ {Key: "service_name", IsColumn: false},
+ {Key: "method", IsColumn: false},
+ },
+ Limit: 5,
+ },
+ },
+ },
+ },
+ }
+ cache := inmemory.New(&inmemory.Options{TTL: 60 * time.Minute, CleanupInterval: 10 * time.Minute})
+ opts := QuerierOptions{
+ Cache: cache,
+ Reader: nil,
+ FluxInterval: 5 * time.Minute,
+ KeyGenerator: queryBuilder.NewKeyGenerator(),
+ TestingMode: true,
+ ReturnedSeries: []*v3.Series{
+ {
+ Labels: map[string]string{},
+ Points: []v3.Point{
+ {Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1},
+ {Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2},
+ },
+ },
+ },
+ }
+ q := NewQuerier(opts)
+
+	// logs queries are generated with timestamps in ns
+ expectedTimeRangeInQueryString := []string{
+ fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000),
+ fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+60*60*1000)-86400*1000-1)*1000000),
+ }
+
+ for i, param := range params {
+ _, err, errByName := q.QueryRange(context.Background(), param, nil)
+ if err != nil {
+ t.Errorf("expected no error, got %s", err)
+ }
+ if len(errByName) > 0 {
+ t.Errorf("expected no error, got %v", errByName)
+ }
+ if !strings.Contains(q.QueriesExecuted()[i], expectedTimeRangeInQueryString[i]) {
+ t.Errorf("expected query to contain %s, got %s", expectedTimeRangeInQueryString[i], q.QueriesExecuted()[i])
+ }
+ }
+}
diff --git a/pkg/query-service/app/querier/v2/helper.go b/pkg/query-service/app/querier/v2/helper.go
index 61ab056251..e564956f19 100644
--- a/pkg/query-service/app/querier/v2/helper.go
+++ b/pkg/query-service/app/querier/v2/helper.go
@@ -36,6 +36,14 @@ func (q *querier) runBuilderQuery(
preferRPM = q.featureLookUp.CheckFeature(constants.PreferRPM) == nil
}
+	// make a local copy since we should not update the global params when ShiftBy is set
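+	// NOTE: ShiftBy is in seconds while Start/End are epoch millis, hence the *1000 below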
+ start := params.Start
+ end := params.End
+ if builderQuery.ShiftBy != 0 {
+ start = start - builderQuery.ShiftBy*1000
+ end = end - builderQuery.ShiftBy*1000
+ }
+
// TODO: handle other data sources
if builderQuery.DataSource == v3.DataSourceLogs {
var query string
@@ -43,8 +51,8 @@ func (q *querier) runBuilderQuery(
// for ts query with limit replace it as it is already formed
if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
limitQuery, err := logsV3.PrepareLogsQuery(
- params.Start,
- params.End,
+ start,
+ end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
@@ -55,8 +63,8 @@ func (q *querier) runBuilderQuery(
return
}
placeholderQuery, err := logsV3.PrepareLogsQuery(
- params.Start,
- params.End,
+ start,
+ end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
@@ -69,8 +77,8 @@ func (q *querier) runBuilderQuery(
query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1)
} else {
query, err = logsV3.PrepareLogsQuery(
- params.Start,
- params.End,
+ start,
+ end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
@@ -98,8 +106,8 @@ func (q *querier) runBuilderQuery(
// for ts query with group by and limit form two queries
if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
limitQuery, err := tracesV3.PrepareTracesQuery(
- params.Start,
- params.End,
+ start,
+ end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
@@ -110,8 +118,8 @@ func (q *querier) runBuilderQuery(
return
}
placeholderQuery, err := tracesV3.PrepareTracesQuery(
- params.Start,
- params.End,
+ start,
+ end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
@@ -124,8 +132,8 @@ func (q *querier) runBuilderQuery(
query = fmt.Sprintf(placeholderQuery, limitQuery)
} else {
query, err = tracesV3.PrepareTracesQuery(
- params.Start,
- params.End,
+ start,
+ end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
@@ -146,7 +154,7 @@ func (q *querier) runBuilderQuery(
// We are only caching the graph panel queries. A non-existent cache key means that the query is not cached.
// If the query is not cached, we execute the query and return the result without caching it.
if _, ok := cacheKeys[queryName]; !ok {
- query, err := metricsV4.PrepareMetricQuery(params.Start, params.End, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, metricsV3.Options{PreferRPM: preferRPM})
+ query, err := metricsV4.PrepareMetricQuery(start, end, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, metricsV3.Options{PreferRPM: preferRPM})
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -161,12 +169,12 @@ func (q *querier) runBuilderQuery(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
}
- misses := q.findMissingTimeRanges(params.Start, params.End, params.Step, cachedData)
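+	// compute misses against the shifted start/end so cached chunks line up with the time-shifted window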
+ misses := q.findMissingTimeRanges(start, end, params.Step, cachedData)
missedSeries := make([]*v3.Series, 0)
cachedSeries := make([]*v3.Series, 0)
for _, miss := range misses {
@@ -200,7 +208,7 @@ func (q *querier) runBuilderQuery(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -213,12 +221,12 @@ func (q *querier) runBuilderQuery(
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
- zap.S().Error("error marshalling merged series", zap.Error(err))
+ zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
@@ -255,7 +263,7 @@ func (q *querier) runBuilderExpression(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -281,7 +289,7 @@ func (q *querier) runBuilderExpression(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -294,12 +302,12 @@ func (q *querier) runBuilderExpression(
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
- zap.S().Error("error marshalling merged series", zap.Error(err))
+ zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go
index 50f19b89b1..e45153da7d 100644
--- a/pkg/query-service/app/querier/v2/querier.go
+++ b/pkg/query-service/app/querier/v2/querier.go
@@ -108,7 +108,7 @@ func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3.
series.Points = points
}
if pointsWithNegativeTimestamps > 0 {
- zap.S().Errorf("found points with negative timestamps for query %s", query)
+ zap.L().Error("found points with negative timestamps for query", zap.String("query", query))
}
return result, err
}
@@ -326,7 +326,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
// Ensure NoCache is not set and cache is not nil
if !params.NoCache && q.cache != nil {
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -345,7 +345,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
// ideally we should not be getting an error here
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -355,12 +355,12 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
- zap.S().Error("error marshalling merged series", zap.Error(err))
+ zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
diff --git a/pkg/query-service/app/queryBuilder/query_builder.go b/pkg/query-service/app/queryBuilder/query_builder.go
index 988acfb458..693bc88f44 100644
--- a/pkg/query-service/app/queryBuilder/query_builder.go
+++ b/pkg/query-service/app/queryBuilder/query_builder.go
@@ -183,6 +183,13 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
PreferRPMFeatureEnabled := err == nil
// Build queries for each builder query
for queryName, query := range compositeQuery.BuilderQueries {
+		// make a local copy since we should not update the global params when ShiftBy is set
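+		// NOTE: ShiftBy is in seconds while Start/End are epoch millis, hence the *1000 below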
+ start := params.Start
+ end := params.End
+ if query.ShiftBy != 0 {
+ start = start - query.ShiftBy*1000
+ end = end - query.ShiftBy*1000
+ }
if query.Expression == queryName {
switch query.DataSource {
case v3.DataSourceTraces:
@@ -192,12 +199,12 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
}
// for ts query with group by and limit form two queries
if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
- limitQuery, err := qb.options.BuildTraceQuery(params.Start, params.End, compositeQuery.PanelType, query,
+ limitQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, query,
keys, tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
- placeholderQuery, err := qb.options.BuildTraceQuery(params.Start, params.End, compositeQuery.PanelType,
+ placeholderQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType,
query, keys, tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
@@ -205,7 +212,7 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
query := fmt.Sprintf(placeholderQuery, limitQuery)
queries[queryName] = query
} else {
- queryString, err := qb.options.BuildTraceQuery(params.Start, params.End, compositeQuery.PanelType,
+ queryString, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType,
query, keys, tracesV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
if err != nil {
return nil, err
@@ -215,31 +222,31 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
case v3.DataSourceLogs:
// for ts query with limit replace it as it is already formed
if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
- limitQuery, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
+ limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
- placeholderQuery, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
+ placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
query := fmt.Sprintf(placeholderQuery, limitQuery)
queries[queryName] = query
} else {
- queryString, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
+ queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
if err != nil {
return nil, err
}
queries[queryName] = queryString
}
case v3.DataSourceMetrics:
- queryString, err := qb.options.BuildMetricQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, metricsV3.Options{PreferRPM: PreferRPMFeatureEnabled})
+ queryString, err := qb.options.BuildMetricQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, metricsV3.Options{PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
queries[queryName] = queryString
default:
- zap.S().Errorf("Unknown data source %s", query.DataSource)
+ zap.L().Error("Unknown data source", zap.String("dataSource", string(query.DataSource)))
}
}
}
diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go
index eb50a775ce..549e74e976 100644
--- a/pkg/query-service/app/server.go
+++ b/pkg/query-service/app/server.go
@@ -9,7 +9,9 @@ import (
"net"
"net/http"
_ "net/http/pprof" // http profiler
+ "net/url"
"os"
+ "strings"
"time"
"github.com/gorilla/handlers"
@@ -21,6 +23,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/agentConf"
"go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
+ "go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/app/opamp"
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
@@ -112,7 +115,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
var reader interfaces.Reader
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
- zap.S().Info("Using ClickHouse as datastore ...")
+ zap.L().Info("Using ClickHouse as datastore ...")
clickhouseReader := clickhouseReader.NewReader(
localDB,
serverOptions.PromConfigPath,
@@ -155,8 +158,15 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
if err != nil {
return nil, err
}
- // ingestion pipelines manager
- logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(localDB, "sqlite")
+
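+	// the integrations controller also supplies pipelines for installed integrations to the log pipelines controller below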
+ integrationsController, err := integrations.NewController(localDB)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't create integrations controller: %w", err)
+ }
+
+ logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
+ localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
+ )
if err != nil {
return nil, err
}
@@ -173,6 +183,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
AppDao: dao.DB(),
RuleManager: rm,
FeatureFlags: fm,
+ IntegrationsController: integrationsController,
LogsParsingPipelineController: logParsingPipelineController,
Cache: c,
FluxInterval: fluxInterval,
@@ -204,7 +215,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
s.privateHTTP = privateServer
- _, err = opAmpModel.InitDB(constants.RELATIONAL_DATASOURCE_PATH)
+ _, err = opAmpModel.InitDB(localDB)
if err != nil {
return nil, err
}
@@ -257,6 +268,7 @@ func (s *Server) createPublicServer(api *APIHandler) (*http.Server, error) {
r := NewRouter()
+ r.Use(LogCommentEnricher)
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware)
@@ -266,6 +278,7 @@ func (s *Server) createPublicServer(api *APIHandler) (*http.Server, error) {
api.RegisterRoutes(r, am)
api.RegisterMetricsRoutes(r, am)
api.RegisterLogsRoutes(r, am)
+ api.RegisterIntegrationRoutes(r, am)
api.RegisterQueryRangeV3Routes(r, am)
api.RegisterQueryRangeV4Routes(r, am)
@@ -291,7 +304,66 @@ func loggingMiddleware(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
- zap.S().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
+ zap.L().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
+ })
+}
+
+func LogCommentEnricher(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ referrer := r.Header.Get("Referer")
+
+ var path, dashboardID, alertID, page, client, viewName, tab string
+
+ if referrer != "" {
+ referrerURL, _ := url.Parse(referrer)
+ client = "browser"
+ path = referrerURL.Path
+
+ if strings.Contains(path, "/dashboard") {
+ // Split the path into segments
+ pathSegments := strings.Split(referrerURL.Path, "/")
+ // The dashboard ID should be the segment after "/dashboard/"
+ // Loop through pathSegments to find "dashboard" and then take the next segment as the ID
+ for i, segment := range pathSegments {
+ if segment == "dashboard" && i < len(pathSegments)-1 {
+ // Return the next segment, which should be the dashboard ID
+ dashboardID = pathSegments[i+1]
+ }
+ }
+ page = "dashboards"
+ } else if strings.Contains(path, "/alerts") {
+ urlParams := referrerURL.Query()
+ alertID = urlParams.Get("ruleId")
+ page = "alerts"
+ } else if strings.Contains(path, "logs") && strings.Contains(path, "explorer") {
+ page = "logs-explorer"
+ viewName = referrerURL.Query().Get("viewName")
+ } else if strings.Contains(path, "/trace") || strings.Contains(path, "traces-explorer") {
+ page = "traces-explorer"
+ viewName = referrerURL.Query().Get("viewName")
+ } else if strings.Contains(path, "/services") {
+ page = "services"
+ tab = referrerURL.Query().Get("tab")
+ if tab == "" {
+ tab = "OVER_METRICS"
+ }
+ }
+ } else {
+ client = "api"
+ }
+
+ kvs := map[string]string{
+ "path": path,
+ "dashboardID": dashboardID,
+ "alertID": alertID,
+ "source": page,
+ "client": client,
+ "viewName": viewName,
+ "servicesTab": tab,
+ }
+
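+		// attach the metadata to the request context under the "log_comment" key for downstream handlers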
+ r = r.WithContext(context.WithValue(r.Context(), "log_comment", kvs))
+ next.ServeHTTP(w, r)
})
}
@@ -303,7 +375,7 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
- zap.S().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
+ zap.L().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
})
}
@@ -354,30 +426,33 @@ func extractQueryRangeV3Data(path string, r *http.Request) (map[string]interface
signozMetricsUsed := false
signozLogsUsed := false
- dataSources := []string{}
+ signozTracesUsed := false
if postData != nil {
if postData.CompositeQuery != nil {
data["queryType"] = postData.CompositeQuery.QueryType
data["panelType"] = postData.CompositeQuery.PanelType
- signozLogsUsed, signozMetricsUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
+ signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
}
}
- if signozMetricsUsed || signozLogsUsed {
+ if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
if signozMetricsUsed {
- dataSources = append(dataSources, "metrics")
telemetry.GetInstance().AddActiveMetricsUser()
}
if signozLogsUsed {
- dataSources = append(dataSources, "logs")
telemetry.GetInstance().AddActiveLogsUser()
}
- data["dataSources"] = dataSources
+ if signozTracesUsed {
+ telemetry.GetInstance().AddActiveTracesUser()
+ }
+ data["metricsUsed"] = signozMetricsUsed
+ data["logsUsed"] = signozLogsUsed
+ data["tracesUsed"] = signozTracesUsed
userEmail, err := auth.GetEmailFromJwt(r.Context())
if err == nil {
- telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_V3, data, userEmail, true)
+ telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail)
}
}
return data, true
@@ -475,7 +550,7 @@ func (s *Server) initListeners() error {
return err
}
- zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
+ zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
// listen on private port to support internal services
privateHostPort := s.serverOptions.PrivateHostPort
@@ -488,7 +563,7 @@ func (s *Server) initListeners() error {
if err != nil {
return err
}
- zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
+ zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
return nil
}
@@ -500,7 +575,7 @@ func (s *Server) Start() error {
if !s.serverOptions.DisableRules {
s.ruleManager.Start()
} else {
- zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
+ zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
}
err := s.initListeners()
@@ -514,23 +589,23 @@ func (s *Server) Start() error {
}
go func() {
- zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
+ zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
switch err := s.httpServer.Serve(s.httpConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
default:
- zap.S().Error("Could not start HTTP server", zap.Error(err))
+ zap.L().Error("Could not start HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
}()
go func() {
- zap.S().Info("Starting pprof server", zap.String("addr", constants.DebugHttpPort))
+ zap.L().Info("Starting pprof server", zap.String("addr", constants.DebugHttpPort))
err = http.ListenAndServe(constants.DebugHttpPort, nil)
if err != nil {
- zap.S().Error("Could not start pprof server", zap.Error(err))
+ zap.L().Error("Could not start pprof server", zap.Error(err))
}
}()
@@ -540,14 +615,14 @@ func (s *Server) Start() error {
}
fmt.Println("starting private http")
go func() {
- zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
+ zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
switch err := s.privateHTTP.Serve(s.privateConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
- zap.S().Info("private http server closed")
+ zap.L().Info("private http server closed")
default:
- zap.S().Error("Could not start private HTTP server", zap.Error(err))
+ zap.L().Error("Could not start private HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
@@ -555,10 +630,10 @@ func (s *Server) Start() error {
}()
go func() {
- zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", constants.OpAmpWsEndpoint))
+ zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", constants.OpAmpWsEndpoint))
err := s.opampServer.Start(constants.OpAmpWsEndpoint)
if err != nil {
- zap.S().Info("opamp ws server failed to start", err)
+ zap.L().Info("opamp ws server failed to start", zap.Error(err))
s.unavailableChannel <- healthcheck.Unavailable
}
}()
@@ -631,7 +706,7 @@ func makeRulesManager(
return nil, fmt.Errorf("rule manager error: %v", err)
}
- zap.S().Info("rules manager is ready")
+ zap.L().Info("rules manager is ready")
return manager, nil
}
diff --git a/pkg/query-service/auth/auth.go b/pkg/query-service/auth/auth.go
index 6b96a6da85..0a90c8c730 100644
--- a/pkg/query-service/auth/auth.go
+++ b/pkg/query-service/auth/auth.go
@@ -40,7 +40,7 @@ type InviteEmailData struct {
// The root user should be able to invite people to create account on SigNoz cluster.
func Invite(ctx context.Context, req *model.InviteRequest) (*model.InviteResponse, error) {
- zap.S().Debugf("Got an invite request for email: %s\n", req.Email)
+ zap.L().Debug("Got an invite request for email", zap.String("email", req.Email))
token, err := utils.RandomHex(opaqueTokenSize)
if err != nil {
@@ -110,13 +110,13 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string)
tmpl, err := template.ParseFiles(constants.InviteEmailTemplate)
if err != nil {
- zap.S().Errorf("failed to send email", err)
+ zap.L().Error("failed to send email", zap.Error(err))
return
}
var body bytes.Buffer
if err := tmpl.Execute(&body, data); err != nil {
- zap.S().Errorf("failed to send email", err)
+ zap.L().Error("failed to send email", zap.Error(err))
return
}
@@ -126,7 +126,7 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string)
body.String(),
)
if err != nil {
- zap.S().Errorf("failed to send email", err)
+ zap.L().Error("failed to send email", zap.Error(err))
return
}
return
@@ -134,7 +134,7 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string)
// RevokeInvite is used to revoke the invitation for the given email.
func RevokeInvite(ctx context.Context, email string) error {
- zap.S().Debugf("RevokeInvite method invoked for email: %s\n", email)
+ zap.L().Debug("RevokeInvite method invoked for email", zap.String("email", email))
if !isValidEmail(email) {
return ErrorInvalidInviteToken
@@ -148,7 +148,7 @@ func RevokeInvite(ctx context.Context, email string) error {
// GetInvite returns an invitation object for the given token.
func GetInvite(ctx context.Context, token string) (*model.InvitationResponseObject, error) {
- zap.S().Debugf("GetInvite method invoked for token: %s\n", token)
+ zap.L().Debug("GetInvite method invoked for token", zap.String("token", token))
inv, apiErr := dao.DB().GetInviteFromToken(ctx, token)
if apiErr != nil {
@@ -234,24 +234,23 @@ func ResetPassword(ctx context.Context, req *model.ResetPasswordRequest) error {
return nil
}
-func ChangePassword(ctx context.Context, req *model.ChangePasswordRequest) error {
-
+func ChangePassword(ctx context.Context, req *model.ChangePasswordRequest) *model.ApiError {
user, apiErr := dao.DB().GetUser(ctx, req.UserId)
if apiErr != nil {
- return errors.Wrap(apiErr.Err, "failed to query user from the DB")
+ return apiErr
}
if user == nil || !passwordMatch(user.Password, req.OldPassword) {
- return ErrorInvalidCreds
+ return model.ForbiddenError(ErrorInvalidCreds)
}
hash, err := PasswordHash(req.NewPassword)
if err != nil {
- return errors.Wrap(err, "Failed to generate password hash")
+ return model.InternalError(errors.New("Failed to generate password hash"))
}
if apiErr := dao.DB().UpdateUserPassword(ctx, hash, user.Id); apiErr != nil {
- return apiErr.Err
+ return apiErr
}
return nil
@@ -283,13 +282,13 @@ func RegisterFirstUser(ctx context.Context, req *RegisterRequest) (*model.User,
org, apierr := dao.DB().CreateOrg(ctx,
&model.Organization{Name: req.OrgName})
if apierr != nil {
- zap.S().Debugf("CreateOrg failed, err: %v\n", zap.Error(apierr.ToError()))
+ zap.L().Error("CreateOrg failed", zap.Error(apierr.ToError()))
return nil, apierr
}
group, apiErr := dao.DB().GetGroupByName(ctx, groupName)
if apiErr != nil {
- zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
+ zap.L().Error("GetGroupByName failed", zap.Error(apiErr.Err))
return nil, apiErr
}
@@ -298,7 +297,7 @@ func RegisterFirstUser(ctx context.Context, req *RegisterRequest) (*model.User,
hash, err = PasswordHash(req.Password)
if err != nil {
- zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err))
+ zap.L().Error("failed to generate password hash when registering a user", zap.Error(err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
@@ -329,7 +328,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
invite, err := ValidateInvite(ctx, req)
if err != nil {
- zap.S().Errorf("failed to validate invite token", err)
+ zap.L().Error("failed to validate invite token", zap.Error(err))
return nil, model.BadRequest(model.ErrSignupFailed{})
}
@@ -338,7 +337,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
// in the same transaction at the end of this function
userPayload, apierr := dao.DB().GetUserByEmail(ctx, invite.Email)
if apierr != nil {
- zap.S().Debugf("failed to get user by email", apierr.Err)
+ zap.L().Error("failed to get user by email", zap.Error(apierr.Err))
return nil, apierr
}
@@ -348,7 +347,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
}
if invite.OrgId == "" {
- zap.S().Errorf("failed to find org in the invite")
+ zap.L().Error("failed to find org in the invite")
return nil, model.InternalError(fmt.Errorf("invalid invite, org not found"))
}
@@ -359,7 +358,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
group, apiErr := dao.DB().GetGroupByName(ctx, invite.Role)
if apiErr != nil {
- zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
+ zap.L().Error("GetGroupByName failed", zap.Error(apiErr.Err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
@@ -369,13 +368,13 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
if req.Password != "" {
hash, err = PasswordHash(req.Password)
if err != nil {
- zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err))
+ zap.L().Error("failed to generate password hash when registering a user", zap.Error(err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
} else {
hash, err = PasswordHash(utils.GeneratePassowrd())
if err != nil {
- zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err))
+ zap.L().Error("failed to generate password hash when registering a user", zap.Error(err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
}
@@ -394,13 +393,13 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
// TODO(Ahsan): Ideally create user and delete invitation should happen in a txn.
user, apiErr = dao.DB().CreateUser(ctx, user, false)
if apiErr != nil {
- zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err)
+ zap.L().Error("CreateUser failed", zap.Error(apiErr.Err))
return nil, apiErr
}
apiErr = dao.DB().DeleteInvitation(ctx, user.Email)
if apiErr != nil {
- zap.S().Debugf("delete invitation failed, err: %v\n", apiErr.Err)
+ zap.L().Error("delete invitation failed", zap.Error(apiErr.Err))
return nil, apiErr
}
@@ -429,17 +428,17 @@ func Register(ctx context.Context, req *RegisterRequest) (*model.User, *model.Ap
// Login method returns access and refresh tokens on successful login, else it errors out.
func Login(ctx context.Context, request *model.LoginRequest) (*model.LoginResponse, error) {
- zap.S().Debugf("Login method called for user: %s\n", request.Email)
+ zap.L().Debug("Login method called for user", zap.String("email", request.Email))
user, err := authenticateLogin(ctx, request)
if err != nil {
- zap.S().Debugf("Failed to authenticate login request, %v", err)
+ zap.L().Error("Failed to authenticate login request", zap.Error(err))
return nil, err
}
userjwt, err := GenerateJWTForUser(&user.User)
if err != nil {
- zap.S().Debugf("Failed to generate JWT against login creds, %v", err)
+ zap.L().Error("Failed to generate JWT against login creds", zap.Error(err))
return nil, err
}
diff --git a/pkg/query-service/auth/jwt.go b/pkg/query-service/auth/jwt.go
index 90e2f7008d..b27d43fb9d 100644
--- a/pkg/query-service/auth/jwt.go
+++ b/pkg/query-service/auth/jwt.go
@@ -60,7 +60,7 @@ func validateUser(tok string) (*model.UserPayload, error) {
func AttachJwtToContext(ctx context.Context, r *http.Request) context.Context {
token, err := ExtractJwtFromRequest(r)
if err != nil {
- zap.S().Debugf("Error while getting token from header, %v", err)
+ zap.L().Error("Error while getting token from header", zap.Error(err))
return ctx
}
diff --git a/pkg/query-service/cache/redis/redis.go b/pkg/query-service/cache/redis/redis.go
index 22278c52ed..6338eca6f3 100644
--- a/pkg/query-service/cache/redis/redis.go
+++ b/pkg/query-service/cache/redis/redis.go
@@ -59,7 +59,7 @@ func (c *cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.Ret
func (c *cache) SetTTL(cacheKey string, ttl time.Duration) {
err := c.client.Expire(context.Background(), cacheKey, ttl).Err()
if err != nil {
- zap.S().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err))
+ zap.L().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err))
}
}
@@ -67,7 +67,7 @@ func (c *cache) SetTTL(cacheKey string, ttl time.Duration) {
func (c *cache) Remove(cacheKey string) {
err := c.client.Del(context.Background(), cacheKey).Err()
if err != nil {
- zap.S().Error("error deleting cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
+ zap.L().Error("error deleting cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
}
}
@@ -102,7 +102,7 @@ func (c *cache) GetOptions() *Options {
func (c *cache) GetTTL(cacheKey string) time.Duration {
ttl, err := c.client.TTL(context.Background(), cacheKey).Result()
if err != nil {
- zap.S().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
+ zap.L().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
}
return ttl
}
diff --git a/pkg/query-service/common/metrics.go b/pkg/query-service/common/metrics.go
index 8596ba9d7c..c6b8075991 100644
--- a/pkg/query-service/common/metrics.go
+++ b/pkg/query-service/common/metrics.go
@@ -2,6 +2,7 @@ package common
import (
"math"
+ "time"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
@@ -17,3 +18,8 @@ func AdjustedMetricTimeRange(start, end, step int64, aggregaOperator v3.TimeAggr
end = end - (end % (adjustStep * 1000))
return start, end
}
+
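+// PastDayRoundOff returns the current epoch time in ms rounded down to the start of the current day (UTC).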
+func PastDayRoundOff() int64 {
+ now := time.Now().UnixMilli()
+ return int64(math.Floor(float64(now)/float64(time.Hour.Milliseconds()*24))) * time.Hour.Milliseconds() * 24
+}
diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go
index e7a482d02f..54fc819478 100644
--- a/pkg/query-service/constants/constants.go
+++ b/pkg/query-service/constants/constants.go
@@ -58,8 +58,8 @@ var InviteEmailTemplate = GetOrDefaultEnv("INVITE_EMAIL_TEMPLATE", "/root/templa
// Alert manager channel subpath
var AmChannelApiPath = GetOrDefaultEnv("ALERTMANAGER_API_CHANNEL_PATH", "v1/routes")
-var OTLPTarget = GetOrDefaultEnv("OTLP_TARGET", "")
-var LogExportBatchSize = GetOrDefaultEnv("LOG_EXPORT_BATCH_SIZE", "1000")
+var OTLPTarget = GetOrDefaultEnv("OTEL_EXPORTER_OTLP_ENDPOINT", "")
+var LogExportBatchSize = GetOrDefaultEnv("OTEL_BLRP_MAX_EXPORT_BATCH_SIZE", "512")
var RELATIONAL_DATASOURCE_PATH = GetOrDefaultEnv("SIGNOZ_LOCAL_DB_PATH", "/var/lib/signoz/signoz.db")
@@ -213,6 +213,7 @@ const (
SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME = "time_series_v4"
SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME = "time_series_v4_6hrs"
SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME = "time_series_v4_1day"
+ SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME = "distributed_time_series_v4_1day"
)
var TimeoutExcludedRoutes = map[string]bool{
@@ -223,7 +224,8 @@ var TimeoutExcludedRoutes = map[string]bool{
// alert related constants
const (
// AlertHelpPage is used in case default alert repo url is not set
- AlertHelpPage = "https://signoz.io/docs/userguide/alerts-management/#generator-url"
+ AlertHelpPage = "https://signoz.io/docs/userguide/alerts-management/#generator-url"
+ AlertTimeFormat = "2006-01-02 15:04:05"
)
func GetOrDefaultEnv(key string, fallback string) string {
@@ -307,6 +309,8 @@ var ReservedColumnTargetAliases = map[string]struct{}{
// logsPPLPfx is a short constant for logsPipelinePrefix
const LogsPPLPfx = "logstransform/pipeline_"
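+
+// Pipelines installed by integrations use ids with this prefix.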
+const IntegrationPipelineIdPrefix = "integration"
+
// The datatype present here doesn't represent the actual datatype of column in the logs table.
var StaticFieldsLogsV3 = map[string]v3.AttributeKey{
diff --git a/pkg/query-service/dao/sqlite/connection.go b/pkg/query-service/dao/sqlite/connection.go
index a7335d6426..a2545e9531 100644
--- a/pkg/query-service/dao/sqlite/connection.go
+++ b/pkg/query-service/dao/sqlite/connection.go
@@ -180,7 +180,7 @@ func (mds *ModelDaoSqlite) createGroupIfNotPresent(ctx context.Context,
return group, nil
}
- zap.S().Debugf("%s is not found, creating it", name)
+ zap.L().Debug("group is not found, creating it", zap.String("group_name", name))
group, cErr := mds.CreateGroup(ctx, &model.Group{Name: name})
if cErr != nil {
return nil, cErr.Err
diff --git a/pkg/query-service/featureManager/manager.go b/pkg/query-service/featureManager/manager.go
index 15175b1882..439b8b7bd2 100644
--- a/pkg/query-service/featureManager/manager.go
+++ b/pkg/query-service/featureManager/manager.go
@@ -43,12 +43,12 @@ func (fm *FeatureManager) GetFeatureFlags() (model.FeatureSet, error) {
}
func (fm *FeatureManager) InitFeatures(req model.FeatureSet) error {
- zap.S().Error("InitFeatures not implemented in OSS")
+ zap.L().Error("InitFeatures not implemented in OSS")
return nil
}
func (fm *FeatureManager) UpdateFeatureFlag(req model.Feature) error {
- zap.S().Error("UpdateFeatureFlag not implemented in OSS")
+ zap.L().Error("UpdateFeatureFlag not implemented in OSS")
return nil
}
@@ -63,4 +63,4 @@ func (fm *FeatureManager) GetFeatureFlag(key string) (model.Feature, error) {
}
}
return model.Feature{}, model.ErrFeatureUnavailable{Key: key}
-}
\ No newline at end of file
+}
diff --git a/pkg/query-service/integrations/alertManager/manager.go b/pkg/query-service/integrations/alertManager/manager.go
index 3b7df3ce56..d80893010e 100644
--- a/pkg/query-service/integrations/alertManager/manager.go
+++ b/pkg/query-service/integrations/alertManager/manager.go
@@ -83,13 +83,12 @@ func (m *manager) AddRoute(receiver *Receiver) *model.ApiError {
response, err := http.Post(amURL, contentType, bytes.NewBuffer(receiverString))
if err != nil {
- zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(POST %s)\n", amURL), err)
+ zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
- err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in API call to alertmanager(POST %s)\n", amURL), response.Status)
- zap.S().Error(err)
+ zap.L().Error("Error in getting 2xx response in API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return nil
@@ -102,7 +101,7 @@ func (m *manager) EditRoute(receiver *Receiver) *model.ApiError {
req, err := http.NewRequest(http.MethodPut, amURL, bytes.NewBuffer(receiverString))
if err != nil {
- zap.S().Errorf(fmt.Sprintf("Error creating new update request for API call to alertmanager(PUT %s)\n", amURL), err)
+ zap.L().Error("Error creating new update request for API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -112,13 +111,12 @@ func (m *manager) EditRoute(receiver *Receiver) *model.ApiError {
response, err := client.Do(req)
if err != nil {
- zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(PUT %s)\n", amURL), err)
+ zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
- err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in PUT API call to alertmanager(PUT %s)\n", amURL), response.Status)
- zap.S().Error(err)
+ zap.L().Error("Error in getting 2xx response in PUT API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return nil
@@ -132,7 +130,7 @@ func (m *manager) DeleteRoute(name string) *model.ApiError {
req, err := http.NewRequest(http.MethodDelete, amURL, bytes.NewBuffer(requestData))
if err != nil {
- zap.S().Errorf("Error in creating new delete request to alertmanager/v1/receivers\n", err)
+ zap.L().Error("Error in creating new delete request to alertmanager/v1/receivers", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -142,13 +140,13 @@ func (m *manager) DeleteRoute(name string) *model.ApiError {
response, err := client.Do(req)
if err != nil {
- zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(DELETE %s)\n", amURL), err)
+ zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in PUT API call to alertmanager(DELETE %s)\n", amURL), response.Status)
- zap.S().Error(err)
+ zap.L().Error("Error in getting 2xx response in PUT API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return nil
@@ -162,19 +160,19 @@ func (m *manager) TestReceiver(receiver *Receiver) *model.ApiError {
response, err := http.Post(amTestURL, contentType, bytes.NewBuffer(receiverBytes))
if err != nil {
- zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(POST %s)\n", amTestURL), err)
+ zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amTestURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 201 && response.StatusCode < 400 {
err := fmt.Errorf(fmt.Sprintf("Invalid parameters in test alert api for alertmanager(POST %s)\n", amTestURL), response.Status)
- zap.S().Error(err)
+ zap.L().Error("Invalid parameters in test alert api for alertmanager", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 400 {
err := fmt.Errorf(fmt.Sprintf("Received Server Error response for API call to alertmanager(POST %s)\n", amTestURL), response.Status)
- zap.S().Error(err)
+ zap.L().Error("Received Server Error response for API call to alertmanager", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
diff --git a/pkg/query-service/integrations/alertManager/notifier.go b/pkg/query-service/integrations/alertManager/notifier.go
index 148d489ed0..e86cf28c5e 100644
--- a/pkg/query-service/integrations/alertManager/notifier.go
+++ b/pkg/query-service/integrations/alertManager/notifier.go
@@ -87,11 +87,11 @@ func NewNotifier(o *NotifierOptions, logger log.Logger) (*Notifier, error) {
amset, err := newAlertmanagerSet(o.AlertManagerURLs, timeout, logger)
if err != nil {
- zap.S().Errorf("failed to parse alert manager urls")
+ zap.L().Error("failed to parse alert manager urls")
return n, err
}
n.alertmanagers = amset
- zap.S().Info("Starting notifier with alert manager:", o.AlertManagerURLs)
+ zap.L().Info("Starting notifier with alert manager", zap.Strings("urls", o.AlertManagerURLs))
return n, nil
}
@@ -123,7 +123,7 @@ func (n *Notifier) nextBatch() []*Alert {
// Run dispatches notifications continuously.
func (n *Notifier) Run() {
- zap.S().Info("msg: Initiating alert notifier...")
+ zap.L().Info("msg: Initiating alert notifier...")
for {
select {
case <-n.ctx.Done():
@@ -133,7 +133,7 @@ func (n *Notifier) Run() {
alerts := n.nextBatch()
if !n.sendAll(alerts...) {
- zap.S().Warn("msg: dropped alerts", "\t count:", len(alerts))
+ zap.L().Warn("msg: dropped alerts", zap.Int("count", len(alerts)))
// n.metrics.dropped.Add(float64(len(alerts)))
}
// If the queue still has items left, kick off the next iteration.
@@ -205,7 +205,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool {
b, err := json.Marshal(alerts)
if err != nil {
- zap.S().Errorf("msg", "Encoding alerts failed", "err", err)
+ zap.L().Error("Encoding alerts failed", zap.Error(err))
return false
}
@@ -229,7 +229,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool {
go func(ams *alertmanagerSet, am Manager) {
u := am.URLPath(alertPushEndpoint).String()
if err := n.sendOne(ctx, ams.client, u, b); err != nil {
- zap.S().Errorf("alertmanager", u, "count", len(alerts), "msg", "Error calling alert API", "err", err)
+ zap.L().Error("Error calling alert API", zap.String("alertmanager", u), zap.Int("count", len(alerts)), zap.Error(err))
} else {
atomic.AddUint64(&numSuccess, 1)
}
diff --git a/pkg/query-service/interfaces/interface.go b/pkg/query-service/interfaces/interface.go
index 9d0d65c39c..dfe24c9064 100644
--- a/pkg/query-service/interfaces/interface.go
+++ b/pkg/query-service/interfaces/interface.go
@@ -23,7 +23,7 @@ type Reader interface {
GetInstantQueryMetricsResult(ctx context.Context, query *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
GetServiceOverview(ctx context.Context, query *model.GetServiceOverviewParams, skipConfig *model.SkipConfig) (*[]model.ServiceOverviewItem, *model.ApiError)
- GetTopLevelOperations(ctx context.Context, skipConfig *model.SkipConfig) (*map[string][]string, *model.ApiError)
+ GetTopLevelOperations(ctx context.Context, skipConfig *model.SkipConfig, start, end time.Time) (*map[string][]string, *map[string][]string, *model.ApiError)
GetServices(ctx context.Context, query *model.GetServicesParams, skipConfig *model.SkipConfig) (*[]model.ServiceItem, *model.ApiError)
GetTopOperations(ctx context.Context, query *model.GetTopOperationsParams) (*[]model.TopOperationsItem, *model.ApiError)
GetUsage(ctx context.Context, query *model.GetUsageParams) (*[]model.UsageItem, error)
@@ -67,6 +67,9 @@ type Reader interface {
GetMetricAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
GetMetricAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
+ // Returns `MetricStatus` for latest received metric among `metricNames`. Useful for status calculations
+ GetLatestReceivedMetric(ctx context.Context, metricNames []string) (*model.MetricStatus, *model.ApiError)
+
// QB V3 metrics/traces/logs
GetTimeSeriesResultV3(ctx context.Context, query string) ([]*v3.Series, error)
GetListResultV3(ctx context.Context, query string) ([]*v3.Row, error)
@@ -74,6 +77,7 @@ type Reader interface {
GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error)
GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error)
+ GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error)
GetTotalSpans(ctx context.Context) (uint64, error)
GetTotalLogs(ctx context.Context) (uint64, error)
GetTotalSamples(ctx context.Context) (uint64, error)
diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go
index f0602c4dcd..ec68c61939 100644
--- a/pkg/query-service/main.go
+++ b/pkg/query-service/main.go
@@ -18,7 +18,7 @@ import (
)
func initZapLog() *zap.Logger {
- config := zap.NewDevelopmentConfig()
+ config := zap.NewProductionConfig()
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
config.EncoderConfig.TimeKey = "timestamp"
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
@@ -85,9 +85,9 @@ func main() {
auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
if len(auth.JwtSecret) == 0 {
- zap.S().Warn("No JWT secret key is specified.")
+ zap.L().Warn("No JWT secret key is specified.")
} else {
- zap.S().Info("No JWT secret key set successfully.")
+ zap.L().Info("JWT secret key set successfully.")
}
server, err := app.NewServer(serverOptions)
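For the `zap.L()` calls throughout this diff to pick up this configuration, the logger built here must be installed as the process-global logger, presumably via `zap.ReplaceGlobals` (that wiring is outside this hunk). A sketch of the pattern; note that `NewProductionConfig` defaults to a JSON encoder at Info level with sampling enabled, whereas `NewDevelopmentConfig` uses a console encoder at Debug level:

```go
config := zap.NewProductionConfig() // JSON encoder, Info level, sampling on
config.EncoderConfig.TimeKey = "timestamp"
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
logger, err := config.Build()
if err != nil {
	panic(err)
}
defer logger.Sync()
zap.ReplaceGlobals(logger) // after this, zap.L() returns the configured logger
```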
diff --git a/pkg/query-service/model/featureSet.go b/pkg/query-service/model/featureSet.go
index 26cd70b908..2d0f4a55be 100644
--- a/pkg/query-service/model/featureSet.go
+++ b/pkg/query-service/model/featureSet.go
@@ -21,6 +21,7 @@ const AlertChannelWebhook = "ALERT_CHANNEL_WEBHOOK"
const AlertChannelPagerduty = "ALERT_CHANNEL_PAGERDUTY"
const AlertChannelMsTeams = "ALERT_CHANNEL_MSTEAMS"
const AlertChannelOpsgenie = "ALERT_CHANNEL_OPSGENIE"
+const AlertChannelEmail = "ALERT_CHANNEL_EMAIL"
var BasicPlan = FeatureSet{
Feature{
@@ -100,6 +101,13 @@ var BasicPlan = FeatureSet{
UsageLimit: -1,
Route: "",
},
+ Feature{
+ Name: AlertChannelEmail,
+ Active: true,
+ Usage: 0,
+ UsageLimit: -1,
+ Route: "",
+ },
Feature{
Name: AlertChannelMsTeams,
Active: false,
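The email alert channel ships enabled (`Active: true`) in the basic plan. Assuming `FeatureSet` is a slice of `Feature`, as the composite literal above suggests, checking a flag is a linear scan; the helper name below is ours, not the source's:

```go
// isActive reports whether a named feature is active in a FeatureSet.
func isActive(fs model.FeatureSet, name string) bool {
	for _, f := range fs {
		if f.Name == name && f.Active {
			return true
		}
	}
	return false
}

// After this change: isActive(model.BasicPlan, model.AlertChannelEmail) == true
```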
diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go
index ae99473720..1f3970e0d4 100644
--- a/pkg/query-service/model/response.go
+++ b/pkg/query-service/model/response.go
@@ -112,6 +112,13 @@ func UnavailableError(err error) *ApiError {
}
}
+func ForbiddenError(err error) *ApiError {
+ return &ApiError{
+ Typ: ErrorForbidden,
+ Err: err,
+ }
+}
+
func WrapApiError(err *ApiError, msg string) *ApiError {
return &ApiError{
Typ: err.Type(),
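`ForbiddenError` joins the existing `ApiError` constructors such as `UnavailableError`. A hypothetical handler-side use, sketched; only the constructor itself comes from the diff (imports elided):

```go
func requireAdmin(isAdmin bool) *model.ApiError {
	if !isAdmin {
		return model.ForbiddenError(errors.New("admin access required"))
	}
	return nil
}
```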
@@ -171,16 +178,21 @@ type AlertingRuleResponse struct {
// Value float64 `json:"value"`
}
+type DataWarning struct {
+ TopLevelOps []string `json:"topLevelOps"`
+}
+
type ServiceItem struct {
- ServiceName string `json:"serviceName" ch:"serviceName"`
- Percentile99 float64 `json:"p99" ch:"p99"`
- AvgDuration float64 `json:"avgDuration" ch:"avgDuration"`
- NumCalls uint64 `json:"numCalls" ch:"numCalls"`
- CallRate float64 `json:"callRate" ch:"callRate"`
- NumErrors uint64 `json:"numErrors" ch:"numErrors"`
- ErrorRate float64 `json:"errorRate" ch:"errorRate"`
- Num4XX uint64 `json:"num4XX" ch:"num4xx"`
- FourXXRate float64 `json:"fourXXRate" ch:"fourXXRate"`
+ ServiceName string `json:"serviceName" ch:"serviceName"`
+ Percentile99 float64 `json:"p99" ch:"p99"`
+ AvgDuration float64 `json:"avgDuration" ch:"avgDuration"`
+ NumCalls uint64 `json:"numCalls" ch:"numCalls"`
+ CallRate float64 `json:"callRate" ch:"callRate"`
+ NumErrors uint64 `json:"numErrors" ch:"numErrors"`
+ ErrorRate float64 `json:"errorRate" ch:"errorRate"`
+ Num4XX uint64 `json:"num4XX" ch:"num4xx"`
+ FourXXRate float64 `json:"fourXXRate" ch:"fourXXRate"`
+ DataWarning DataWarning `json:"dataWarning"`
}
type ServiceErrorItem struct {
Time time.Time `json:"time" ch:"time"`
@@ -506,6 +518,12 @@ type MetricPoint struct {
Value float64
}
+type MetricStatus struct {
+ MetricName string
+ LastReceivedTsMillis int64
+ LastReceivedLabels map[string]string
+}
+
// MarshalJSON implements json.Marshaler.
func (p *MetricPoint) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(p.Value, 'f', -1, 64)
@@ -623,11 +641,18 @@ type AlertsInfo struct {
TracesBasedAlerts int `json:"tracesBasedAlerts"`
}
+type SavedViewsInfo struct {
+ TotalSavedViews int `json:"totalSavedViews"`
+ TracesSavedViews int `json:"tracesSavedViews"`
+ LogsSavedViews int `json:"logsSavedViews"`
+}
+
type DashboardsInfo struct {
- TotalDashboards int `json:"totalDashboards"`
- LogsBasedPanels int `json:"logsBasedPanels"`
- MetricBasedPanels int `json:"metricBasedPanels"`
- TracesBasedPanels int `json:"tracesBasedPanels"`
+ TotalDashboards int `json:"totalDashboards"`
+ TotalDashboardsWithPanelAndName int `json:"totalDashboardsWithPanelAndName"` // dashboards that have at least one panel and a name other than the sample title
+ LogsBasedPanels int `json:"logsBasedPanels"`
+ MetricBasedPanels int `json:"metricBasedPanels"`
+ TracesBasedPanels int `json:"tracesBasedPanels"`
}
type TagTelemetryData struct {
diff --git a/pkg/query-service/model/v3/v3.go b/pkg/query-service/model/v3/v3.go
index c01660d6e7..43e7a940ab 100644
--- a/pkg/query-service/model/v3/v3.go
+++ b/pkg/query-service/model/v3/v3.go
@@ -509,11 +509,11 @@ const (
SpaceAggregationMin SpaceAggregation = "min"
SpaceAggregationMax SpaceAggregation = "max"
SpaceAggregationCount SpaceAggregation = "count"
- SpaceAggregationPercentile50 SpaceAggregation = "percentile_50"
- SpaceAggregationPercentile75 SpaceAggregation = "percentile_75"
- SpaceAggregationPercentile90 SpaceAggregation = "percentile_90"
- SpaceAggregationPercentile95 SpaceAggregation = "percentile_95"
- SpaceAggregationPercentile99 SpaceAggregation = "percentile_99"
+ SpaceAggregationPercentile50 SpaceAggregation = "p50"
+ SpaceAggregationPercentile75 SpaceAggregation = "p75"
+ SpaceAggregationPercentile90 SpaceAggregation = "p90"
+ SpaceAggregationPercentile95 SpaceAggregation = "p95"
+ SpaceAggregationPercentile99 SpaceAggregation = "p99"
)
func (s SpaceAggregation) Validate() error {
@@ -654,19 +654,22 @@ func (b *BuilderQuery) Validate() error {
}
if b.DataSource == DataSourceMetrics {
// if AggregateOperator is specified, then the request is using v3 payload
- if b.AggregateOperator != "" {
- if err := b.AggregateOperator.Validate(); err != nil {
- return fmt.Errorf("aggregate operator is invalid: %w", err)
- }
- } else {
- if err := b.TimeAggregation.Validate(); err != nil {
- return fmt.Errorf("time aggregation is invalid: %w", err)
- }
+ // if b.AggregateOperator != "" && b.SpaceAggregation == SpaceAggregationUnspecified {
+ // if err := b.AggregateOperator.Validate(); err != nil {
+ // return fmt.Errorf("aggregate operator is invalid: %w", err)
+ // }
+ // } else {
+ // // the time aggregation is not needed for percentile operators
+ // if !IsPercentileOperator(b.SpaceAggregation) {
+ // if err := b.TimeAggregation.Validate(); err != nil {
+ // return fmt.Errorf("time aggregation is invalid: %w", err)
+ // }
+ // }
- if err := b.SpaceAggregation.Validate(); err != nil {
- return fmt.Errorf("space aggregation is invalid: %w", err)
- }
- }
+ // if err := b.SpaceAggregation.Validate(); err != nil {
+ // return fmt.Errorf("space aggregation is invalid: %w", err)
+ // }
+ // }
} else {
if err := b.AggregateOperator.Validate(); err != nil {
return fmt.Errorf("aggregate operator is invalid: %w", err)
@@ -689,7 +692,7 @@ func (b *BuilderQuery) Validate() error {
}
}
- if b.DataSource == DataSourceMetrics && len(b.GroupBy) > 0 {
+ if b.DataSource == DataSourceMetrics && len(b.GroupBy) > 0 && b.SpaceAggregation == SpaceAggregationUnspecified {
if b.AggregateOperator == AggregateOperatorNoOp || b.AggregateOperator == AggregateOperatorRate {
return fmt.Errorf("group by requires aggregate operator other than noop or rate")
}
@@ -723,13 +726,30 @@ func (b *BuilderQuery) Validate() error {
if len(function.Args) == 0 {
return fmt.Errorf("timeShiftBy param missing in query")
}
+ _, ok := function.Args[0].(float64)
+ if !ok {
+ // if string, attempt to convert to float
+ timeShiftBy, err := strconv.ParseFloat(function.Args[0].(string), 64)
+ if err != nil {
+ return fmt.Errorf("timeShiftBy param should be a number")
+ }
+ function.Args[0] = timeShiftBy
+ }
} else if function.Name == FunctionNameEWMA3 ||
function.Name == FunctionNameEWMA5 ||
function.Name == FunctionNameEWMA7 {
if len(function.Args) == 0 {
return fmt.Errorf("alpha param missing in query")
}
- alpha := function.Args[0].(float64)
+ alpha, ok := function.Args[0].(float64)
+ if !ok {
+ // if string, attempt to convert to float
+ parsed, err := strconv.ParseFloat(function.Args[0].(string), 64)
+ if err != nil {
+ return fmt.Errorf("alpha param should be a float")
+ }
+ // assign, don't re-declare: the range check below must see the parsed value
+ alpha = parsed
+ function.Args[0] = alpha
+ }
if alpha < 0 || alpha > 1 {
return fmt.Errorf("alpha param should be between 0 and 1")
}
@@ -740,6 +760,15 @@ func (b *BuilderQuery) Validate() error {
if len(function.Args) == 0 {
return fmt.Errorf("threshold param missing in query")
}
+ _, ok := function.Args[0].(float64)
+ if !ok {
+ // if string, attempt to convert to float
+ threshold, err := strconv.ParseFloat(function.Args[0].(string), 64)
+ if err != nil {
+ return fmt.Errorf("threshold param should be a float")
+ }
+ function.Args[0] = threshold
+ }
}
}
}
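All three branches above share one coercion pattern: a function argument may arrive from JSON as a number (`float64`) or as a numeric string, and must be normalized to `float64`. A consolidated sketch of that pattern (`toFloat` is our name, not the source's); unlike the bare `Args[0].(string)` assertions above, it also degrades gracefully when the argument is neither type:

```go
// toFloat normalizes a query-function argument to float64.
func toFloat(arg interface{}) (float64, error) {
	switch v := arg.(type) {
	case float64:
		return v, nil
	case string:
		return strconv.ParseFloat(v, 64)
	default:
		return 0, fmt.Errorf("expected a number, got %T", arg)
	}
}
```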
diff --git a/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go b/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go
index 0139792dfa..e853a37685 100644
--- a/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go
+++ b/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go
@@ -143,11 +143,11 @@ func exprFormattedValue(v interface{}) string {
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool:
return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
default:
- zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
+ zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
return ""
}
default:
- zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
+ zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
return ""
}
}
diff --git a/pkg/query-service/rules/alerting.go b/pkg/query-service/rules/alerting.go
index 623d5dea21..b2f511c6c0 100644
--- a/pkg/query-service/rules/alerting.go
+++ b/pkg/query-service/rules/alerting.go
@@ -15,15 +15,9 @@ import (
// this file contains common structs and methods used by
// rule engine
-// how long before re-sending the alert
-const resolvedRetention = 15 * time.Minute
-
const (
- // AlertMetricName is the metric name for synthetic alert timeseries.
- alertMetricName = "ALERTS"
-
- // AlertForStateMetricName is the metric name for 'for' state of alert.
- alertForStateMetricName = "ALERTS_FOR_STATE"
+ // how long before re-sending the alert
+ resolvedRetention = 15 * time.Minute
TestAlertPostFix = "_TEST_ALERT"
)
@@ -142,9 +136,11 @@ type RuleCondition struct {
CompositeQuery *v3.CompositeQuery `json:"compositeQuery,omitempty" yaml:"compositeQuery,omitempty"`
CompareOp CompareOp `yaml:"op,omitempty" json:"op,omitempty"`
Target *float64 `yaml:"target,omitempty" json:"target,omitempty"`
- MatchType `json:"matchType,omitempty"`
- TargetUnit string `json:"targetUnit,omitempty"`
- SelectedQuery string `json:"selectedQueryName,omitempty"`
+ AlertOnAbsent bool `yaml:"alertOnAbsent,omitempty" json:"alertOnAbsent,omitempty"`
+ AbsentFor uint64 `yaml:"absentFor,omitempty" json:"absentFor,omitempty"`
+ MatchType MatchType `json:"matchType,omitempty"`
+ TargetUnit string `json:"targetUnit,omitempty"`
+ SelectedQuery string `json:"selectedQueryName,omitempty"`
}
func (rc *RuleCondition) IsValid() bool {
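With `AlertOnAbsent` and `AbsentFor`, a rule condition can fire when data stops arriving entirely. A sketch of an opted-in payload round-tripping through the struct tags above; the enum values are illustrative, and `AbsentFor` is read as minutes by the `thresholdRule.go` change further down:

```go
payload := []byte(`{
	"op": "1",
	"target": 100,
	"matchType": "1",
	"selectedQueryName": "A",
	"alertOnAbsent": true,
	"absentFor": 10
}`)
var cond RuleCondition
if err := json.Unmarshal(payload, &cond); err != nil {
	panic(err)
}
// cond.AlertOnAbsent == true, cond.AbsentFor == 10
```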
diff --git a/pkg/query-service/rules/apiParams.go b/pkg/query-service/rules/apiParams.go
index 6000ec280f..af7e9378f6 100644
--- a/pkg/query-service/rules/apiParams.go
+++ b/pkg/query-service/rules/apiParams.go
@@ -10,7 +10,6 @@ import (
"github.com/pkg/errors"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
- "go.uber.org/zap"
"go.signoz.io/signoz/pkg/query-service/utils/times"
"go.signoz.io/signoz/pkg/query-service/utils/timestamp"
@@ -32,7 +31,7 @@ func newApiErrorBadData(err error) *model.ApiError {
// PostableRule is used to create alerting rule from HTTP api
type PostableRule struct {
- Alert string `yaml:"alert,omitempty" json:"alert,omitempty"`
+ AlertName string `yaml:"alert,omitempty" json:"alert,omitempty"`
AlertType string `yaml:"alertType,omitempty" json:"alertType,omitempty"`
Description string `yaml:"description,omitempty" json:"description,omitempty"`
RuleType RuleType `yaml:"ruleType,omitempty" json:"ruleType,omitempty"`
@@ -50,6 +49,8 @@ type PostableRule struct {
PreferredChannels []string `json:"preferredChannels,omitempty"`
+ Version string `json:"version,omitempty"`
+
// legacy
Expr string `yaml:"expr,omitempty" json:"expr,omitempty"`
OldYaml string `json:"yaml,omitempty"`
@@ -72,18 +73,15 @@ func parseIntoRule(initRule PostableRule, content []byte, kind string) (*Postabl
var err error
if kind == "json" {
if err = json.Unmarshal(content, rule); err != nil {
- zap.S().Debugf("postable rule content", string(content), "\t kind:", kind)
return nil, []error{fmt.Errorf("failed to load json")}
}
} else if kind == "yaml" {
if err = yaml.Unmarshal(content, rule); err != nil {
- zap.S().Debugf("postable rule content", string(content), "\t kind:", kind)
return nil, []error{fmt.Errorf("failed to load yaml")}
}
} else {
return nil, []error{fmt.Errorf("invalid data type")}
}
- zap.S().Debugf("postable rule(parsed):", rule)
if rule.RuleCondition == nil && rule.Expr != "" {
// account for legacy rules
@@ -124,8 +122,6 @@ func parseIntoRule(initRule PostableRule, content []byte, kind string) (*Postabl
}
}
- zap.S().Debugf("postable rule:", rule, "\t condition", rule.RuleCondition.String())
-
if errs := rule.Validate(); len(errs) > 0 {
return nil, errs
}
@@ -192,7 +188,7 @@ func (r *PostableRule) Validate() (errs []error) {
}
func testTemplateParsing(rl *PostableRule) (errs []error) {
- if rl.Alert == "" {
+ if rl.AlertName == "" {
// Not an alerting rule.
return errs
}
@@ -204,7 +200,7 @@ func testTemplateParsing(rl *PostableRule) (errs []error) {
tmpl := NewTemplateExpander(
context.TODO(),
defs+text,
- "__alert_"+rl.Alert,
+ "__alert_"+rl.AlertName,
tmplData,
times.Time(timestamp.FromTime(time.Now())),
nil,
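Note that the `Alert` to `AlertName` rename only touches the Go identifier: the `yaml:"alert"`/`json:"alert"` tags are unchanged, so stored rules and API payloads keep deserializing as before. A quick check (imports elided):

```go
var r PostableRule
_ = json.Unmarshal([]byte(`{"alert":"High error rate"}`), &r)
fmt.Println(r.AlertName) // High error rate
```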
diff --git a/pkg/query-service/rules/db.go b/pkg/query-service/rules/db.go
index f0b1bb3281..cf903884fd 100644
--- a/pkg/query-service/rules/db.go
+++ b/pkg/query-service/rules/db.go
@@ -73,7 +73,7 @@ func (r *ruleDB) CreateRuleTx(ctx context.Context, rule string) (int64, Tx, erro
stmt, err := tx.Prepare(`INSERT into rules (created_at, created_by, updated_at, updated_by, data) VALUES($1,$2,$3,$4,$5);`)
if err != nil {
- zap.S().Errorf("Error in preparing statement for INSERT to rules\n", err)
+ zap.L().Error("Error in preparing statement for INSERT to rules", zap.Error(err))
tx.Rollback()
return lastInsertId, nil, err
}
@@ -82,14 +82,14 @@ func (r *ruleDB) CreateRuleTx(ctx context.Context, rule string) (int64, Tx, erro
result, err := stmt.Exec(createdAt, userEmail, updatedAt, userEmail, rule)
if err != nil {
- zap.S().Errorf("Error in Executing prepared statement for INSERT to rules\n", err)
+ zap.L().Error("Error in Executing prepared statement for INSERT to rules", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return lastInsertId, nil, err
}
lastInsertId, err = result.LastInsertId()
if err != nil {
- zap.S().Errorf("Error in getting last insert id for INSERT to rules\n", err)
+ zap.L().Error("Error in getting last insert id for INSERT to rules", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return lastInsertId, nil, err
}
@@ -122,14 +122,14 @@ func (r *ruleDB) EditRuleTx(ctx context.Context, rule string, id string) (string
//}
stmt, err := r.Prepare(`UPDATE rules SET updated_by=$1, updated_at=$2, data=$3 WHERE id=$4;`)
if err != nil {
- zap.S().Errorf("Error in preparing statement for UPDATE to rules\n", err)
+ zap.L().Error("Error in preparing statement for UPDATE to rules", zap.Error(err))
// tx.Rollback()
return groupName, nil, err
}
defer stmt.Close()
if _, err := stmt.Exec(userEmail, updatedAt, rule, idInt); err != nil {
- zap.S().Errorf("Error in Executing prepared statement for UPDATE to rules\n", err)
+ zap.L().Error("Error in Executing prepared statement for UPDATE to rules", zap.Error(err))
// tx.Rollback() // return an error too, we may want to wrap them
return groupName, nil, err
}
@@ -158,7 +158,7 @@ func (r *ruleDB) DeleteRuleTx(ctx context.Context, id string) (string, Tx, error
defer stmt.Close()
if _, err := stmt.Exec(idInt); err != nil {
- zap.S().Errorf("Error in Executing prepared statement for DELETE to rules\n", err)
+ zap.L().Error("Error in Executing prepared statement for DELETE to rules", zap.Error(err))
// tx.Rollback()
return groupName, nil, err
}
@@ -175,7 +175,7 @@ func (r *ruleDB) GetStoredRules(ctx context.Context) ([]StoredRule, error) {
err := r.Select(&rules, query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, err
}
@@ -193,10 +193,10 @@ func (r *ruleDB) GetStoredRule(ctx context.Context, id string) (*StoredRule, err
query := fmt.Sprintf("SELECT id, created_at, created_by, updated_at, updated_by, data FROM rules WHERE id=%d", intId)
err = r.Get(rule, query)
- // zap.S().Info(query)
+ // zap.L().Info(query)
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, err
}
diff --git a/pkg/query-service/rules/manager.go b/pkg/query-service/rules/manager.go
index d87d1820e0..cad02523d7 100644
--- a/pkg/query-service/rules/manager.go
+++ b/pkg/query-service/rules/manager.go
@@ -125,7 +125,7 @@ func NewManager(o *ManagerOptions) (*Manager, error) {
func (m *Manager) Start() {
if err := m.initiate(); err != nil {
- zap.S().Errorf("failed to initialize alerting rules manager: %v", err)
+ zap.L().Error("failed to initialize alerting rules manager", zap.Error(err))
}
m.run()
}
@@ -154,40 +154,40 @@ func (m *Manager) initiate() error {
if len(errs) > 0 {
if errs[0].Error() == "failed to load json" {
- zap.S().Info("failed to load rule in json format, trying yaml now:", rec.Data)
+ zap.L().Info("failed to load rule in json format, trying yaml now", zap.String("name", taskName))
// see if rule is stored in yaml format
parsedRule, errs = parsePostableRule([]byte(rec.Data), "yaml")
if parsedRule == nil {
- zap.S().Errorf("failed to parse and initialize yaml rule:", errs)
+ zap.L().Error("failed to parse and initialize yaml rule", zap.String("name", taskName), zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
loadErrors = append(loadErrors, errs[0])
continue
} else {
// rule stored in yaml, so migrate it to json
- zap.S().Info("msg:", "migrating rule from JSON to yaml", "\t rule:", rec.Data, "\t parsed rule:", parsedRule)
+ zap.L().Info("migrating rule from yaml to json", zap.String("name", taskName))
ruleJSON, err := json.Marshal(parsedRule)
if err == nil {
taskName, _, err := m.ruleDB.EditRuleTx(context.Background(), string(ruleJSON), fmt.Sprintf("%d", rec.Id))
if err != nil {
- zap.S().Errorf("msg: failed to migrate rule ", "/t error:", err)
+ zap.L().Error("failed to migrate rule", zap.String("name", taskName), zap.Error(err))
} else {
- zap.S().Info("msg:", "migrated rule from yaml to json", "/t rule:", taskName)
+ zap.L().Info("migrated rule from yaml to json", zap.String("name", taskName))
}
}
}
} else {
- zap.S().Errorf("failed to parse and initialize rule:", errs)
+ zap.L().Error("failed to parse and initialize rule", zap.String("name", taskName), zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
loadErrors = append(loadErrors, errs[0])
continue
}
}
if !parsedRule.Disabled {
err := m.addTask(parsedRule, taskName)
if err != nil {
- zap.S().Errorf("failed to load the rule definition (%s): %v", taskName, err)
+ zap.L().Error("failed to load the rule definition", zap.String("name", taskName), zap.Error(err))
}
}
}
@@ -213,13 +213,13 @@ func (m *Manager) Stop() {
m.mtx.Lock()
defer m.mtx.Unlock()
- zap.S().Info("msg: ", "Stopping rule manager...")
+ zap.L().Info("Stopping rule manager...")
for _, t := range m.tasks {
t.Stop()
}
- zap.S().Info("msg: ", "Rule manager stopped")
+ zap.L().Info("Rule manager stopped")
}
// EditRuleDefinition writes the rule definition to the
@@ -230,7 +230,7 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error
currentRule, err := m.GetRule(ctx, id)
if err != nil {
- zap.S().Errorf("msg: ", "failed to get the rule from rule db", "\t ruleid: ", id)
+ zap.L().Error("failed to get the rule from rule db", zap.String("id", id), zap.Error(err))
return err
}
@@ -243,7 +243,7 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error
}
if len(errs) > 0 {
- zap.S().Errorf("failed to parse rules:", errs)
+ zap.L().Error("failed to parse rules", zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
return errs[0]
}
@@ -264,13 +264,13 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error
if !checkIfTraceOrLogQB(&currentRule.PostableRule) {
err = m.updateFeatureUsage(parsedRule, 1)
if err != nil {
- zap.S().Errorf("error updating feature usage: %v", err)
+ zap.L().Error("error updating feature usage", zap.Error(err))
}
// update feature usage if the new rule is not a trace or log query builder and the current rule is
} else if !checkIfTraceOrLogQB(parsedRule) {
err = m.updateFeatureUsage(&currentRule.PostableRule, -1)
if err != nil {
- zap.S().Errorf("error updating feature usage: %v", err)
+ zap.L().Error("error updating feature usage", zap.Error(err))
}
}
@@ -281,12 +281,12 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error {
m.mtx.Lock()
defer m.mtx.Unlock()
- zap.S().Debugf("msg:", "editing a rule task", "\t task name:", taskName)
+ zap.L().Debug("editing a rule task", zap.String("name", taskName))
newTask, err := m.prepareTask(false, rule, taskName)
if err != nil {
- zap.S().Errorf("msg:", "loading tasks failed", "\t err:", err)
+ zap.L().Error("loading tasks failed", zap.Error(err))
return errors.New("error preparing rule with given parameters, previous rule set restored")
}
@@ -294,7 +294,7 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error {
// it to finish the current iteration. Then copy it into the new group.
oldTask, ok := m.tasks[taskName]
if !ok {
- zap.S().Warnf("msg:", "rule task not found, a new task will be created ", "\t task name:", taskName)
+ zap.L().Warn("rule task not found, a new task will be created", zap.String("name", taskName))
}
delete(m.tasks, taskName)
@@ -319,14 +319,14 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error {
idInt, err := strconv.Atoi(id)
if err != nil {
- zap.S().Errorf("msg: ", "delete rule received an rule id in invalid format, must be a number", "\t ruleid:", id)
+ zap.L().Error("delete rule received a rule id in invalid format, must be a number", zap.String("id", id), zap.Error(err))
return fmt.Errorf("delete rule received an rule id in invalid format, must be a number")
}
// update feature usage
rule, err := m.GetRule(ctx, id)
if err != nil {
- zap.S().Errorf("msg: ", "failed to get the rule from rule db", "\t ruleid: ", id)
+ zap.L().Error("failed to get the rule from rule db", zap.String("id", id), zap.Error(err))
return err
}
@@ -336,13 +336,13 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error {
}
if _, _, err := m.ruleDB.DeleteRuleTx(ctx, id); err != nil {
- zap.S().Errorf("msg: ", "failed to delete the rule from rule db", "\t ruleid: ", id)
+ zap.L().Error("failed to delete the rule from rule db", zap.String("id", id), zap.Error(err))
return err
}
err = m.updateFeatureUsage(&rule.PostableRule, -1)
if err != nil {
- zap.S().Errorf("error updating feature usage: %v", err)
+ zap.L().Error("error updating feature usage", zap.Error(err))
}
return nil
@@ -351,16 +351,16 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error {
func (m *Manager) deleteTask(taskName string) {
m.mtx.Lock()
defer m.mtx.Unlock()
- zap.S().Debugf("msg:", "deleting a rule task", "\t task name:", taskName)
+ zap.L().Debug("deleting a rule task", zap.String("name", taskName))
oldg, ok := m.tasks[taskName]
if ok {
oldg.Stop()
delete(m.tasks, taskName)
delete(m.rules, ruleIdFromTaskName(taskName))
- zap.S().Debugf("msg:", "rule task deleted", "\t task name:", taskName)
+ zap.L().Debug("rule task deleted", zap.String("name", taskName))
} else {
- zap.S().Info("msg: ", "rule not found for deletion", "\t name:", taskName)
+ zap.L().Info("rule not found for deletion", zap.String("name", taskName))
}
}
@@ -376,7 +376,7 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*GettableRule
}
if len(errs) > 0 {
- zap.S().Errorf("failed to parse rules:", errs)
+ zap.L().Error("failed to parse rules", zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
return nil, errs[0]
}
@@ -400,7 +400,7 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*GettableRule
// update feature usage
err = m.updateFeatureUsage(parsedRule, 1)
if err != nil {
- zap.S().Errorf("error updating feature usage: %v", err)
+ zap.L().Error("error updating feature usage", zap.Error(err))
}
gettableRule := &GettableRule{
Id: fmt.Sprintf("%d", lastInsertId),
@@ -438,10 +438,10 @@ func (m *Manager) checkFeatureUsage(parsedRule *PostableRule) error {
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
- zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
+ zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
return model.BadRequest(err)
default:
- zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
+ zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
return model.BadRequest(err)
}
}
@@ -466,11 +466,11 @@ func (m *Manager) addTask(rule *PostableRule, taskName string) error {
m.mtx.Lock()
defer m.mtx.Unlock()
- zap.S().Debugf("msg:", "adding a new rule task", "\t task name:", taskName)
+ zap.L().Debug("adding a new rule task", zap.String("name", taskName))
newTask, err := m.prepareTask(false, rule, taskName)
if err != nil {
- zap.S().Errorf("msg:", "creating rule task failed", "\t name:", taskName, "\t err", err)
+ zap.L().Error("creating rule task failed", zap.String("name", taskName), zap.Error(err))
return errors.New("error loading rules, previous rule set restored")
}
@@ -503,8 +503,8 @@ func (m *Manager) prepareTask(acquireLock bool, r *PostableRule, taskName string
rules := make([]Rule, 0)
var task Task
- if r.Alert == "" {
- zap.S().Errorf("msg:", "task load failed, at least one rule must be set", "\t task name:", taskName)
+ if r.AlertName == "" {
+ zap.L().Error("task load failed, at least one rule must be set", zap.String("name", taskName))
return task, fmt.Errorf("task load failed, at least one rule must be set")
}
@@ -536,7 +536,7 @@ func (m *Manager) prepareTask(acquireLock bool, r *PostableRule, taskName string
pr, err := NewPromRule(
ruleId,
r,
- log.With(m.logger, "alert", r.Alert),
+ log.With(m.logger, "alert", r.AlertName),
PromRuleOpts{},
)
@@ -686,7 +686,7 @@ func (m *Manager) ListRuleStates(ctx context.Context) (*GettableRules, error) {
ruleResponse := &GettableRule{}
if err := json.Unmarshal([]byte(s.Data), ruleResponse); err != nil { // Parse []byte to go struct pointer
- zap.S().Errorf("msg:", "invalid rule data", "\t err:", err)
+ zap.L().Error("failed to unmarshal rule from db", zap.Int("id", s.Id), zap.Error(err))
continue
}
@@ -779,28 +779,28 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleId string)
// retrieve rule from DB
storedJSON, err := m.ruleDB.GetStoredRule(ctx, ruleId)
if err != nil {
- zap.S().Errorf("msg:", "failed to get stored rule with given id", "\t error:", err)
+ zap.L().Error("failed to get stored rule with given id", zap.String("id", ruleId), zap.Error(err))
return nil, err
}
// storedRule holds the current stored rule from DB
storedRule := PostableRule{}
if err := json.Unmarshal([]byte(storedJSON.Data), &storedRule); err != nil {
- zap.S().Errorf("msg:", "failed to get unmarshal stored rule with given id", "\t error:", err)
+ zap.L().Error("failed to unmarshal stored rule with given id", zap.String("id", ruleId), zap.Error(err))
return nil, err
}
// patchedRule is combo of stored rule and patch received in the request
patchedRule, errs := parseIntoRule(storedRule, []byte(ruleStr), "json")
if len(errs) > 0 {
- zap.S().Errorf("failed to parse rules:", errs)
+ zap.L().Error("failed to parse rules", zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
return nil, errs[0]
}
// deploy or un-deploy task according to patched (new) rule state
if err := m.syncRuleStateWithTask(taskName, patchedRule); err != nil {
- zap.S().Errorf("failed to sync stored rule state with the task")
+ zap.L().Error("failed to sync stored rule state with the task", zap.String("taskName", taskName), zap.Error(err))
return nil, err
}
@@ -816,7 +816,7 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleId string)
// restore task state from the stored rule
if err := m.syncRuleStateWithTask(taskName, &storedRule); err != nil {
- zap.S().Errorf("msg: ", "failed to restore rule after patch failure", "\t error:", err)
+ zap.L().Error("failed to restore rule after patch failure", zap.String("taskName", taskName), zap.Error(err))
}
return nil, err
@@ -846,11 +846,11 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
parsedRule, errs := ParsePostableRule([]byte(ruleStr))
if len(errs) > 0 {
- zap.S().Errorf("msg: failed to parse rule from request:", "\t error: ", errs)
+ zap.L().Error("failed to parse rule from request", zap.Errors("errors", errs))
return 0, newApiErrorBadData(errs[0])
}
- var alertname = parsedRule.Alert
+ var alertname = parsedRule.AlertName
if alertname == "" {
// alertname is not mandatory for testing, so picking
// a random string here
@@ -858,7 +858,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
}
// append name to indicate this is test alert
- parsedRule.Alert = fmt.Sprintf("%s%s", alertname, TestAlertPostFix)
+ parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, TestAlertPostFix)
var rule Rule
var err error
@@ -866,7 +866,6 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
if parsedRule.RuleType == RuleTypeThreshold {
// add special labels for test alerts
- parsedRule.Labels[labels.AlertAdditionalInfoLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target)
parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target)
parsedRule.Labels[labels.RuleSourceLabel] = ""
parsedRule.Labels[labels.AlertRuleIdLabel] = ""
@@ -883,7 +882,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
)
if err != nil {
- zap.S().Errorf("msg: failed to prepare a new threshold rule for test:", "\t error: ", err)
+ zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", alertname), zap.Error(err))
return 0, newApiErrorBadData(err)
}
@@ -900,7 +899,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
)
if err != nil {
- zap.S().Errorf("msg: failed to prepare a new promql rule for test:", "\t error: ", err)
+ zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", alertname), zap.Error(err))
return 0, newApiErrorBadData(err)
}
} else {
@@ -912,10 +911,13 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
count, err := rule.Eval(ctx, ts, m.opts.Queriers)
if err != nil {
- zap.S().Warn("msg:", "Evaluating rule failed", "\t rule:", rule, "\t err: ", err)
+ zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err))
return 0, newApiErrorInternal(fmt.Errorf("rule evaluation failed"))
}
- alertsFound := count.(int)
+ alertsFound, ok := count.(int)
+ if !ok {
+ return 0, newApiErrorInternal(fmt.Errorf("expected evaluation to return an int count, got %T", count))
+ }
rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), m.prepareNotifyFunc())
return alertsFound, nil
diff --git a/pkg/query-service/rules/promRule.go b/pkg/query-service/rules/promRule.go
index 5607366e6b..a998de243e 100644
--- a/pkg/query-service/rules/promRule.go
+++ b/pkg/query-service/rules/promRule.go
@@ -71,7 +71,7 @@ func NewPromRule(
p := PromRule{
id: id,
- name: postableRule.Alert,
+ name: postableRule.AlertName,
source: postableRule.Source,
ruleCondition: postableRule.RuleCondition,
evalWindow: time.Duration(postableRule.EvalWindow),
@@ -94,7 +94,7 @@ func NewPromRule(
return nil, err
}
- zap.S().Info("msg:", "creating new alerting rule", "\t name:", p.name, "\t condition:", p.ruleCondition.String(), "\t query:", query)
+ zap.L().Info("creating new alerting rule", zap.String("name", p.name), zap.String("condition", p.ruleCondition.String()), zap.String("query", query))
return &p, nil
}
@@ -182,26 +182,6 @@ func (r *PromRule) Annotations() qslabels.BaseLabels {
return r.annotations
}
-func (r *PromRule) sample(alert *Alert, ts time.Time) pql.Sample {
- lb := plabels.NewBuilder(r.labels)
-
- alertLabels := alert.Labels.(plabels.Labels)
- for _, l := range alertLabels {
- lb.Set(l.Name, l.Value)
- }
-
- lb.Set(qslabels.MetricNameLabel, alertMetricName)
- lb.Set(qslabels.AlertNameLabel, r.name)
- lb.Set(qslabels.AlertStateLabel, alert.State.String())
-
- s := pql.Sample{
- Metric: lb.Labels(),
- T: timestamp.FromTime(ts),
- F: 1,
- }
- return s
-}
-
// GetEvaluationDuration returns the time in seconds it took to evaluate the alerting rule.
func (r *PromRule) GetEvaluationDuration() time.Duration {
r.mtx.Lock()
@@ -359,7 +339,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
if err != nil {
return nil, err
}
- zap.S().Info("rule:", r.Name(), "\t evaluating promql query: ", q)
+ zap.L().Info("evaluating promql query", zap.String("name", r.Name()), zap.String("query", q))
res, err := queriers.PqlEngine.RunAlertQuery(ctx, q, start, end, interval)
if err != nil {
r.SetHealth(HealthBad)
@@ -388,6 +368,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
if !shouldAlert {
continue
}
+ zap.L().Debug("alerting for series", zap.String("name", r.Name()), zap.Any("series", series))
thresholdFormatter := formatter.FromUnit(r.ruleCondition.TargetUnit)
threshold := thresholdFormatter.Format(r.targetVal(), r.ruleCondition.TargetUnit)
@@ -454,6 +435,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
}
}
+ zap.L().Debug("found alerts for rule", zap.Int("count", len(alerts)), zap.String("name", r.Name()))
// alerts[h] is ready, add or update active list now
for h, a := range alerts {
// Check whether we already have alerting state for the identifying label set.
@@ -630,7 +612,7 @@ func (r *PromRule) shouldAlert(series pql.Series) (pql.Sample, bool) {
func (r *PromRule) String() string {
ar := PostableRule{
- Alert: r.name,
+ AlertName: r.name,
RuleCondition: r.ruleCondition,
EvalWindow: Duration(r.evalWindow),
Labels: r.labels.Map(),
diff --git a/pkg/query-service/rules/promRuleTask.go b/pkg/query-service/rules/promRuleTask.go
index d4a853d844..af38488f7c 100644
--- a/pkg/query-service/rules/promRuleTask.go
+++ b/pkg/query-service/rules/promRuleTask.go
@@ -40,7 +40,7 @@ type PromRuleTask struct {
// newPromRuleTask holds rules that have promql condition
// and evalutes the rule at a given frequency
func newPromRuleTask(name, file string, frequency time.Duration, rules []Rule, opts *ManagerOptions, notify NotifyFunc) *PromRuleTask {
- zap.S().Info("Initiating a new rule group:", name, "\t frequency:", frequency)
+ zap.L().Info("Initiating a new rule group", zap.String("name", name), zap.Duration("frequency", frequency))
if time.Now() == time.Now().Add(frequency) {
frequency = DefaultFrequency
@@ -312,7 +312,7 @@ func (g *PromRuleTask) CopyState(fromTask Task) error {
// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) {
- zap.S().Info("promql rule task:", g.name, "\t eval started at:", ts)
+ zap.L().Info("promql rule task eval started", zap.String("name", g.name), zap.Time("start time", ts))
for i, rule := range g.rules {
if rule == nil {
continue
@@ -340,7 +340,7 @@ func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
- zap.S().Warn("msg", "Evaluating rule failed", "rule", rule, "err", err)
+ zap.L().Warn("Evaluating rule failed", zap.String("ruleid", rule.ID()), zap.Error(err))
// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.
diff --git a/pkg/query-service/rules/promrule_test.go b/pkg/query-service/rules/promrule_test.go
index ee843b9b64..0707933b89 100644
--- a/pkg/query-service/rules/promrule_test.go
+++ b/pkg/query-service/rules/promrule_test.go
@@ -20,7 +20,7 @@ func (l testLogger) Log(args ...interface{}) error {
func TestPromRuleShouldAlert(t *testing.T) {
postableRule := PostableRule{
- Alert: "Test Rule",
+ AlertName: "Test Rule",
AlertType: "METRIC_BASED_ALERT",
RuleType: RuleTypeProm,
EvalWindow: Duration(5 * time.Minute),
diff --git a/pkg/query-service/rules/resultTypes.go b/pkg/query-service/rules/resultTypes.go
index 78474526bd..5f39208b01 100644
--- a/pkg/query-service/rules/resultTypes.go
+++ b/pkg/query-service/rules/resultTypes.go
@@ -20,6 +20,8 @@ type Sample struct {
// Label keys as-is from the result query.
// The original labels are used to prepare the related{logs, traces} link in alert notification
MetricOrig labels.Labels
+
+ IsMissing bool
}
func (s Sample) String() string {
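`IsMissing` flags a synthetic sample that carries no real datapoint. It mirrors the construction in `thresholdRule.go` further down, where a no-data sample is emitted with only a `lastSeen` label; `lastTs` below is a stand-in for the rule's last datapoint timestamp:

```go
lbls := labels.NewBuilder(labels.Labels{})
lbls.Set("lastSeen", lastTs.Format(constants.AlertTimeFormat))
smpl := Sample{Metric: lbls.Labels(), IsMissing: true}
```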
diff --git a/pkg/query-service/rules/ruleTask.go b/pkg/query-service/rules/ruleTask.go
index 46d3a0917a..edf3957a6f 100644
--- a/pkg/query-service/rules/ruleTask.go
+++ b/pkg/query-service/rules/ruleTask.go
@@ -25,10 +25,8 @@ type RuleTask struct {
evaluationTime time.Duration
lastEvaluation time.Time
- markStale bool
- done chan struct{}
- terminated chan struct{}
- managerDone chan struct{}
+ done chan struct{}
+ terminated chan struct{}
pause bool
notify NotifyFunc
@@ -42,7 +40,7 @@ func newRuleTask(name, file string, frequency time.Duration, rules []Rule, opts
if time.Now() == time.Now().Add(frequency) {
frequency = DefaultFrequency
}
- zap.S().Info("msg:", "initiating a new rule task", "\t name:", name, "\t frequency:", frequency)
+ zap.L().Info("initiating a new rule task", zap.String("name", name), zap.Duration("frequency", frequency))
return &RuleTask{
name: name,
@@ -91,7 +89,7 @@ func (g *RuleTask) Run(ctx context.Context) {
// Wait an initial amount to have consistently slotted intervals.
evalTimestamp := g.EvalTimestamp(time.Now().UnixNano()).Add(g.frequency)
- zap.S().Debugf("group:", g.name, "\t group run to begin at: ", evalTimestamp)
+ zap.L().Debug("group run to begin at", zap.String("name", g.name), zap.Time("evalTimestamp", evalTimestamp))
select {
case <-time.After(time.Until(evalTimestamp)):
case <-g.done:
@@ -294,7 +292,7 @@ func (g *RuleTask) CopyState(fromTask Task) error {
// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
func (g *RuleTask) Eval(ctx context.Context, ts time.Time) {
- zap.S().Debugf("msg:", "rule task eval started", "\t name:", g.name, "\t start time:", ts)
+ zap.L().Debug("rule task eval started", zap.String("name", g.name), zap.Time("start time", ts))
for i, rule := range g.rules {
if rule == nil {
@@ -318,12 +316,19 @@ func (g *RuleTask) Eval(ctx context.Context, ts time.Time) {
rule.SetEvaluationTimestamp(t)
}(time.Now())
+ kvs := map[string]string{
+ "alertID": rule.ID(),
+ "source": "alerts",
+ "client": "query-service",
+ }
+ ctx = context.WithValue(ctx, "log_comment", kvs)
+
_, err := rule.Eval(ctx, ts, g.opts.Queriers)
if err != nil {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
- zap.S().Warn("msg:", "Evaluating rule failed", "\t rule:", rule, "\t err: ", err)
+ zap.L().Warn("Evaluating rule failed", zap.String("ruleid", rule.ID()), zap.Error(err))
// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.
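Each evaluation now stamps its context with a `log_comment` map so downstream query execution can attribute ClickHouse queries to the alert that issued them. A hypothetical consuming side, sketched; only the `"log_comment"` key and the `map[string]string` value shape come from the diff. (As an aside, go vet flags plain string context keys; a typed key would avoid collisions.)

```go
if kvs, ok := ctx.Value("log_comment").(map[string]string); ok {
	// e.g. forward as the ClickHouse log_comment setting so the query
	// is attributable in system.query_log
	_ = kvs["alertID"]
}
```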
diff --git a/pkg/query-service/rules/thresholdRule.go b/pkg/query-service/rules/thresholdRule.go
index 9687038a40..05fd526b79 100644
--- a/pkg/query-service/rules/thresholdRule.go
+++ b/pkg/query-service/rules/thresholdRule.go
@@ -18,6 +18,7 @@ import (
"go.uber.org/zap"
"github.com/ClickHouse/clickhouse-go/v2"
+ "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
"go.signoz.io/signoz/pkg/query-service/converter"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
@@ -31,6 +32,7 @@ import (
logsv3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
+ metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/formatter"
@@ -59,10 +61,19 @@ type ThresholdRule struct {
// map of active alerts
active map[uint64]*Alert
- queryBuilder *queryBuilder.QueryBuilder
+ queryBuilder *queryBuilder.QueryBuilder
+ version string
+ queryBuilderV4 *queryBuilder.QueryBuilder
+ // temporalityMap maps a metric name to the temporalities seen for it,
+ // to avoid fetching temporality for the same metric multiple times.
+ // Querying the v4 table on the low-cardinality temporality column
+ // should be fast, but we can still skip the query when the data is already in memory.
+ temporalityMap map[string]map[v3.Temporality]bool
opts ThresholdRuleOpts
- typ string
+
+ lastTimestampWithDatapoints time.Time
+ typ string
}
type ThresholdRuleOpts struct {
@@ -91,7 +102,7 @@ func NewThresholdRule(
t := ThresholdRule{
id: id,
- name: p.Alert,
+ name: p.AlertName,
source: p.Source,
ruleCondition: p.RuleCondition,
evalWindow: time.Duration(p.EvalWindow),
@@ -102,6 +113,8 @@ func NewThresholdRule(
active: map[uint64]*Alert{},
opts: opts,
typ: p.AlertType,
+ version: p.Version,
+ temporalityMap: make(map[string]map[v3.Temporality]bool),
}
if int64(t.evalWindow) == 0 {
@@ -115,7 +128,14 @@ func NewThresholdRule(
}
t.queryBuilder = queryBuilder.NewQueryBuilder(builderOpts, featureFlags)
- zap.S().Info("msg:", "creating new alerting rule", "\t name:", t.name, "\t condition:", t.ruleCondition.String(), "\t generatorURL:", t.GeneratorURL())
+ builderOptsV4 := queryBuilder.QueryBuilderOptions{
+ BuildMetricQuery: metricsV4.PrepareMetricQuery,
+ BuildTraceQuery: tracesV3.PrepareTracesQuery,
+ BuildLogQuery: logsv3.PrepareLogsQuery,
+ }
+ t.queryBuilderV4 = queryBuilder.NewQueryBuilder(builderOptsV4, featureFlags)
+
+ zap.L().Info("creating new ThresholdRule", zap.String("name", t.name), zap.String("id", t.id))
return &t, nil
}
@@ -274,6 +294,84 @@ func (r *ThresholdRule) ActiveAlerts() []*Alert {
return res
}
+func (r *ThresholdRule) FetchTemporality(ctx context.Context, metricNames []string, ch driver.Conn) (map[string]map[v3.Temporality]bool, error) {
+
+ metricNameToTemporality := make(map[string]map[v3.Temporality]bool)
+
+ query := fmt.Sprintf(`SELECT DISTINCT metric_name, temporality FROM %s.%s WHERE metric_name IN $1`, constants.SIGNOZ_METRIC_DBNAME, constants.SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME)
+
+ rows, err := ch.Query(ctx, query, metricNames)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var metricName, temporality string
+ err := rows.Scan(&metricName, &temporality)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := metricNameToTemporality[metricName]; !ok {
+ metricNameToTemporality[metricName] = make(map[v3.Temporality]bool)
+ }
+ metricNameToTemporality[metricName][v3.Temporality(temporality)] = true
+ }
+ return metricNameToTemporality, nil
+}
+
+// populateTemporality is the v4 counterpart of addTemporality: it fills in the temporality for metric queries, preferring delta when available
+func (r *ThresholdRule) populateTemporality(ctx context.Context, qp *v3.QueryRangeParamsV3, ch driver.Conn) error {
+
+ missingTemporality := make([]string, 0)
+ metricNameToTemporality := make(map[string]map[v3.Temporality]bool)
+ if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 {
+ for _, query := range qp.CompositeQuery.BuilderQueries {
+ // if there is no temporality specified in the query but we have it in the map
+ // then use the value from the map
+ if query.Temporality == "" && r.temporalityMap[query.AggregateAttribute.Key] != nil {
+ // We prefer delta if it is available
+ if r.temporalityMap[query.AggregateAttribute.Key][v3.Delta] {
+ query.Temporality = v3.Delta
+ } else if r.temporalityMap[query.AggregateAttribute.Key][v3.Cumulative] {
+ query.Temporality = v3.Cumulative
+ } else {
+ query.Temporality = v3.Unspecified
+ }
+ }
+ // we don't have temporality for this metric
+ if query.DataSource == v3.DataSourceMetrics && query.Temporality == "" {
+ missingTemporality = append(missingTemporality, query.AggregateAttribute.Key)
+ }
+ if _, ok := metricNameToTemporality[query.AggregateAttribute.Key]; !ok {
+ metricNameToTemporality[query.AggregateAttribute.Key] = make(map[v3.Temporality]bool)
+ }
+ }
+ }
+
+ nameToTemporality, err := r.FetchTemporality(ctx, missingTemporality, ch)
+ if err != nil {
+ return err
+ }
+
+ if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 {
+ for name := range qp.CompositeQuery.BuilderQueries {
+ query := qp.CompositeQuery.BuilderQueries[name]
+ if query.DataSource == v3.DataSourceMetrics && query.Temporality == "" {
+ if nameToTemporality[query.AggregateAttribute.Key][v3.Delta] {
+ query.Temporality = v3.Delta
+ } else if nameToTemporality[query.AggregateAttribute.Key][v3.Cumulative] {
+ query.Temporality = v3.Cumulative
+ } else {
+ query.Temporality = v3.Unspecified
+ }
+ r.temporalityMap[query.AggregateAttribute.Key] = nameToTemporality[query.AggregateAttribute.Key]
+ }
+ }
+ }
+ return nil
+}
+
// ForEachActiveAlert runs the given function on each alert.
// This should be used when you want to use the actual alerts from the ThresholdRule
// and not on its copy.
@@ -288,7 +386,7 @@ func (r *ThresholdRule) ForEachActiveAlert(f func(*Alert)) {
}
func (r *ThresholdRule) SendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) {
- zap.S().Info("msg:", "sending alerts", "\t rule:", r.Name())
+ zap.L().Info("sending alerts", zap.String("rule", r.Name()))
alerts := []*Alert{}
r.ForEachActiveAlert(func(alert *Alert) {
if r.opts.SendAlways || alert.needsSending(ts, resendDelay) {
@@ -302,7 +400,7 @@ func (r *ThresholdRule) SendAlerts(ctx context.Context, ts time.Time, resendDela
anew := *alert
alerts = append(alerts, &anew)
} else {
- zap.S().Debugf("msg: skipping send alert due to resend delay", "\t rule: ", r.Name(), "\t alert:", alert.Labels)
+ zap.L().Debug("skipping send alert due to resend delay", zap.String("rule", r.Name()), zap.Any("alert", alert.Labels))
}
})
notifyFunc(ctx, "", alerts...)
@@ -318,12 +416,12 @@ func (r *ThresholdRule) Unit() string {
func (r *ThresholdRule) CheckCondition(v float64) bool {
if math.IsNaN(v) {
- zap.S().Debugf("msg:", "found NaN in rule condition", "\t rule name:", r.Name())
+ zap.L().Debug("found NaN in rule condition", zap.String("rule", r.Name()))
return false
}
if r.ruleCondition.Target == nil {
- zap.S().Debugf("msg:", "found null target in rule condition", "\t rulename:", r.Name())
+ zap.L().Debug("found null target in rule condition", zap.String("rule", r.Name()))
return false
}
@@ -331,7 +429,7 @@ func (r *ThresholdRule) CheckCondition(v float64) bool {
value := unitConverter.Convert(converter.Value{F: *r.ruleCondition.Target, U: converter.Unit(r.ruleCondition.TargetUnit)}, converter.Unit(r.Unit()))
- zap.S().Debugf("Checking condition for rule: %s, Converter=%s, Value=%f, Target=%f, CompareOp=%s", r.Name(), unitConverter.Name(), v, value.F, r.ruleCondition.CompareOp)
+ zap.L().Info("Checking condition for rule", zap.String("rule", r.Name()), zap.String("converter", unitConverter.Name()), zap.Float64("value", v), zap.Float64("target", value.F), zap.String("compareOp", string(r.ruleCondition.CompareOp)))
switch r.ruleCondition.CompareOp {
case ValueIsEq:
return v == value.F
@@ -398,7 +496,7 @@ func (r *ThresholdRule) shouldSkipFirstRecord() bool {
func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, query string) (Vector, error) {
rows, err := db.Query(ctx, query)
if err != nil {
- zap.S().Errorf("rule:", r.Name(), "\t failed to get alert query result")
+ zap.L().Error("failed to get alert query result", zap.String("rule", r.Name()), zap.Error(err))
return nil, err
}
@@ -435,6 +533,7 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
if err := rows.Scan(vars...); err != nil {
return nil, err
}
+ r.lastTimestampWithDatapoints = time.Now()
sample := Sample{}
// Why do we maintain two labels sets? Alertmanager requires
@@ -459,8 +558,8 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
if colName == "ts" || colName == "interval" {
sample.Point.T = timval.Unix()
} else {
- lbls.Set(colName, timval.Format("2006-01-02 15:04:05"))
- lblsOrig.Set(columnNames[i], timval.Format("2006-01-02 15:04:05"))
+ lbls.Set(colName, timval.Format(constants.AlertTimeFormat))
+ lblsOrig.Set(columnNames[i], timval.Format(constants.AlertTimeFormat))
}
case *float64:
@@ -505,7 +604,7 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
lblsOrig.Set(columnNames[i], fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
}
default:
- zap.S().Errorf("ruleId:", r.ID(), "\t error: invalid var found in query result", v, columnNames[i])
+ zap.L().Error("invalid var found in query result", zap.String("ruleId", r.ID()), zap.Any("value", v), zap.Any("column", columnNames[i]))
}
}
@@ -611,7 +710,21 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
}
}
- zap.S().Debugf("ruleid:", r.ID(), "\t resultmap(potential alerts):", len(resultMap))
+ zap.L().Debug("resultmap(potential alerts)", zap.String("ruleid", r.ID()), zap.Int("count", len(resultMap)))
+
+ // if the data is missing for `For` duration then we should send alert
+ // if data has been missing for the `AbsentFor` duration then we should send an alert
+ zap.L().Info("no data found for rule condition", zap.String("ruleid", r.ID()))
+ lbls := labels.NewBuilder(labels.Labels{})
+ if !r.lastTimestampWithDatapoints.IsZero() {
+ lbls.Set("lastSeen", r.lastTimestampWithDatapoints.Format(constants.AlertTimeFormat))
+ }
+ result = append(result, Sample{
+ Metric: lbls.Labels(),
+ IsMissing: true,
+ })
+ return result, nil
+ }
for _, sample := range resultMap {
// check alert rule condition before dumping results, if sendUnmatchedResults
@@ -621,12 +734,12 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
}
}
if len(result) != 0 {
- zap.S().Infof("For rule %s, with ClickHouseQuery %s, found %d alerts", r.ID(), query, len(result))
+ zap.L().Info("found alerts", zap.String("ruleid", r.ID()), zap.String("query", query), zap.Int("count", len(result)))
}
return result, nil
}
-func (r *ThresholdRule) prepareBuilderQueries(ts time.Time) (map[string]string, error) {
+func (r *ThresholdRule) prepareBuilderQueries(ts time.Time, ch driver.Conn) (map[string]string, error) {
params := r.prepareQueryRange(ts)
if params.CompositeQuery.QueryType == v3.QueryTypeBuilder {
// check if any enrichment is required for logs if yes then enrich them
@@ -638,7 +751,17 @@ func (r *ThresholdRule) prepareBuilderQueries(ts time.Time) (map[string]string,
}
- runQueries, err := r.queryBuilder.PrepareQueries(params)
+ var runQueries map[string]string
+ var err error
+
+ if r.version == "v4" {
+ if ch != nil {
+ if err := r.populateTemporality(context.Background(), params, ch); err != nil {
+ zap.L().Error("failed to populate temporality", zap.String("ruleid", r.ID()), zap.Error(err))
+ }
+ }
+ runQueries, err = r.queryBuilderV4.PrepareQueries(params)
+ } else {
+ runQueries, err = r.queryBuilder.PrepareQueries(params)
+ }
return runQueries, err
}
@@ -717,10 +840,11 @@ func (r *ThresholdRule) prepareLinksToLogs(ts time.Time, lbls labels.Labels) str
return ""
}
+ q := r.prepareQueryRange(ts)
// Logs list view expects time in milliseconds
tr := timeRange{
- Start: ts.Add(-time.Duration(r.evalWindow)).UnixMilli(),
- End: ts.UnixMilli(),
+ Start: q.Start,
+ End: q.End,
PageSize: 100,
}
@@ -780,10 +904,11 @@ func (r *ThresholdRule) prepareLinksToTraces(ts time.Time, lbls labels.Labels) s
return ""
}
+ q := r.prepareQueryRange(ts)
// Traces list view expects time in nanoseconds
tr := timeRange{
- Start: ts.Add(-time.Duration(r.evalWindow)).UnixNano(),
- End: ts.UnixNano(),
+ Start: q.Start * time.Second.Microseconds(),
+ End: q.End * time.Second.Microseconds(),
PageSize: 100,
}
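A worked check on the conversion above: `q.Start`/`q.End` are millisecond timestamps, the traces list view wants nanoseconds, and `time.Second.Microseconds()` is 1,000,000, which is exactly the ms-to-ns factor (1 ms = 10^6 ns). `time.Millisecond.Nanoseconds()` would name the same constant more directly:

```go
startMs := int64(1_700_000_000_000)             // a millisecond timestamp
startNs := startMs * time.Second.Microseconds() // ×1_000_000, i.e. ms → ns
fmt.Println(startNs == time.UnixMilli(startMs).UnixNano()) // true
```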
@@ -854,7 +979,7 @@ func (r *ThresholdRule) prepareClickhouseQueries(ts time.Time) (map[string]strin
}
if r.ruleCondition.QueryType() != v3.QueryTypeClickHouseSQL {
- zap.S().Debugf("ruleid:", r.ID(), "\t msg: unsupported query type in prepareClickhouseQueries()")
+ zap.L().Error("unsupported query type in prepareClickhouseQueries", zap.String("ruleid", r.ID()))
return nil, fmt.Errorf("failed to prepare clickhouse queries")
}
@@ -870,18 +995,17 @@ func (r *ThresholdRule) prepareClickhouseQueries(ts time.Time) (map[string]strin
tmpl := template.New("clickhouse-query")
tmpl, err := tmpl.Parse(chQuery.Query)
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to parse clickhouse query to populate vars", err)
+ zap.L().Error("failed to parse clickhouse query to populate vars", zap.String("ruleid", r.ID()), zap.Error(err))
r.SetHealth(HealthBad)
return nil, err
}
var query bytes.Buffer
err = tmpl.Execute(&query, params.Variables)
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to populate clickhouse query", err)
+ zap.L().Error("failed to populate clickhouse query", zap.String("ruleid", r.ID()), zap.Error(err))
r.SetHealth(HealthBad)
return nil, err
}
- zap.S().Debugf("ruleid:", r.ID(), "\t query:", query.String())
queries[name] = query.String()
}
return queries, nil
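ClickHouse-SQL alert queries are plain Go `text/template` strings expanded against the request's `Variables` map, exactly as above. A self-contained sketch of that step; the query text and variable are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	chQuery := `SELECT count() FROM signoz_traces.signoz_index_v2 WHERE serviceName = '{{.service}}'`
	vars := map[string]interface{}{"service": "frontend"}

	tmpl, err := template.New("clickhouse-query").Parse(chQuery)
	if err != nil {
		panic(err)
	}
	var query bytes.Buffer
	if err := tmpl.Execute(&query, vars); err != nil {
		panic(err)
	}
	fmt.Println(query.String())
}
```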
@@ -896,15 +1020,15 @@ func (r *ThresholdRule) GetSelectedQuery() string {
var err error
if r.ruleCondition.QueryType() == v3.QueryTypeBuilder {
- queries, err = r.prepareBuilderQueries(time.Now())
+ queries, err = r.prepareBuilderQueries(time.Now(), nil)
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare metric queries", zap.Error(err))
+ zap.L().Error("failed to prepare metric queries", zap.String("ruleid", r.ID()), zap.Error(err))
return ""
}
} else if r.ruleCondition.QueryType() == v3.QueryTypeClickHouseSQL {
queries, err = r.prepareClickhouseQueries(time.Now())
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare clickhouse queries", zap.Error(err))
+ zap.L().Error("failed to prepare clickhouse queries", zap.String("ruleid", r.ID()), zap.Error(err))
return ""
}
}
@@ -950,10 +1074,10 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c
// fetch the target query based on query type
if r.ruleCondition.QueryType() == v3.QueryTypeBuilder {
- queries, err = r.prepareBuilderQueries(ts)
+ queries, err = r.prepareBuilderQueries(ts, ch)
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare metric queries", zap.Error(err))
+ zap.L().Error("failed to prepare metric queries", zap.String("ruleid", r.ID()), zap.Error(err))
return nil, fmt.Errorf("failed to prepare metric queries")
}
@@ -962,7 +1086,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c
queries, err = r.prepareClickhouseQueries(ts)
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare clickhouse queries", zap.Error(err))
+ zap.L().Error("failed to prepare clickhouse queries", zap.String("ruleid", r.ID()), zap.Error(err))
return nil, fmt.Errorf("failed to prepare clickhouse queries")
}
@@ -974,16 +1098,16 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c
return nil, fmt.Errorf("no queries could be built with the rule config")
}
- zap.S().Debugf("ruleid:", r.ID(), "\t runQueries:", queries)
+ zap.L().Info("prepared queries", zap.String("ruleid", r.ID()), zap.Any("queries", queries))
queryLabel := r.GetSelectedQuery()
- zap.S().Debugf("ruleId: ", r.ID(), "\t result query label:", queryLabel)
+ zap.L().Debug("selected query label for rule", zap.String("ruleid", r.ID()), zap.String("label", queryLabel))
if queryString, ok := queries[queryLabel]; ok {
return r.runChQuery(ctx, ch, queryString)
}
- zap.S().Errorf("ruleId: ", r.ID(), "\t invalid query label:", queryLabel, "\t queries:", queries)
+ zap.L().Error("invalid query label", zap.String("ruleid", r.ID()), zap.String("label", queryLabel), zap.Any("queries", queries))
return nil, fmt.Errorf("this is unexpected, invalid query label")
}
@@ -1012,7 +1136,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
if err != nil {
r.SetHealth(HealthBad)
r.SetLastError(err)
- zap.S().Debugf("ruleid:", r.ID(), "\t failure in buildAndRunQuery:", err)
+ zap.L().Error("failure in buildAndRunQuery", zap.String("ruleid", r.ID()), zap.Error(err))
return nil, err
}
@@ -1031,7 +1155,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
value := valueFormatter.Format(smpl.V, r.Unit())
thresholdFormatter := formatter.FromUnit(r.ruleCondition.TargetUnit)
threshold := thresholdFormatter.Format(r.targetVal(), r.ruleCondition.TargetUnit)
- zap.S().Debugf("Alert template data for rule %s: Formatter=%s, Value=%s, Threshold=%s", r.Name(), valueFormatter.Name(), value, threshold)
+ zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))
tmplData := AlertTemplateData(l, value, threshold)
// Inject some convenience variables that are easier to remember for users
@@ -1052,7 +1176,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("", err)
- zap.S().Errorf("msg:", "Expanding alert template failed", "\t err", err, "\t data", tmplData)
+ zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
}
return result
}
@@ -1069,6 +1193,11 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
annotations := make(labels.Labels, 0, len(r.annotations))
for _, a := range r.annotations {
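+ // for missing-data alerts, replace the summary/description annotations with a fixed "Missing data" message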
+ if smpl.IsMissing {
+ if a.Name == labels.AlertDescriptionLabel || a.Name == labels.AlertSummaryLabel {
+ a.Value = labels.AlertMissingData
+ }
+ }
annotations = append(annotations, labels.Label{Name: normalizeLabelName(a.Name), Value: expand(a.Value)})
}
@@ -1092,7 +1221,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
resultFPs[h] = struct{}{}
if _, ok := alerts[h]; ok {
- zap.S().Errorf("ruleId: ", r.ID(), "\t msg:", "the alert query returns duplicate records:", alerts[h])
+ zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
// We have already acquired the lock above hence using SetHealth and
// SetLastError will deadlock.
@@ -1112,7 +1241,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
}
}
- zap.S().Info("rule:", r.Name(), "\t alerts found: ", len(alerts))
+ zap.L().Info("alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))
// alerts[h] is ready, add or update active list now
for h, a := range alerts {
@@ -1160,7 +1289,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
func (r *ThresholdRule) String() string {
ar := PostableRule{
- Alert: r.name,
+ AlertName: r.name,
RuleCondition: r.ruleCondition,
EvalWindow: Duration(r.evalWindow),
Labels: r.labels.Map(),
diff --git a/pkg/query-service/rules/thresholdRule_test.go b/pkg/query-service/rules/thresholdRule_test.go
index 2b39084bec..b7d3cc5fee 100644
--- a/pkg/query-service/rules/thresholdRule_test.go
+++ b/pkg/query-service/rules/thresholdRule_test.go
@@ -14,7 +14,7 @@ import (
func TestThresholdRuleCombinations(t *testing.T) {
postableRule := PostableRule{
- Alert: "Tricky Condition Tests",
+ AlertName: "Tricky Condition Tests",
AlertType: "METRIC_BASED_ALERT",
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
@@ -339,7 +339,7 @@ func TestNormalizeLabelName(t *testing.T) {
func TestPrepareLinksToLogs(t *testing.T) {
postableRule := PostableRule{
- Alert: "Tricky Condition Tests",
+ AlertName: "Tricky Condition Tests",
AlertType: "LOGS_BASED_ALERT",
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
@@ -376,12 +376,12 @@ func TestPrepareLinksToLogs(t *testing.T) {
ts := time.UnixMilli(1705469040000)
link := rule.prepareLinksToLogs(ts, labels.Labels{})
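+ // expected window is now 2 minutes earlier; the rule appears to shift its query range back by an evaluation delay when preparing links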
- assert.Contains(t, link, "&timeRange=%7B%22start%22%3A1705468740000%2C%22end%22%3A1705469040000%2C%22pageSize%22%3A100%7D&startTime=1705468740000&endTime=1705469040000")
+ assert.Contains(t, link, "&timeRange=%7B%22start%22%3A1705468620000%2C%22end%22%3A1705468920000%2C%22pageSize%22%3A100%7D&startTime=1705468620000&endTime=1705468920000")
}
func TestPrepareLinksToTraces(t *testing.T) {
postableRule := PostableRule{
- Alert: "Links to traces test",
+ AlertName: "Links to traces test",
AlertType: "TRACES_BASED_ALERT",
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
@@ -418,5 +418,5 @@ func TestPrepareLinksToTraces(t *testing.T) {
ts := time.UnixMilli(1705469040000)
link := rule.prepareLinksToTraces(ts, labels.Labels{})
- assert.Contains(t, link, "&timeRange=%7B%22start%22%3A1705468740000000000%2C%22end%22%3A1705469040000000000%2C%22pageSize%22%3A100%7D&startTime=1705468740000000000&endTime=1705469040000000000")
+ assert.Contains(t, link, "&timeRange=%7B%22start%22%3A1705468620000000000%2C%22end%22%3A1705468920000000000%2C%22pageSize%22%3A100%7D&startTime=1705468620000000000&endTime=1705468920000000000")
}
diff --git a/pkg/query-service/telemetry/telemetry.go b/pkg/query-service/telemetry/telemetry.go
index 939d0cd39b..28affe94a2 100644
--- a/pkg/query-service/telemetry/telemetry.go
+++ b/pkg/query-service/telemetry/telemetry.go
@@ -22,41 +22,46 @@ import (
)
const (
- TELEMETRY_EVENT_PATH = "API Call"
- TELEMETRY_EVENT_USER = "User"
- TELEMETRY_EVENT_INPRODUCT_FEEDBACK = "InProduct Feedback Submitted"
- TELEMETRY_EVENT_NUMBER_OF_SERVICES = "Number of Services"
- TELEMETRY_EVENT_NUMBER_OF_SERVICES_PH = "Number of Services V2"
- TELEMETRY_EVENT_HEART_BEAT = "Heart Beat"
- TELEMETRY_EVENT_ORG_SETTINGS = "Org Settings"
- DEFAULT_SAMPLING = 0.1
- TELEMETRY_LICENSE_CHECK_FAILED = "License Check Failed"
- TELEMETRY_LICENSE_UPDATED = "License Updated"
- TELEMETRY_LICENSE_ACT_FAILED = "License Activation Failed"
- TELEMETRY_EVENT_ENVIRONMENT = "Environment"
- TELEMETRY_EVENT_LANGUAGE = "Language"
- TELEMETRY_EVENT_SERVICE = "ServiceName"
- TELEMETRY_EVENT_LOGS_FILTERS = "Logs Filters"
- TELEMETRY_EVENT_DISTRIBUTED = "Distributed"
- TELEMETRY_EVENT_QUERY_RANGE_V3 = "Query Range V3 Metadata"
- TELEMETRY_EVENT_DASHBOARDS_ALERTS = "Dashboards/Alerts Info"
- TELEMETRY_EVENT_ACTIVE_USER = "Active User"
- TELEMETRY_EVENT_ACTIVE_USER_PH = "Active User V2"
- TELEMETRY_EVENT_USER_INVITATION_SENT = "User Invitation Sent"
- TELEMETRY_EVENT_USER_INVITATION_ACCEPTED = "User Invitation Accepted"
- DEFAULT_CLOUD_EMAIL = "admin@signoz.cloud"
+ TELEMETRY_EVENT_PATH = "API Call"
+ TELEMETRY_EVENT_USER = "User"
+ TELEMETRY_EVENT_INPRODUCT_FEEDBACK = "InProduct Feedback Submitted"
+ TELEMETRY_EVENT_NUMBER_OF_SERVICES = "Number of Services"
+ TELEMETRY_EVENT_NUMBER_OF_SERVICES_PH = "Number of Services V2"
+ TELEMETRY_EVENT_HEART_BEAT = "Heart Beat"
+ TELEMETRY_EVENT_ORG_SETTINGS = "Org Settings"
+ DEFAULT_SAMPLING = 0.1
+ TELEMETRY_LICENSE_CHECK_FAILED = "License Check Failed"
+ TELEMETRY_LICENSE_UPDATED = "License Updated"
+ TELEMETRY_LICENSE_ACT_FAILED = "License Activation Failed"
+ TELEMETRY_EVENT_ENVIRONMENT = "Environment"
+ TELEMETRY_EVENT_LANGUAGE = "Language"
+ TELEMETRY_EVENT_SERVICE = "ServiceName"
+ TELEMETRY_EVENT_LOGS_FILTERS = "Logs Filters"
+ TELEMETRY_EVENT_DISTRIBUTED = "Distributed"
+ TELEMETRY_EVENT_QUERY_RANGE_API = "Query Range API"
+ TELEMETRY_EVENT_DASHBOARDS_ALERTS = "Dashboards/Alerts Info"
+ TELEMETRY_EVENT_ACTIVE_USER = "Active User"
+ TELEMETRY_EVENT_ACTIVE_USER_PH = "Active User V2"
+ TELEMETRY_EVENT_USER_INVITATION_SENT = "User Invitation Sent"
+ TELEMETRY_EVENT_USER_INVITATION_ACCEPTED = "User Invitation Accepted"
+ TELEMETRY_EVENT_SUCCESSFUL_DASHBOARD_PANEL_QUERY = "Successful Dashboard Panel Query"
+ TELEMETRY_EVENT_SUCCESSFUL_ALERT_QUERY = "Successful Alert Query"
+ DEFAULT_CLOUD_EMAIL = "admin@signoz.cloud"
)
var SAAS_EVENTS_LIST = map[string]struct{}{
- TELEMETRY_EVENT_NUMBER_OF_SERVICES: {},
- TELEMETRY_EVENT_ACTIVE_USER: {},
- TELEMETRY_EVENT_HEART_BEAT: {},
- TELEMETRY_EVENT_LANGUAGE: {},
- TELEMETRY_EVENT_SERVICE: {},
- TELEMETRY_EVENT_ENVIRONMENT: {},
- TELEMETRY_EVENT_USER_INVITATION_SENT: {},
- TELEMETRY_EVENT_USER_INVITATION_ACCEPTED: {},
- TELEMETRY_EVENT_DASHBOARDS_ALERTS: {},
+ TELEMETRY_EVENT_NUMBER_OF_SERVICES: {},
+ TELEMETRY_EVENT_ACTIVE_USER: {},
+ TELEMETRY_EVENT_HEART_BEAT: {},
+ TELEMETRY_EVENT_LANGUAGE: {},
+ TELEMETRY_EVENT_SERVICE: {},
+ TELEMETRY_EVENT_ENVIRONMENT: {},
+ TELEMETRY_EVENT_USER_INVITATION_SENT: {},
+ TELEMETRY_EVENT_USER_INVITATION_ACCEPTED: {},
+ TELEMETRY_EVENT_DASHBOARDS_ALERTS: {},
+ TELEMETRY_EVENT_SUCCESSFUL_DASHBOARD_PANEL_QUERY: {},
+ TELEMETRY_EVENT_SUCCESSFUL_ALERT_QUERY: {},
+ // TELEMETRY_EVENT_QUERY_RANGE_API: {}, // this event is not part of SAAS_EVENTS_LIST as it may cause too many events to be sent
}
const api_key = "4Gmoa4ixJAUHx2BpJxsjwA1bEfnwEeRz"
@@ -93,9 +98,10 @@ func (a *Telemetry) IsSampled() bool {
}
-func (telemetry *Telemetry) CheckSigNozSignals(postData *v3.QueryRangeParamsV3) (bool, bool) {
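+// CheckSigNozSignals reports whether the query touches SigNoz's own logs, metrics and traces (returned in that order).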
+func (telemetry *Telemetry) CheckSigNozSignals(postData *v3.QueryRangeParamsV3) (bool, bool, bool) {
signozLogsUsed := false
signozMetricsUsed := false
+ signozTracesUsed := false
if postData.CompositeQuery.QueryType == v3.QueryTypeBuilder {
for _, query := range postData.CompositeQuery.BuilderQueries {
@@ -105,6 +111,8 @@ func (telemetry *Telemetry) CheckSigNozSignals(postData *v3.QueryRangeParamsV3)
!strings.Contains(query.AggregateAttribute.Key, "signoz_") &&
len(query.AggregateAttribute.Key) > 0 {
signozMetricsUsed = true
+ } else if query.DataSource == v3.DataSourceTraces && len(query.Filters.Items) > 0 {
+ signozTracesUsed = true
}
}
} else if postData.CompositeQuery.QueryType == v3.QueryTypePromQL {
@@ -118,9 +126,15 @@ func (telemetry *Telemetry) CheckSigNozSignals(postData *v3.QueryRangeParamsV3)
if strings.Contains(query.Query, "signoz_metrics") && len(query.Query) > 0 {
signozMetricsUsed = true
}
+ if strings.Contains(query.Query, "signoz_logs") && len(query.Query) > 0 {
+ signozLogsUsed = true
+ }
+ if strings.Contains(query.Query, "signoz_traces") && len(query.Query) > 0 {
+ signozTracesUsed = true
+ }
}
}
- return signozLogsUsed, signozMetricsUsed
+ return signozLogsUsed, signozMetricsUsed, signozTracesUsed
}
func (telemetry *Telemetry) AddActiveTracesUser() {
@@ -275,29 +289,39 @@ func createTelemetry() {
telemetry.SendEvent(TELEMETRY_EVENT_HEART_BEAT, data, "")
alertsInfo, err := telemetry.reader.GetAlertsInfo(context.Background())
- if err != nil {
- telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, map[string]interface{}{"error": err.Error()}, "")
- } else {
+ if err == nil {
dashboardsInfo, err := telemetry.reader.GetDashboardsInfo(context.Background())
if err == nil {
- dashboardsAlertsData := map[string]interface{}{
- "totalDashboards": dashboardsInfo.TotalDashboards,
- "logsBasedPanels": dashboardsInfo.LogsBasedPanels,
- "metricBasedPanels": dashboardsInfo.MetricBasedPanels,
- "tracesBasedPanels": dashboardsInfo.TracesBasedPanels,
- "totalAlerts": alertsInfo.TotalAlerts,
- "logsBasedAlerts": alertsInfo.LogsBasedAlerts,
- "metricBasedAlerts": alertsInfo.MetricBasedAlerts,
- "tracesBasedAlerts": alertsInfo.TracesBasedAlerts,
+ channels, err := telemetry.reader.GetChannels()
+ if err == nil {
+ savedViewsInfo, err := telemetry.reader.GetSavedViewsInfo(context.Background())
+ if err == nil {
+ dashboardsAlertsData := map[string]interface{}{
+ "totalDashboards": dashboardsInfo.TotalDashboards,
+ "totalDashboardsWithPanelAndName": dashboardsInfo.TotalDashboardsWithPanelAndName,
+ "logsBasedPanels": dashboardsInfo.LogsBasedPanels,
+ "metricBasedPanels": dashboardsInfo.MetricBasedPanels,
+ "tracesBasedPanels": dashboardsInfo.TracesBasedPanels,
+ "totalAlerts": alertsInfo.TotalAlerts,
+ "logsBasedAlerts": alertsInfo.LogsBasedAlerts,
+ "metricBasedAlerts": alertsInfo.MetricBasedAlerts,
+ "tracesBasedAlerts": alertsInfo.TracesBasedAlerts,
+ "totalChannels": len(*channels),
+ "totalSavedViews": savedViewsInfo.TotalSavedViews,
+ "logsSavedViews": savedViewsInfo.LogsSavedViews,
+ "tracesSavedViews": savedViewsInfo.TracesSavedViews,
+ }
+ // send event only if there are dashboards, alerts, channels or saved views
+ if dashboardsInfo.TotalDashboards > 0 || alertsInfo.TotalAlerts > 0 || len(*channels) > 0 || savedViewsInfo.TotalSavedViews > 0 {
+ telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, dashboardsAlertsData, "")
+ }
+ }
}
- // send event only if there are dashboards or alerts
- if dashboardsInfo.TotalDashboards > 0 || alertsInfo.TotalAlerts > 0 {
- telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, dashboardsAlertsData, "")
- }
- } else {
- telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, map[string]interface{}{"error": err.Error()}, "")
}
}
+ if err != nil {
+ telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, map[string]interface{}{"error": err.Error()}, "")
+ }
getDistributedInfoInLastHeartBeatInterval, _ := telemetry.reader.GetDistributedInfoInLastHeartBeatInterval(context.Background())
telemetry.SendEvent(TELEMETRY_EVENT_DISTRIBUTED, getDistributedInfoInLastHeartBeatInterval, "")
@@ -411,7 +435,7 @@ func (a *Telemetry) checkEvents(event string) bool {
func (a *Telemetry) SendEvent(event string, data map[string]interface{}, userEmail string, opts ...bool) {
// ignore telemetry for default user
- if userEmail == DEFAULT_CLOUD_EMAIL {
+ if userEmail == DEFAULT_CLOUD_EMAIL || a.GetUserEmail() == DEFAULT_CLOUD_EMAIL {
return
}
@@ -449,7 +473,7 @@ func (a *Telemetry) SendEvent(event string, data map[string]interface{}, userEma
}
}
- // zap.S().Info(data)
+ // zap.L().Info(data)
properties := analytics.NewProperties()
properties.Set("version", version.GetVersion())
properties.Set("deploymentType", getDeploymentType())
diff --git a/pkg/query-service/tests/docker.go b/pkg/query-service/tests/docker.go
index a710161a0e..c65a627512 100644
--- a/pkg/query-service/tests/docker.go
+++ b/pkg/query-service/tests/docker.go
@@ -13,7 +13,6 @@ import (
"log"
minio "github.com/minio/minio-go/v6"
- "go.uber.org/zap"
)
const (
@@ -36,7 +35,7 @@ func init() {
} else if goArch == "amd64" {
composeFile = "./test-deploy/docker-compose.yaml"
} else {
- zap.S().Fatalf("Unsupported architecture: %s", goArch)
+ log.Fatalf("Unsupported architecture: %s", goArch)
}
}
diff --git a/pkg/query-service/tests/integration/logparsingpipeline_test.go b/pkg/query-service/tests/integration/logparsingpipeline_test.go
index 4c260596e5..9ef47171a4 100644
--- a/pkg/query-service/tests/integration/logparsingpipeline_test.go
+++ b/pkg/query-service/tests/integration/logparsingpipeline_test.go
@@ -1,14 +1,11 @@
package tests
import (
- "bytes"
- "context"
"encoding/json"
"fmt"
"io"
- "net/http"
"net/http/httptest"
- "os"
+ "runtime/debug"
"strings"
"testing"
@@ -18,10 +15,10 @@ import (
"github.com/knadh/koanf/parsers/yaml"
"github.com/open-telemetry/opamp-go/protobufs"
"github.com/pkg/errors"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/agentConf"
"go.signoz.io/signoz/pkg/query-service/app"
+ "go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/app/opamp"
opampModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
@@ -31,20 +28,21 @@ import (
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/queryBuilderToExpr"
+ "go.signoz.io/signoz/pkg/query-service/utils"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
func TestLogPipelinesLifecycle(t *testing.T) {
- testbed := NewLogPipelinesTestBed(t)
- assert := assert.New(t)
+ testbed := NewLogPipelinesTestBed(t, nil)
+ require := require.New(t)
getPipelinesResp := testbed.GetPipelinesFromQS()
- assert.Equal(
+ require.Equal(
0, len(getPipelinesResp.Pipelines),
"There should be no pipelines at the start",
)
- assert.Equal(
+ require.Equal(
0, len(getPipelinesResp.History),
"There should be no pipelines config history at the start",
)
@@ -118,11 +116,11 @@ func TestLogPipelinesLifecycle(t *testing.T) {
)
// Deployment status should be pending.
- assert.Equal(
+ require.Equal(
1, len(getPipelinesResp.History),
"pipelines config history should not be empty after 1st configuration",
)
- assert.Equal(
+ require.Equal(
agentConf.DeployInitiated, getPipelinesResp.History[0].DeployStatus,
"pipelines deployment should be in progress after 1st configuration",
)
@@ -134,7 +132,7 @@ func TestLogPipelinesLifecycle(t *testing.T) {
assertPipelinesResponseMatchesPostedPipelines(
t, postablePipelines, getPipelinesResp,
)
- assert.Equal(
+ require.Equal(
agentConf.Deployed,
getPipelinesResp.History[0].DeployStatus,
"pipeline deployment should be complete after acknowledgment from opamp client",
@@ -149,12 +147,13 @@ func TestLogPipelinesLifecycle(t *testing.T) {
testbed.assertPipelinesSentToOpampClient(updatePipelinesResp.Pipelines)
testbed.assertNewAgentGetsPipelinesOnConnection(updatePipelinesResp.Pipelines)
- assert.Equal(
- 2, len(updatePipelinesResp.History),
+ getPipelinesResp = testbed.GetPipelinesFromQS()
+ require.Equal(
+ 2, len(getPipelinesResp.History),
"there should be 2 history entries after posting pipelines config for the 2nd time",
)
- assert.Equal(
- agentConf.DeployInitiated, updatePipelinesResp.History[0].DeployStatus,
+ require.Equal(
+ agentConf.DeployInitiated, getPipelinesResp.History[0].DeployStatus,
"deployment should be in progress for latest pipeline config",
)
@@ -165,7 +164,7 @@ func TestLogPipelinesLifecycle(t *testing.T) {
assertPipelinesResponseMatchesPostedPipelines(
t, postablePipelines, getPipelinesResp,
)
- assert.Equal(
+ require.Equal(
agentConf.Deployed,
getPipelinesResp.History[0].DeployStatus,
"deployment for latest pipeline config should be complete after acknowledgment from opamp client",
@@ -174,7 +173,7 @@ func TestLogPipelinesLifecycle(t *testing.T) {
func TestLogPipelinesHistory(t *testing.T) {
require := require.New(t)
- testbed := NewLogPipelinesTestBed(t)
+ testbed := NewLogPipelinesTestBed(t, nil)
// Only the latest config version can be "IN_PROGRESS",
// other incomplete deployments should have status "UNKNOWN"
@@ -356,7 +355,7 @@ func TestLogPipelinesValidation(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
- testbed := NewLogPipelinesTestBed(t)
+ testbed := NewLogPipelinesTestBed(t, nil)
testbed.PostPipelinesToQSExpectingStatusCode(
logparsingpipeline.PostablePipelines{
Pipelines: []logparsingpipeline.PostablePipeline{tc.Pipeline},
@@ -369,7 +368,7 @@ func TestLogPipelinesValidation(t *testing.T) {
func TestCanSavePipelinesWithoutConnectedAgents(t *testing.T) {
require := require.New(t)
- testbed := NewTestbedWithoutOpamp(t)
+ testbed := NewTestbedWithoutOpamp(t, nil)
getPipelinesResp := testbed.GetPipelinesFromQS()
require.Equal(0, len(getPipelinesResp.Pipelines))
@@ -422,7 +421,6 @@ func TestCanSavePipelinesWithoutConnectedAgents(t *testing.T) {
// configuring log pipelines and provides test helpers.
type LogPipelinesTestBed struct {
t *testing.T
- testDBFilePath string
testUser *model.User
apiHandler *app.APIHandler
agentConfMgr *agentConf.Manager
@@ -430,25 +428,20 @@ type LogPipelinesTestBed struct {
opampClientConn *opamp.MockOpAmpConnection
}
-func NewTestbedWithoutOpamp(t *testing.T) *LogPipelinesTestBed {
- // Create a tmp file based sqlite db for testing.
- testDBFile, err := os.CreateTemp("", "test-signoz-db-*")
- if err != nil {
- t.Fatalf("could not create temp file for test db: %v", err)
+// testDB can be injected for sharing a DB across multiple integration testbeds.
+func NewTestbedWithoutOpamp(t *testing.T, testDB *sqlx.DB) *LogPipelinesTestBed {
+ if testDB == nil {
+ testDB = utils.NewQueryServiceDBForTests(t)
}
- testDBFilePath := testDBFile.Name()
- t.Cleanup(func() { os.Remove(testDBFilePath) })
- testDBFile.Close()
- // TODO(Raj): move away from singleton DB instances to avoid
- // issues when running tests in parallel.
- dao.InitDao("sqlite", testDBFilePath)
-
- testDB, err := sqlx.Open("sqlite3", testDBFilePath)
+ ic, err := integrations.NewController(testDB)
if err != nil {
- t.Fatalf("could not open test db sqlite file: %v", err)
+ t.Fatalf("could not create integrations controller: %v", err)
}
- controller, err := logparsingpipeline.NewLogParsingPipelinesController(testDB, "sqlite")
+
+ controller, err := logparsingpipeline.NewLogParsingPipelinesController(
+ testDB, "sqlite", ic.GetPipelinesForInstalledIntegrations,
+ )
if err != nil {
t.Fatalf("could not create a logparsingpipelines controller: %v", err)
}
@@ -467,7 +460,7 @@ func NewTestbedWithoutOpamp(t *testing.T) *LogPipelinesTestBed {
}
// Mock an available opamp agent
- testDB, err = opampModel.InitDB(testDBFilePath)
+ testDB, err = opampModel.InitDB(testDB)
require.Nil(t, err, "failed to init opamp model")
agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
@@ -479,16 +472,15 @@ func NewTestbedWithoutOpamp(t *testing.T) *LogPipelinesTestBed {
require.Nil(t, err, "failed to init agentConf")
return &LogPipelinesTestBed{
- t: t,
- testDBFilePath: testDBFilePath,
- testUser: user,
- apiHandler: apiHandler,
- agentConfMgr: agentConfMgr,
+ t: t,
+ testUser: user,
+ apiHandler: apiHandler,
+ agentConfMgr: agentConfMgr,
}
}
-func NewLogPipelinesTestBed(t *testing.T) *LogPipelinesTestBed {
- testbed := NewTestbedWithoutOpamp(t)
+func NewLogPipelinesTestBed(t *testing.T, testDB *sqlx.DB) *LogPipelinesTestBed {
+ testbed := NewTestbedWithoutOpamp(t, testDB)
opampServer := opamp.InitializeServer(nil, testbed.agentConfMgr)
err := opampServer.Start(opamp.GetAvailableLocalAddress())
@@ -590,8 +582,8 @@ func (tb *LogPipelinesTestBed) GetPipelinesFromQS() *logparsingpipeline.Pipeline
if response.StatusCode != 200 {
tb.t.Fatalf(
- "could not list log parsing pipelines. status: %d, body: %v",
- response.StatusCode, string(responseBody),
+ "could not list log parsing pipelines. status: %d, body: %v\n%s",
+ response.StatusCode, string(responseBody), string(debug.Stack()),
)
}
@@ -625,7 +617,7 @@ func assertPipelinesRecommendedInRemoteConfig(
pipelines []logparsingpipeline.Pipeline,
) {
collectorConfigFiles := msg.RemoteConfig.Config.ConfigMap
- assert.Equal(
+ require.Equal(
t, len(collectorConfigFiles), 1,
"otel config sent to client is expected to contain atleast 1 file",
)
@@ -653,7 +645,7 @@ func assertPipelinesRecommendedInRemoteConfig(
}
_, expectedLogProcessorNames, err := logparsingpipeline.PreparePipelineProcessor(pipelines)
- assert.Equal(
+ require.Equal(
t, expectedLogProcessorNames, collectorConfLogsPipelineProcNames,
"config sent to opamp client doesn't contain expected log pipelines",
)
@@ -661,7 +653,7 @@ func assertPipelinesRecommendedInRemoteConfig(
collectorConfProcessors := collectorConfSentToClient["processors"].(map[string]interface{})
for _, procName := range expectedLogProcessorNames {
pipelineProcessorInConf, procExists := collectorConfProcessors[procName]
- assert.True(t, procExists, fmt.Sprintf(
+ require.True(t, procExists, fmt.Sprintf(
"%s processor not found in config sent to opamp client", procName,
))
@@ -747,16 +739,16 @@ func assertPipelinesResponseMatchesPostedPipelines(
postablePipelines logparsingpipeline.PostablePipelines,
pipelinesResp *logparsingpipeline.PipelinesResponse,
) {
- assert.Equal(
+ require.Equal(
t, len(postablePipelines.Pipelines), len(pipelinesResp.Pipelines),
"length mistmatch between posted pipelines and pipelines in response",
)
for i, pipeline := range pipelinesResp.Pipelines {
postable := postablePipelines.Pipelines[i]
- assert.Equal(t, postable.Name, pipeline.Name, "pipeline.Name mismatch")
- assert.Equal(t, postable.OrderId, pipeline.OrderId, "pipeline.OrderId mismatch")
- assert.Equal(t, postable.Enabled, pipeline.Enabled, "pipeline.Enabled mismatch")
- assert.Equal(t, postable.Config, pipeline.Config, "pipeline.Config mismatch")
+ require.Equal(t, postable.Name, pipeline.Name, "pipeline.Name mismatch")
+ require.Equal(t, postable.OrderId, pipeline.OrderId, "pipeline.OrderId mismatch")
+ require.Equal(t, postable.Enabled, pipeline.Enabled, "pipeline.Enabled mismatch")
+ require.Equal(t, postable.Config, pipeline.Config, "pipeline.Config mismatch")
}
}
@@ -792,60 +784,3 @@ func newInitialAgentConfigMap() *protobufs.AgentConfigMap {
},
}
}
-
-func createTestUser() (*model.User, *model.ApiError) {
- // Create a test user for auth
- ctx := context.Background()
- org, apiErr := dao.DB().CreateOrg(ctx, &model.Organization{
- Name: "test",
- })
- if apiErr != nil {
- return nil, apiErr
- }
-
- group, apiErr := dao.DB().CreateGroup(ctx, &model.Group{
- Name: "test",
- })
- if apiErr != nil {
- return nil, apiErr
- }
-
- return dao.DB().CreateUser(
- ctx,
- &model.User{
- Name: "test",
- Email: "test@test.com",
- Password: "test",
- OrgId: org.Id,
- GroupId: group.Id,
- },
- true,
- )
-}
-
-func NewAuthenticatedTestRequest(
- user *model.User,
- path string,
- postData interface{},
-) (*http.Request, error) {
- userJwt, err := auth.GenerateJWTForUser(user)
- if err != nil {
- return nil, err
- }
-
- var req *http.Request
-
- if postData != nil {
- var body bytes.Buffer
- err = json.NewEncoder(&body).Encode(postData)
- if err != nil {
- return nil, err
- }
- req = httptest.NewRequest(http.MethodPost, path, &body)
- } else {
- req = httptest.NewRequest(http.MethodPost, path, nil)
- }
-
- req.Header.Add("Authorization", "Bearer "+userJwt.AccessJwt)
- return req, nil
-}
diff --git a/pkg/query-service/tests/integration/signoz_integrations_test.go b/pkg/query-service/tests/integration/signoz_integrations_test.go
new file mode 100644
index 0000000000..d58ccaf51a
--- /dev/null
+++ b/pkg/query-service/tests/integration/signoz_integrations_test.go
@@ -0,0 +1,631 @@
+package tests
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "runtime/debug"
+ "slices"
+ "testing"
+ "time"
+
+ "github.com/jmoiron/sqlx"
+ mockhouse "github.com/srikanthccv/ClickHouse-go-mock"
+ "github.com/stretchr/testify/require"
+ "go.signoz.io/signoz/pkg/query-service/app"
+ "go.signoz.io/signoz/pkg/query-service/app/dashboards"
+ "go.signoz.io/signoz/pkg/query-service/app/integrations"
+ "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
+ "go.signoz.io/signoz/pkg/query-service/auth"
+ "go.signoz.io/signoz/pkg/query-service/dao"
+ "go.signoz.io/signoz/pkg/query-service/featureManager"
+ "go.signoz.io/signoz/pkg/query-service/model"
+ v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+ "go.signoz.io/signoz/pkg/query-service/utils"
+)
+
+// Higher level tests for UI facing APIs
+
+func TestSignozIntegrationLifeCycle(t *testing.T) {
+ require := require.New(t)
+ testbed := NewIntegrationsTestBed(t, nil)
+
+ installedResp := testbed.GetInstalledIntegrationsFromQS()
+ require.Equal(
+ len(installedResp.Integrations), 0,
+ "no integrations should be installed at the beginning",
+ )
+
+ availableResp := testbed.GetAvailableIntegrationsFromQS()
+ availableIntegrations := availableResp.Integrations
+ require.Greater(
+ len(availableIntegrations), 0,
+ "some integrations should come bundled with SigNoz",
+ )
+
+ // Should be able to install integration
+ require.False(availableIntegrations[0].IsInstalled)
+ testbed.RequestQSToInstallIntegration(
+ availableIntegrations[0].Id, map[string]interface{}{},
+ )
+
+ ii := testbed.GetIntegrationDetailsFromQS(availableIntegrations[0].Id)
+ require.Equal(ii.Id, availableIntegrations[0].Id)
+ require.NotNil(ii.Installation)
+
+ installedResp = testbed.GetInstalledIntegrationsFromQS()
+ installedIntegrations := installedResp.Integrations
+ require.Equal(len(installedIntegrations), 1)
+ require.Equal(installedIntegrations[0].Id, availableIntegrations[0].Id)
+
+ availableResp = testbed.GetAvailableIntegrationsFromQS()
+ availableIntegrations = availableResp.Integrations
+ require.Greater(len(availableIntegrations), 0)
+
+ // Integration connection status should get updated after signal data has been received.
+ testbed.mockLogQueryResponse([]model.SignozLog{})
+ testbed.mockMetricStatusQueryResponse(nil)
+ connectionStatus := testbed.GetIntegrationConnectionStatus(ii.Id)
+ require.NotNil(connectionStatus)
+ require.Nil(connectionStatus.Logs)
+ require.Nil(connectionStatus.Metrics)
+
+ testLog := makeTestSignozLog("test log body", map[string]interface{}{
+ "source": "nginx",
+ })
+ testbed.mockLogQueryResponse([]model.SignozLog{testLog})
+
+ testMetricName := ii.ConnectionTests.Metrics[0]
+ testMetricLastReceivedTs := time.Now().UnixMilli()
+ testbed.mockMetricStatusQueryResponse(&model.MetricStatus{
+ MetricName: testMetricName,
+ LastReceivedTsMillis: testMetricLastReceivedTs,
+ })
+
+ connectionStatus = testbed.GetIntegrationConnectionStatus(ii.Id)
+ require.NotNil(connectionStatus)
+ require.NotNil(connectionStatus.Logs)
+ require.Equal(connectionStatus.Logs.LastReceivedTsMillis, int64(testLog.Timestamp/1000000))
+ require.NotNil(connectionStatus.Metrics)
+ require.Equal(connectionStatus.Metrics.LastReceivedTsMillis, testMetricLastReceivedTs)
+
+ // Should be able to uninstall integration
+ require.True(availableIntegrations[0].IsInstalled)
+ testbed.RequestQSToUninstallIntegration(
+ availableIntegrations[0].Id,
+ )
+
+ ii = testbed.GetIntegrationDetailsFromQS(availableIntegrations[0].Id)
+ require.Equal(ii.Id, availableIntegrations[0].Id)
+ require.Nil(ii.Installation)
+
+ installedResp = testbed.GetInstalledIntegrationsFromQS()
+ installedIntegrations = installedResp.Integrations
+ require.Equal(len(installedIntegrations), 0)
+
+ availableResp = testbed.GetAvailableIntegrationsFromQS()
+ availableIntegrations = availableResp.Integrations
+ require.Greater(len(availableIntegrations), 0)
+ require.False(availableIntegrations[0].IsInstalled)
+}
+
+func TestLogPipelinesForInstalledSignozIntegrations(t *testing.T) {
+ require := require.New(t)
+
+ testDB := utils.NewQueryServiceDBForTests(t)
+ integrationsTB := NewIntegrationsTestBed(t, testDB)
+ pipelinesTB := NewLogPipelinesTestBed(t, testDB)
+
+ availableIntegrationsResp := integrationsTB.GetAvailableIntegrationsFromQS()
+ availableIntegrations := availableIntegrationsResp.Integrations
+ require.Greater(
+ len(availableIntegrations), 0,
+ "some integrations should come bundled with SigNoz",
+ )
+
+ getPipelinesResp := pipelinesTB.GetPipelinesFromQS()
+ require.Equal(
+ 0, len(getPipelinesResp.Pipelines),
+ "There should be no pipelines at the start",
+ )
+
+ // Find an available integration that contains a log pipeline
+ var testAvailableIntegration *integrations.IntegrationsListItem
+ for _, ai := range availableIntegrations {
+ details := integrationsTB.GetIntegrationDetailsFromQS(ai.Id)
+ require.NotNil(details)
+ if len(details.Assets.Logs.Pipelines) > 0 {
+ testAvailableIntegration = &ai
+ break
+ }
+ }
+
+ if testAvailableIntegration == nil {
+ // None of the built-in integrations include a pipeline right now.
+ return
+ }
+
+ // Installing an integration should add its pipelines to pipelines list
+ require.NotNil(testAvailableIntegration)
+ require.False(testAvailableIntegration.IsInstalled)
+ integrationsTB.RequestQSToInstallIntegration(
+ testAvailableIntegration.Id, map[string]interface{}{},
+ )
+
+ testIntegration := integrationsTB.GetIntegrationDetailsFromQS(testAvailableIntegration.Id)
+ require.NotNil(testIntegration.Installation)
+ testIntegrationPipelines := testIntegration.Assets.Logs.Pipelines
+ require.Greater(
+ len(testIntegrationPipelines), 0,
+ "test integration expected to have a pipeline",
+ )
+
+ getPipelinesResp = pipelinesTB.GetPipelinesFromQS()
+ require.Equal(
+ len(testIntegrationPipelines), len(getPipelinesResp.Pipelines),
+ "Pipelines for installed integrations should appear in pipelines list",
+ )
+ lastPipeline := getPipelinesResp.Pipelines[len(getPipelinesResp.Pipelines)-1]
+ require.NotNil(integrations.IntegrationIdForPipeline(lastPipeline))
+ require.Equal(testIntegration.Id, *integrations.IntegrationIdForPipeline(lastPipeline))
+
+ pipelinesTB.assertPipelinesSentToOpampClient(getPipelinesResp.Pipelines)
+ pipelinesTB.assertNewAgentGetsPipelinesOnConnection(getPipelinesResp.Pipelines)
+
+ // After saving a user created pipeline, pipelines response should include
+ // both user created pipelines and pipelines for installed integrations.
+ postablePipelines := logparsingpipeline.PostablePipelines{
+ Pipelines: []logparsingpipeline.PostablePipeline{
+ {
+ OrderId: 1,
+ Name: "pipeline1",
+ Alias: "pipeline1",
+ Enabled: true,
+ Filter: &v3.FilterSet{
+ Operator: "AND",
+ Items: []v3.FilterItem{
+ {
+ Key: v3.AttributeKey{
+ Key: "method",
+ DataType: v3.AttributeKeyDataTypeString,
+ Type: v3.AttributeKeyTypeTag,
+ },
+ Operator: "=",
+ Value: "GET",
+ },
+ },
+ },
+ Config: []logparsingpipeline.PipelineOperator{
+ {
+ OrderId: 1,
+ ID: "add",
+ Type: "add",
+ Field: "attributes.test",
+ Value: "val",
+ Enabled: true,
+ Name: "test add",
+ },
+ },
+ },
+ },
+ }
+
+ pipelinesTB.PostPipelinesToQS(postablePipelines)
+
+ getPipelinesResp = pipelinesTB.GetPipelinesFromQS()
+ require.Equal(1+len(testIntegrationPipelines), len(getPipelinesResp.Pipelines))
+ pipelinesTB.assertPipelinesSentToOpampClient(getPipelinesResp.Pipelines)
+ pipelinesTB.assertNewAgentGetsPipelinesOnConnection(getPipelinesResp.Pipelines)
+
+ // Reordering integration pipelines should be possible.
+ postable := postableFromPipelines(getPipelinesResp.Pipelines)
+ slices.Reverse(postable.Pipelines)
+ for i := range postable.Pipelines {
+ postable.Pipelines[i].OrderId = i + 1
+ }
+
+ pipelinesTB.PostPipelinesToQS(postable)
+
+ getPipelinesResp = pipelinesTB.GetPipelinesFromQS()
+ firstPipeline := getPipelinesResp.Pipelines[0]
+ require.NotNil(integrations.IntegrationIdForPipeline(firstPipeline))
+ require.Equal(testIntegration.Id, *integrations.IntegrationIdForPipeline(firstPipeline))
+
+ pipelinesTB.assertPipelinesSentToOpampClient(getPipelinesResp.Pipelines)
+ pipelinesTB.assertNewAgentGetsPipelinesOnConnection(getPipelinesResp.Pipelines)
+
+ // enabling/disabling integration pipelines should be possible.
+ require.True(firstPipeline.Enabled)
+
+ postable.Pipelines[0].Enabled = false
+ pipelinesTB.PostPipelinesToQS(postable)
+
+ getPipelinesResp = pipelinesTB.GetPipelinesFromQS()
+ require.Equal(1+len(testIntegrationPipelines), len(getPipelinesResp.Pipelines))
+
+ firstPipeline = getPipelinesResp.Pipelines[0]
+ require.NotNil(integrations.IntegrationIdForPipeline(firstPipeline))
+ require.Equal(testIntegration.Id, *integrations.IntegrationIdForPipeline(firstPipeline))
+
+ require.False(firstPipeline.Enabled)
+
+ pipelinesTB.assertPipelinesSentToOpampClient(getPipelinesResp.Pipelines)
+ pipelinesTB.assertNewAgentGetsPipelinesOnConnection(getPipelinesResp.Pipelines)
+
+ // should not be able to edit integration pipelines.
+ require.Greater(len(postable.Pipelines[0].Config), 0)
+ postable.Pipelines[0].Config = []logparsingpipeline.PipelineOperator{}
+ pipelinesTB.PostPipelinesToQS(postable)
+
+ getPipelinesResp = pipelinesTB.GetPipelinesFromQS()
+ require.Equal(1+len(testIntegrationPipelines), len(getPipelinesResp.Pipelines))
+
+ firstPipeline = getPipelinesResp.Pipelines[0]
+ require.NotNil(integrations.IntegrationIdForPipeline(firstPipeline))
+ require.Equal(testIntegration.Id, *integrations.IntegrationIdForPipeline(firstPipeline))
+
+ require.False(firstPipeline.Enabled)
+ require.Greater(len(firstPipeline.Config), 0)
+
+ // should not be able to delete integration pipelines
+ postable.Pipelines = []logparsingpipeline.PostablePipeline{postable.Pipelines[1]}
+ pipelinesTB.PostPipelinesToQS(postable)
+
+ getPipelinesResp = pipelinesTB.GetPipelinesFromQS()
+ require.Equal(1+len(testIntegrationPipelines), len(getPipelinesResp.Pipelines))
+
+ lastPipeline = getPipelinesResp.Pipelines[1]
+ require.NotNil(integrations.IntegrationIdForPipeline(lastPipeline))
+ require.Equal(testIntegration.Id, *integrations.IntegrationIdForPipeline(lastPipeline))
+
+ // Uninstalling an integration should remove its pipelines
+ // from pipelines list in the UI
+ integrationsTB.RequestQSToUninstallIntegration(
+ testIntegration.Id,
+ )
+ getPipelinesResp = pipelinesTB.GetPipelinesFromQS()
+ require.Equal(
+ 1, len(getPipelinesResp.Pipelines),
+ "Pipelines for uninstalled integrations should get removed from pipelines list",
+ )
+ pipelinesTB.assertPipelinesSentToOpampClient(getPipelinesResp.Pipelines)
+ pipelinesTB.assertNewAgentGetsPipelinesOnConnection(getPipelinesResp.Pipelines)
+}
+
+func TestDashboardsForInstalledIntegrationDashboards(t *testing.T) {
+ require := require.New(t)
+
+ testDB := utils.NewQueryServiceDBForTests(t)
+ integrationsTB := NewIntegrationsTestBed(t, testDB)
+
+ availableIntegrationsResp := integrationsTB.GetAvailableIntegrationsFromQS()
+ availableIntegrations := availableIntegrationsResp.Integrations
+ require.Greater(
+ len(availableIntegrations), 0,
+ "some integrations should come bundled with SigNoz",
+ )
+
+ dashboards := integrationsTB.GetDashboardsFromQS()
+ require.Equal(
+ 0, len(dashboards),
+ "There should be no dashboards at the start",
+ )
+
+ // Find an available integration that contains dashboards
+ var testAvailableIntegration *integrations.IntegrationsListItem
+ for _, ai := range availableIntegrations {
+ details := integrationsTB.GetIntegrationDetailsFromQS(ai.Id)
+ require.NotNil(details)
+ if len(details.Assets.Dashboards) > 0 {
+ testAvailableIntegration = &ai
+ break
+ }
+ }
+ require.NotNil(testAvailableIntegration)
+
+ // Installing an integration should make its dashboards appear in the dashboard list
+ require.False(testAvailableIntegration.IsInstalled)
+ integrationsTB.RequestQSToInstallIntegration(
+ testAvailableIntegration.Id, map[string]interface{}{},
+ )
+
+ testIntegration := integrationsTB.GetIntegrationDetailsFromQS(testAvailableIntegration.Id)
+ require.NotNil(testIntegration.Installation)
+ testIntegrationDashboards := testIntegration.Assets.Dashboards
+ require.Greater(
+ len(testIntegrationDashboards), 0,
+ "test integration is expected to have dashboards",
+ )
+
+ dashboards = integrationsTB.GetDashboardsFromQS()
+ require.Equal(
+ len(testIntegrationDashboards), len(dashboards),
+ "dashboards for installed integrations should appear in dashboards list",
+ )
+
+ // Should be able to get an installed integration's dashboard by id
+ dd := integrationsTB.GetDashboardByIdFromQS(dashboards[0].Uuid)
+ require.Equal(*dd, dashboards[0])
+
+ // Integration dashboards should no longer appear in the dashboards list after uninstallation
+ integrationsTB.RequestQSToUninstallIntegration(
+ testIntegration.Id,
+ )
+ dashboards = integrationsTB.GetDashboardsFromQS()
+ require.Equal(
+ 0, len(dashboards),
+ "dashboards for uninstalled integrations should not appear in dashboards list",
+ )
+}
+
+type IntegrationsTestBed struct {
+ t *testing.T
+ testUser *model.User
+ qsHttpHandler http.Handler
+ mockClickhouse mockhouse.ClickConnMockCommon
+}
+
+func (tb *IntegrationsTestBed) GetAvailableIntegrationsFromQS() *integrations.IntegrationsListResponse {
+ result := tb.RequestQS("/api/v1/integrations", nil)
+
+ dataJson, err := json.Marshal(result.Data)
+ if err != nil {
+ tb.t.Fatalf("could not marshal apiResponse.Data: %v", err)
+ }
+ var integrationsResp integrations.IntegrationsListResponse
+ err = json.Unmarshal(dataJson, &integrationsResp)
+ if err != nil {
+ tb.t.Fatalf("could not unmarshal apiResponse.Data json into PipelinesResponse")
+ }
+
+ return &integrationsResp
+}
+
+func (tb *IntegrationsTestBed) GetInstalledIntegrationsFromQS() *integrations.IntegrationsListResponse {
+ result := tb.RequestQS("/api/v1/integrations?is_installed=true", nil)
+
+ dataJson, err := json.Marshal(result.Data)
+ if err != nil {
+ tb.t.Fatalf("could not marshal apiResponse.Data: %v", err)
+ }
+ var integrationsResp integrations.IntegrationsListResponse
+ err = json.Unmarshal(dataJson, &integrationsResp)
+ if err != nil {
+ tb.t.Fatalf(" could not unmarshal apiResponse.Data json into PipelinesResponse")
+ }
+
+ return &integrationsResp
+}
+
+func (tb *IntegrationsTestBed) GetIntegrationDetailsFromQS(
+ integrationId string,
+) *integrations.Integration {
+ result := tb.RequestQS(fmt.Sprintf(
+ "/api/v1/integrations/%s", integrationId,
+ ), nil)
+
+ dataJson, err := json.Marshal(result.Data)
+ if err != nil {
+ tb.t.Fatalf("could not marshal apiResponse.Data: %v", err)
+ }
+ var integrationResp integrations.Integration
+ err = json.Unmarshal(dataJson, &integrationResp)
+ if err != nil {
+ tb.t.Fatalf("could not unmarshal apiResponse.Data json")
+ }
+
+ return &integrationResp
+}
+
+func (tb *IntegrationsTestBed) GetIntegrationConnectionStatus(
+ integrationId string,
+) *integrations.IntegrationConnectionStatus {
+ result := tb.RequestQS(fmt.Sprintf(
+ "/api/v1/integrations/%s/connection_status", integrationId,
+ ), nil)
+
+ dataJson, err := json.Marshal(result.Data)
+ if err != nil {
+ tb.t.Fatalf("could not marshal apiResponse.Data: %v", err)
+ }
+ var connectionStatus integrations.IntegrationConnectionStatus
+ err = json.Unmarshal(dataJson, &connectionStatus)
+ if err != nil {
+ tb.t.Fatalf("could not unmarshal apiResponse.Data json")
+ }
+
+ return &connectionStatus
+}
+
+func (tb *IntegrationsTestBed) RequestQSToInstallIntegration(
+ integrationId string, config map[string]interface{},
+) {
+ request := integrations.InstallIntegrationRequest{
+ IntegrationId: integrationId,
+ Config: config,
+ }
+ tb.RequestQS("/api/v1/integrations/install", request)
+}
+
+func (tb *IntegrationsTestBed) RequestQSToUninstallIntegration(
+ integrationId string,
+) {
+ request := integrations.UninstallIntegrationRequest{
+ IntegrationId: integrationId,
+ }
+ tb.RequestQS("/api/v1/integrations/uninstall", request)
+}
+
+func (tb *IntegrationsTestBed) GetDashboardsFromQS() []dashboards.Dashboard {
+ result := tb.RequestQS("/api/v1/dashboards", nil)
+
+ dataJson, err := json.Marshal(result.Data)
+ if err != nil {
+ tb.t.Fatalf("could not marshal apiResponse.Data: %v", err)
+ }
+
+ dashboards := []dashboards.Dashboard{}
+ err = json.Unmarshal(dataJson, &dashboards)
+ if err != nil {
+ tb.t.Fatalf(" could not unmarshal apiResponse.Data json into dashboards")
+ }
+
+ return dashboards
+}
+
+func (tb *IntegrationsTestBed) GetDashboardByIdFromQS(dashboardUuid string) *dashboards.Dashboard {
+ result := tb.RequestQS(fmt.Sprintf("/api/v1/dashboards/%s", dashboardUuid), nil)
+
+ dataJson, err := json.Marshal(result.Data)
+ if err != nil {
+ tb.t.Fatalf("could not marshal apiResponse.Data: %v", err)
+ }
+
+ dashboard := dashboards.Dashboard{}
+ err = json.Unmarshal(dataJson, &dashboard)
+ if err != nil {
+ tb.t.Fatalf(" could not unmarshal apiResponse.Data json into dashboards")
+ }
+
+ return &dashboard
+}
+
+func (tb *IntegrationsTestBed) RequestQS(
+ path string,
+ postData interface{},
+) *app.ApiResponse {
+ req, err := NewAuthenticatedTestRequest(
+ tb.testUser, path, postData,
+ )
+ if err != nil {
+ tb.t.Fatalf("couldn't create authenticated test request: %v", err)
+ }
+
+ respWriter := httptest.NewRecorder()
+ tb.qsHttpHandler.ServeHTTP(respWriter, req)
+ response := respWriter.Result()
+ responseBody, err := io.ReadAll(response.Body)
+ if err != nil {
+ tb.t.Fatalf("couldn't read response body received from QS: %v", err)
+ }
+
+ if response.StatusCode != 200 {
+ tb.t.Fatalf(
+ "unexpected response status from query service for path %s. status: %d, body: %v\n%v",
+ path, response.StatusCode, string(responseBody), string(debug.Stack()),
+ )
+ }
+
+ var result app.ApiResponse
+ err = json.Unmarshal(responseBody, &result)
+ if err != nil {
+ tb.t.Fatalf(
+ "Could not unmarshal QS response into an ApiResponse.\nResponse body: %s",
+ string(responseBody),
+ )
+ }
+
+ return &result
+}
+
+func (tb *IntegrationsTestBed) mockLogQueryResponse(logsInResponse []model.SignozLog) {
+ addLogsQueryExpectation(tb.mockClickhouse, logsInResponse)
+}
+
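+// mockMetricStatusQueryResponse makes the mock ClickHouse return the given metric status row (or no rows when expectation is nil).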
+func (tb *IntegrationsTestBed) mockMetricStatusQueryResponse(expectation *model.MetricStatus) {
+ cols := []mockhouse.ColumnType{}
+ cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "metric_name"})
+ cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "labels"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Int64", Name: "unix_milli"})
+
+ values := [][]any{}
+ if expectation != nil {
+ rowValues := []any{}
+
+ rowValues = append(rowValues, expectation.MetricName)
+
+ labelsJson, err := json.Marshal(expectation.LastReceivedLabels)
+ require.Nil(tb.t, err)
+ rowValues = append(rowValues, labelsJson)
+
+ rowValues = append(rowValues, expectation.LastReceivedTsMillis)
+
+ values = append(values, rowValues)
+ }
+
+ tb.mockClickhouse.ExpectQuery(
+ `SELECT.*metric_name, labels, unix_milli.*from.*signoz_metrics.*where metric_name in.*limit 1.*`,
+ ).WillReturnRows(mockhouse.NewRows(cols, values))
+}
+
+// testDB can be injected for sharing a DB across multiple integration testbeds.
+func NewIntegrationsTestBed(t *testing.T, testDB *sqlx.DB) *IntegrationsTestBed {
+ if testDB == nil {
+ testDB = utils.NewQueryServiceDBForTests(t)
+ }
+
+ controller, err := integrations.NewController(testDB)
+ if err != nil {
+ t.Fatalf("could not create integrations controller: %v", err)
+ }
+
+ fm := featureManager.StartManager()
+ reader, mockClickhouse := NewMockClickhouseReader(t, testDB, fm)
+ mockClickhouse.MatchExpectationsInOrder(false)
+
+ apiHandler, err := app.NewAPIHandler(app.APIHandlerOpts{
+ Reader: reader,
+ AppDao: dao.DB(),
+ IntegrationsController: controller,
+ FeatureFlags: fm,
+ })
+ if err != nil {
+ t.Fatalf("could not create a new ApiHandler: %v", err)
+ }
+
+ router := app.NewRouter()
+ am := app.NewAuthMiddleware(auth.GetUserFromRequest)
+ apiHandler.RegisterRoutes(router, am)
+ apiHandler.RegisterIntegrationRoutes(router, am)
+
+ user, apiErr := createTestUser()
+ if apiErr != nil {
+ t.Fatalf("could not create a test user: %v", apiErr)
+ }
+
+ return &IntegrationsTestBed{
+ t: t,
+ testUser: user,
+ qsHttpHandler: router,
+ mockClickhouse: mockClickhouse,
+ }
+}
+
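+// postableFromPipelines converts a pipelines list from a GET response back into a POST-able payload, so tests can re-save it with tweaks.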
+func postableFromPipelines(pipelines []logparsingpipeline.Pipeline) logparsingpipeline.PostablePipelines {
+ result := logparsingpipeline.PostablePipelines{}
+
+ for _, p := range pipelines {
+ postable := logparsingpipeline.PostablePipeline{
+ Id: p.Id,
+ OrderId: p.OrderId,
+ Name: p.Name,
+ Alias: p.Alias,
+ Enabled: p.Enabled,
+ Config: p.Config,
+ }
+
+ if p.Description != nil {
+ postable.Description = *p.Description
+ }
+
+ if p.Filter != nil {
+ postable.Filter = p.Filter
+ }
+
+ result.Pipelines = append(result.Pipelines, postable)
+ }
+
+ return result
+}
diff --git a/pkg/query-service/tests/integration/test_utils.go b/pkg/query-service/tests/integration/test_utils.go
new file mode 100644
index 0000000000..ac6e1db7c5
--- /dev/null
+++ b/pkg/query-service/tests/integration/test_utils.go
@@ -0,0 +1,200 @@
+package tests
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/google/uuid"
+ "github.com/jmoiron/sqlx"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
+ mockhouse "github.com/srikanthccv/ClickHouse-go-mock"
+ "github.com/stretchr/testify/require"
+ "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
+ "go.signoz.io/signoz/pkg/query-service/auth"
+ "go.signoz.io/signoz/pkg/query-service/constants"
+ "go.signoz.io/signoz/pkg/query-service/dao"
+ "go.signoz.io/signoz/pkg/query-service/interfaces"
+ "go.signoz.io/signoz/pkg/query-service/model"
+ "golang.org/x/exp/maps"
+)
+
+func NewMockClickhouseReader(
+ t *testing.T, testDB *sqlx.DB, featureFlags interfaces.FeatureLookup,
+) (
+ *clickhouseReader.ClickHouseReader, mockhouse.ClickConnMockCommon,
+) {
+ require.NotNil(t, testDB)
+
+ mockDB, err := mockhouse.NewClickHouseWithQueryMatcher(nil, sqlmock.QueryMatcherRegexp)
+
+ require.Nil(t, err, "could not init mock clickhouse")
+ reader := clickhouseReader.NewReaderFromClickhouseConnection(
+ mockDB,
+ clickhouseReader.NewOptions("", 10, 10, 10*time.Second, ""),
+ testDB,
+ "",
+ featureFlags,
+ "",
+ )
+
+ return reader, mockDB
+}
+
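+// addLogsQueryExpectation queues a mock ClickHouse result so the next logs query returns the given logs.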
+func addLogsQueryExpectation(
+ mockClickhouse mockhouse.ClickConnMockCommon,
+ logsToReturn []model.SignozLog,
+) {
+ cols := []mockhouse.ColumnType{}
+ cols = append(cols, mockhouse.ColumnType{Type: "UInt64", Name: "timestamp"})
+ cols = append(cols, mockhouse.ColumnType{Type: "UInt64", Name: "observed_timestamp"})
+ cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "id"})
+ cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "trace_id"})
+ cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "span_id"})
+ cols = append(cols, mockhouse.ColumnType{Type: "UInt32", Name: "trace_flags"})
+ cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "severity_text"})
+ cols = append(cols, mockhouse.ColumnType{Type: "UInt8", Name: "severity_number"})
+ cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "body"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Array(String)", Name: "resources_string_key"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Array(String)", Name: "resources_string_value"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Array(String)", Name: "attributes_string_key"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Array(String)", Name: "attributes_string_value"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Array(String)", Name: "attributes_int64_key"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Array(Int64)", Name: "attributes_int64_value"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Array(String)", Name: "attributes_float64_key"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Array(Float64)", Name: "attributes_float64_value"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Array(String)", Name: "attributes_bool_key"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Array(Bool)", Name: "attributes_bool_value"})
+
+ values := [][]any{}
+ for _, l := range logsToReturn {
+ rowValues := []any{}
+ rowValues = append(rowValues, l.Timestamp)
+ rowValues = append(rowValues, l.Timestamp)
+ rowValues = append(rowValues, l.ID)
+ rowValues = append(rowValues, l.TraceID)
+ rowValues = append(rowValues, l.SpanID)
+ rowValues = append(rowValues, l.TraceFlags)
+ rowValues = append(rowValues, l.SeverityText)
+ rowValues = append(rowValues, l.SeverityNumber)
+ rowValues = append(rowValues, l.Body)
+ rowValues = append(rowValues, maps.Keys(l.Resources_string))
+ rowValues = append(rowValues, maps.Values(l.Resources_string))
+ rowValues = append(rowValues, maps.Keys(l.Attributes_string))
+ rowValues = append(rowValues, maps.Values(l.Attributes_string))
+ rowValues = append(rowValues, maps.Keys(l.Attributes_int64))
+ rowValues = append(rowValues, maps.Values(l.Attributes_int64))
+ rowValues = append(rowValues, maps.Keys(l.Attributes_float64))
+ rowValues = append(rowValues, maps.Values(l.Attributes_float64))
+ rowValues = append(rowValues, maps.Keys(l.Attributes_bool))
+ rowValues = append(rowValues, maps.Values(l.Attributes_bool))
+ values = append(values, rowValues)
+ }
+
+ rows := mockhouse.NewRows(cols, values)
+ mockClickhouse.ExpectQuery(
+ "SELECT .*? from signoz_logs.distributed_logs.*",
+ ).WillReturnRows(rows)
+}
+
+func makeTestSignozLog(
+ body string,
+ attributes map[string]interface{},
+) model.SignozLog {
+
+ testLog := model.SignozLog{
+ Timestamp: uint64(time.Now().UnixNano()),
+ Body: body,
+ Attributes_bool: map[string]bool{},
+ Attributes_string: map[string]string{},
+ Attributes_int64: map[string]int64{},
+ Attributes_float64: map[string]float64{},
+ Resources_string: map[string]string{},
+ SeverityText: entry.Info.String(),
+ SeverityNumber: uint8(entry.Info),
+ SpanID: uuid.New().String(),
+ TraceID: uuid.New().String(),
+ }
+
+ for k, v := range attributes {
+ switch v := v.(type) {
+ case bool:
+ testLog.Attributes_bool[k] = v
+ case string:
+ testLog.Attributes_string[k] = v
+ case int:
+ testLog.Attributes_int64[k] = int64(v)
+ case float64:
+ testLog.Attributes_float64[k] = v
+ default:
+ panic(fmt.Sprintf("found attribute value of unsupported type %T in test log", v))
+ }
+ }
+
+ return testLog
+}
+
+func createTestUser() (*model.User, *model.ApiError) {
+ // Create a test user for auth
+ ctx := context.Background()
+ org, apiErr := dao.DB().CreateOrg(ctx, &model.Organization{
+ Name: "test",
+ })
+ if apiErr != nil {
+ return nil, apiErr
+ }
+
+ group, apiErr := dao.DB().GetGroupByName(ctx, constants.AdminGroup)
+ if apiErr != nil {
+ return nil, apiErr
+ }
+
+ auth.InitAuthCache(ctx)
+
+ userId := uuid.NewString()
+ return dao.DB().CreateUser(
+ ctx,
+ &model.User{
+ Id: userId,
+ Name: "test",
+ Email: userId[:8] + "test@test.com",
+ Password: "test",
+ OrgId: org.Id,
+ GroupId: group.Id,
+ },
+ true,
+ )
+}
+
+func NewAuthenticatedTestRequest(
+ user *model.User,
+ path string,
+ postData interface{},
+) (*http.Request, error) {
+ userJwt, err := auth.GenerateJWTForUser(user)
+ if err != nil {
+ return nil, err
+ }
+
+ var req *http.Request
+
+ if postData != nil {
+ var body bytes.Buffer
+ err = json.NewEncoder(&body).Encode(postData)
+ if err != nil {
+ return nil, err
+ }
+ req = httptest.NewRequest(http.MethodPost, path, &body)
+ } else {
+ req = httptest.NewRequest(http.MethodGet, path, nil)
+ }
+
+ req.Header.Add("Authorization", "Bearer "+userJwt.AccessJwt)
+ return req, nil
+}
diff --git a/pkg/query-service/tests/test-deploy/docker-compose.yaml b/pkg/query-service/tests/test-deploy/docker-compose.yaml
index 3c64daad7b..77132c42a9 100644
--- a/pkg/query-service/tests/test-deploy/docker-compose.yaml
+++ b/pkg/query-service/tests/test-deploy/docker-compose.yaml
@@ -138,7 +138,7 @@ services:
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
- image: signoz/alertmanager:0.23.4
+ image: signoz/alertmanager:0.23.5
container_name: signoz-alertmanager
volumes:
- ./data/alertmanager:/data
diff --git a/pkg/query-service/utils/format.go b/pkg/query-service/utils/format.go
index bc15a8a1e9..0a614e2987 100644
--- a/pkg/query-service/utils/format.go
+++ b/pkg/query-service/utils/format.go
@@ -183,11 +183,11 @@ func ClickHouseFormattedValue(v interface{}) string {
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool:
return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
default:
- zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
+ zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
return ""
}
default:
- zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
+ zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
return ""
}
}
diff --git a/pkg/query-service/utils/labels/labels.go b/pkg/query-service/utils/labels/labels.go
index ff4237a8f9..2e0041aafc 100644
--- a/pkg/query-service/utils/labels/labels.go
+++ b/pkg/query-service/utils/labels/labels.go
@@ -16,8 +16,6 @@ const sep = '\xff'
const (
MetricNameLabel = "__name__"
AlertNameLabel = "alertname"
- BucketLabel = "le"
- InstanceName = "instance"
// AlertStateLabel is the label name indicating the state of an alert.
AlertStateLabel = "alertstate"
@@ -25,9 +23,11 @@ const (
AlertRuleIdLabel = "ruleId"
RuleSourceLabel = "ruleSource"
- RuleThresholdLabel = "threshold"
- AlertAdditionalInfoLabel = "additionalInfo"
- AlertSummaryLabel = "summary"
+ RuleThresholdLabel = "threshold"
+ AlertSummaryLabel = "summary"
+ AlertDescriptionLabel = "description"
+
+ AlertMissingData = "Missing data"
)
// Label is a key/value pair of strings.
diff --git a/pkg/query-service/utils/slices.go b/pkg/query-service/utils/slices.go
new file mode 100644
index 0000000000..c196529a6a
--- /dev/null
+++ b/pkg/query-service/utils/slices.go
@@ -0,0 +1,29 @@
+package utils
+
+// Map as in map-reduce.
+func MapSlice[Slice ~[]Elem, Elem any, Output any](
+ slice Slice, mapper func(Elem) Output,
+) []Output {
+ result := []Output{}
+
+ for _, item := range slice {
+ mapped := mapper(item)
+ result = append(result, mapped)
+ }
+
+ return result
+}
+
+func FilterSlice[Slice ~[]Elem, Elem any](
+ slice Slice, filterFn func(Elem) bool,
+) Slice {
+ result := Slice{}
+
+ for _, item := range slice {
+ if filterFn(item) {
+ result = append(result, item)
+ }
+ }
+
+ return result
+}
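+
+// Example usage (hypothetical values):
+//   MapSlice([]int{1, 2}, strconv.Itoa)                            // []string{"1", "2"}
+//   FilterSlice([]int{1, 2, 3}, func(n int) bool { return n > 1 }) // []int{2, 3}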
diff --git a/pkg/query-service/utils/testutils.go b/pkg/query-service/utils/testutils.go
new file mode 100644
index 0000000000..d8989d9323
--- /dev/null
+++ b/pkg/query-service/utils/testutils.go
@@ -0,0 +1,31 @@
+package utils
+
+import (
+ "os"
+ "testing"
+
+ "github.com/jmoiron/sqlx"
+ "go.signoz.io/signoz/pkg/query-service/app/dashboards"
+ "go.signoz.io/signoz/pkg/query-service/dao"
+)
+
+func NewQueryServiceDBForTests(t *testing.T) *sqlx.DB {
+ testDBFile, err := os.CreateTemp("", "test-signoz-db-*")
+ if err != nil {
+ t.Fatalf("could not create temp file for test db: %v", err)
+ }
+ testDBFilePath := testDBFile.Name()
+ t.Cleanup(func() { os.Remove(testDBFilePath) })
+ testDBFile.Close()
+
+ testDB, err := sqlx.Open("sqlite3", testDBFilePath)
+ if err != nil {
+ t.Fatalf("could not open test db sqlite file: %v", err)
+ }
+
+ // TODO(Raj): This should not require passing in the DB file path
+ dao.InitDao("sqlite", testDBFilePath)
+ dashboards.InitDB(testDBFilePath)
+
+ return testDB
+}
diff --git a/pkg/query-service/utils/time.go b/pkg/query-service/utils/time.go
index ea644d7600..274b032cdb 100644
--- a/pkg/query-service/utils/time.go
+++ b/pkg/query-service/utils/time.go
@@ -1,6 +1,7 @@
package utils
import (
+ "fmt"
"time"
"go.uber.org/zap"
@@ -8,7 +9,12 @@ import (
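+// Elapsed is meant to be deferred: `defer Elapsed("funcName", arg)()`.
+// The returned closure logs how long the enclosing function took, along with the given args.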
func Elapsed(funcName string, args ...interface{}) func() {
start := time.Now()
+ argsStr := ""
+ for _, v := range args {
+ argsStr += fmt.Sprintf("%v, ", v)
+ }
+ // trim the trailing ", "; guard against Elapsed being called with no args
+ if len(argsStr) >= 2 {
+ argsStr = argsStr[:len(argsStr)-2]
+ }
return func() {
- zap.S().Infof("func %s took %v with args %v", funcName, time.Since(start), args)
+ zap.L().Info("Elapsed time", zap.String("func_name", funcName), zap.Duration("duration", time.Since(start)), zap.String("args", argsStr))
}
}
diff --git a/pkg/query-service/version/version.go b/pkg/query-service/version/version.go
index 577fe6789c..68c37a4e0e 100644
--- a/pkg/query-service/version/version.go
+++ b/pkg/query-service/version/version.go
@@ -3,8 +3,6 @@ package version
import (
"fmt"
"runtime"
-
- "go.uber.org/zap"
)
// These fields are set during an official build
@@ -40,7 +38,7 @@ Copyright 2022 SigNoz
// PrintVersion prints version and other helpful information.
func PrintVersion() {
- zap.S().Infof("\n%s\n", BuildDetails())
+ fmt.Println(BuildDetails())
}
func GetVersion() string {