diff --git a/frontend/src/container/NewDashboard/DashboardDescription/index.tsx b/frontend/src/container/NewDashboard/DashboardDescription/index.tsx
index 996c508da4..c916ec7501 100644
--- a/frontend/src/container/NewDashboard/DashboardDescription/index.tsx
+++ b/frontend/src/container/NewDashboard/DashboardDescription/index.tsx
@@ -23,7 +23,12 @@ function DashboardDescription(): JSX.Element {
handleDashboardLockToggle,
} = useDashboard();
- const selectedData = selectedDashboard?.data || ({} as DashboardData);
+ const selectedData = selectedDashboard
+ ? {
+ ...selectedDashboard.data,
+ uuid: selectedDashboard.uuid,
+ }
+ : ({} as DashboardData);
const { title = '', tags, description } = selectedData || {};
diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx
index 339210f956..9f54305b1e 100644
--- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx
+++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx
@@ -2,14 +2,13 @@ import './DashboardVariableSelection.styles.scss';
import { orange } from '@ant-design/colors';
import { WarningOutlined } from '@ant-design/icons';
-import { Input, Popover, Select, Tooltip, Typography } from 'antd';
+import { Input, Popover, Select, Typography } from 'antd';
import dashboardVariablesQuery from 'api/dashboard/variables/dashboardVariablesQuery';
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
import { commaValuesParser } from 'lib/dashbaordVariables/customCommaValuesParser';
import sortValues from 'lib/dashbaordVariables/sortVariableValues';
import { debounce } from 'lodash-es';
import map from 'lodash-es/map';
-import { useDashboard } from 'providers/Dashboard/Dashboard';
import { memo, useEffect, useMemo, useState } from 'react';
import { useQuery } from 'react-query';
import { IDashboardVariable } from 'types/api/dashboard/getAll';
@@ -52,7 +51,6 @@ function VariableItem({
onValueUpdate,
lastUpdatedVar,
}: VariableItemProps): JSX.Element {
- const { isDashboardLocked } = useDashboard();
const [optionsData, setOptionsData] = useState<(string | number | boolean)[]>(
[],
);
@@ -222,84 +220,77 @@ function VariableItem({
}, [variableData.type, variableData.customValue]);
return (
-
-
-
- ${variableData.name}
-
-
- {variableData.type === 'TEXTBOX' ? (
-
+
+ ${variableData.name}
+
+
+ {variableData.type === 'TEXTBOX' ? (
+ {
+ debouncedHandleChange(e.target.value || '');
+ }}
+ style={{
+ width:
+ 50 + ((variableData.selectedValue?.toString()?.length || 0) * 7 || 50),
+ }}
+ />
+ ) : (
+ !errorMessage &&
+ optionsData && (
+
-
-
+ placeholder="Select value"
+ placement="bottomRight"
+ mode={mode}
+ dropdownMatchSelectWidth={false}
+ style={SelectItemStyle}
+ loading={isLoading}
+ showSearch
+ data-testid="variable-select"
+ className="variable-select"
+ getPopupContainer={popupContainer}
+ >
+ {enableSelectAll && (
+
+ ALL
+
+ )}
+ {map(optionsData, (option) => (
+
+ {option.toString()}
+
+ ))}
+
+ )
+ )}
+ {variableData.type !== 'TEXTBOX' && errorMessage && (
+
+ {errorMessage}}
+ >
+
+
+
+ )}
+
+
);
}
diff --git a/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphs.tsx b/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphs.tsx
index 647b746c2d..aa7553af53 100644
--- a/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphs.tsx
+++ b/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphs.tsx
@@ -133,6 +133,7 @@ function WidgetGraph({
softMax,
softMin,
panelType: selectedGraph,
+ currentQuery,
}),
[
widgetId,
@@ -148,6 +149,7 @@ function WidgetGraph({
softMax,
softMin,
selectedGraph,
+ currentQuery,
],
);
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-installOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-installOtelCollector.md
new file mode 100644
index 0000000000..946b7fbdbf
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-installOtelCollector.md
@@ -0,0 +1,24 @@
+## Install otel-collector in your Kubernetes infra
+
+
+Add the SigNoz Helm Chart repository
+```bash
+helm repo add signoz https://charts.signoz.io
+```
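+
+To confirm the repository was added, you can list its charts (a quick optional check):
+
+```bash
+helm search repo signoz
+```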
+
+
+If the chart is already present, update it to the latest version using:
+```bash
+helm repo update
+```
+
+
+Install the Kubernetes Infrastructure chart provided by SigNoz
+```bash
+helm install my-release signoz/k8s-infra \
+--set otelCollectorEndpoint=ingest.{{REGION}}.signoz.cloud:443 \
+--set otelInsecure=false \
+--set signozApiKey={{SIGNOZ_INGESTION_KEY}} \
+--set global.clusterName=<CLUSTER_NAME>
+```
+- Replace `<CLUSTER_NAME>` with the name of the Kubernetes cluster or a unique identifier of the cluster.
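+
+Once the install completes, you can sanity-check the release (assuming the release name `my-release` used above; most Helm charts label pods with `app.kubernetes.io/instance`):
+
+```bash
+helm status my-release
+kubectl get pods -l app.kubernetes.io/instance=my-release
+```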
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-instrumentApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-instrumentApplication.md
new file mode 100644
index 0000000000..c9138e8a5c
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-instrumentApplication.md
@@ -0,0 +1,64 @@
+
+
+After setting up the OTel Collector agent, follow the steps below to instrument your PHP application.
+
+### Step 1: Set Up the Development Environment
+
+To configure your PHP application to send data, you need the OpenTelemetry PHP extension. Since the extension is built from source, you need the build tools, which can be installed using the command for your platform:
+
+**Linux**:
+```bash
+sudo apt-get install gcc make autoconf
+```
+
+**macOS (Homebrew)**:
+```bash
+brew install gcc make autoconf
+```
+
+
+
+### Step 2: Build the extension
+
+With our environment set up, we can install the extension using [PECL](https://pecl.php.net/):
+
+```bash
+pecl install opentelemetry
+```
+
+After successfully installing the OpenTelemetry extension, add it to the `php.ini` file of your project:
+
+```bash
+[opentelemetry]
+extension=opentelemetry.so
+```
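+
+If you are not sure which `php.ini` file PHP loads, you can locate it with:
+
+```bash
+php --ini
+```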
+
+Verify that the extension is enabled by running:
+
+```bash
+php -m | grep opentelemetry
+```
+
+If the extension is enabled, the above command will output:
+
+```bash
+opentelemetry
+```
+
+
+
+### Step 3: Add the dependencies
+
+Add the dependencies required for automatic instrumentation using this command:
+
+```bash
+composer config allow-plugins.php-http/discovery false
+composer require \
+ open-telemetry/sdk \
+ open-telemetry/exporter-otlp \
+ php-http/guzzle7-adapter \
+ open-telemetry/transport-grpc
+```
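+
+Optionally, verify the packages were installed; Composer's `show` command accepts a wildcard:
+
+```bash
+composer show 'open-telemetry/*'
+```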
+
+
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-runApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-runApplication.md
new file mode 100644
index 0000000000..9fa8f823e2
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-runApplication.md
@@ -0,0 +1,16 @@
+### Set environment variables and run app
+
+We will pass environment variables at runtime:
+
+```bash
+env OTEL_PHP_AUTOLOAD_ENABLED=true \
+ OTEL_SERVICE_NAME={{MYAPP}} \
+ OTEL_TRACES_EXPORTER=otlp \
+ OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf \
+ OTEL_EXPORTER_OTLP_ENDPOINT=<COLLECTOR_ENDPOINT> \
+ OTEL_PROPAGATORS=baggage,tracecontext \
+ <your-run-command>
+```
+
+- `<COLLECTOR_ENDPOINT>` - Endpoint at which the collector is running, e.g. `http://localhost:4317`
+- `<your-run-command>` - Run command for your PHP application
\ No newline at end of file
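+
+For example, assuming the collector agent is reachable over OTLP/HTTP on port 4318 (the port configured in the collector setup) and the app runs on PHP's built-in web server (hypothetical values for illustration):
+
+```bash
+# 4318 is the collector's default OTLP/HTTP port; adjust endpoint and run command to your setup
+env OTEL_PHP_AUTOLOAD_ENABLED=true \
+    OTEL_SERVICE_NAME=my-php-app \
+    OTEL_TRACES_EXPORTER=otlp \
+    OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf \
+    OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 \
+    OTEL_PROPAGATORS=baggage,tracecontext \
+    php -S localhost:8080 index.php
+```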
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/QuickStart/php-linuxamd64-quickStart-instrumentApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/QuickStart/php-linuxamd64-quickStart-instrumentApplication.md
new file mode 100644
index 0000000000..2b28b1ab8b
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/QuickStart/php-linuxamd64-quickStart-instrumentApplication.md
@@ -0,0 +1,60 @@
+
+
+### Step 1: Set Up the Development Environment
+
+To configure your PHP application to send data, you need the OpenTelemetry PHP extension. Since the extension is built from source, you need the build tools, which can be installed using the command for your platform:
+
+**Linux**:
+```bash
+sudo apt-get install gcc make autoconf
+```
+
+**macOS (Homebrew)**:
+```bash
+brew install gcc make autoconf
+```
+
+
+
+### Step 2: Build the extension
+
+With our environment set up, we can install the extension using [PECL](https://pecl.php.net/):
+
+```bash
+pecl install opentelemetry
+```
+
+After successfully installing the OpenTelemetry extension, add it to the `php.ini` file of your project:
+
+```bash
+[opentelemetry]
+extension=opentelemetry.so
+```
+
+Verify that the extension is enabled by running:
+
+```bash
+php -m | grep opentelemetry
+```
+
+If the extension is enabled, the above command will output:
+
+```bash
+opentelemetry
+```
+
+
+
+### Step 3: Add the dependencies
+
+Add the dependencies required for automatic instrumentation using this command:
+
+```bash
+composer config allow-plugins.php-http/discovery false
+composer require \
+ open-telemetry/sdk \
+ open-telemetry/exporter-otlp \
+ php-http/guzzle7-adapter \
+ open-telemetry/transport-grpc
+```
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/QuickStart/php-linuxamd64-quickStart-runApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/QuickStart/php-linuxamd64-quickStart-runApplication.md
new file mode 100644
index 0000000000..587a1b4373
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/QuickStart/php-linuxamd64-quickStart-runApplication.md
@@ -0,0 +1,16 @@
+### Running your PHP application
+
+We will pass environment variables at runtime:
+
+```bash
+env OTEL_PHP_AUTOLOAD_ENABLED=true \
+ OTEL_SERVICE_NAME={{MYAPP}} \
+ OTEL_TRACES_EXPORTER=otlp \
+ OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf \
+ OTEL_EXPORTER_OTLP_ENDPOINT=https://ingest.{{REGION}}.signoz.cloud:443 \
+ OTEL_EXPORTER_OTLP_HEADERS=signoz-access-token={{SIGNOZ_INGESTION_KEY}} \
+ OTEL_PROPAGATORS=baggage,tracecontext \
+ <your-run-command>
+```
+
+- `<your-run-command>` - Run command for your PHP application
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-installOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-installOtelCollector.md
new file mode 100644
index 0000000000..a659f36474
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-installOtelCollector.md
@@ -0,0 +1,96 @@
+## Setup OpenTelemetry Binary as an agent
+
+
+### Step 1: Download otel-collector tar.gz
+```bash
+wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_amd64.tar.gz
+```
+
+
+### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
+```bash
+mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_amd64.tar.gz -C otelcol-contrib
+```
+
+
+### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the content below
+```yaml
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ hostmetrics:
+ collection_interval: 60s
+ scrapers:
+ cpu: {}
+ disk: {}
+ load: {}
+ filesystem: {}
+ memory: {}
+ network: {}
+ paging: {}
+ process:
+ mute_process_name_error: true
+ mute_process_exe_error: true
+ mute_process_io_error: true
+ processes: {}
+ prometheus:
+ config:
+ global:
+ scrape_interval: 60s
+ scrape_configs:
+ - job_name: otel-collector-binary
+ static_configs:
+ - targets:
+ # - localhost:8888
+processors:
+ batch:
+ send_batch_size: 1000
+ timeout: 10s
+ # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
+ resourcedetection:
+ detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
+ # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
+ timeout: 2s
+ system:
+ hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
+extensions:
+ health_check: {}
+ zpages: {}
+exporters:
+ otlp:
+ endpoint: "ingest.{{REGION}}.signoz.cloud:443"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
+ logging:
+ verbosity: normal
+service:
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8888
+ extensions: [health_check, zpages]
+ pipelines:
+ metrics:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+ metrics/internal:
+ receivers: [prometheus, hostmetrics]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ traces:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+ logs:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+```
+
+
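+Optionally, if your otelcol build includes the `validate` subcommand (present in recent releases), you can check the config for errors before running the collector:
+
+```bash
+./otelcol-contrib validate --config ./config.yaml
+```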
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-instrumentApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-instrumentApplication.md
new file mode 100644
index 0000000000..a59e7cd63e
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-instrumentApplication.md
@@ -0,0 +1,62 @@
+
+
+After setting up the OTel Collector agent, follow the steps below to instrument your PHP application.
+
+### Step 1: Set Up the Development Environment
+
+To configure your PHP application to send data, you need the OpenTelemetry PHP extension. Since the extension is built from source, you need the build tools, which can be installed using the command for your platform:
+
+**Linux**:
+```bash
+sudo apt-get install gcc make autoconf
+```
+
+**macOS (Homebrew)**:
+```bash
+brew install gcc make autoconf
+```
+
+
+
+### Step 2: Build the extension
+
+With our environment set up, we can install the extension using [PECL](https://pecl.php.net/):
+
+```bash
+pecl install opentelemetry
+```
+
+After successfully installing the OpenTelemetry extension, add it to the `php.ini` file of your project:
+
+```bash
+[opentelemetry]
+extension=opentelemetry.so
+```
+
+Verify that the extension is enabled by running:
+
+```bash
+php -m | grep opentelemetry
+```
+
+If the extension is enabled, the above command will output:
+
+```bash
+opentelemetry
+```
+
+
+
+### Step 3: Add the dependencies
+
+Add the dependencies required for automatic instrumentation using this command:
+
+```bash
+composer config allow-plugins.php-http/discovery false
+composer require \
+ open-telemetry/sdk \
+ open-telemetry/exporter-otlp \
+ php-http/guzzle7-adapter \
+ open-telemetry/transport-grpc
+```
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-runApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-runApplication.md
new file mode 100644
index 0000000000..f69dd3b393
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-runApplication.md
@@ -0,0 +1,41 @@
+
+
+Once you are done instrumenting your PHP application, you can run it using the commands below.
+
+
+
+### Step 1: Run OTel Collector
+ Run this command inside the `otelcol-contrib` directory that you created in the install Otel Collector step
+
+```bash
+./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
+```
+
+
+#### (Optional Step): View last 50 lines of `otelcol` logs
+```bash
+tail -f -n 50 otelcol-output.log
+```
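+
+#### (Optional Step): Check collector health
+The `health_check` extension enabled in `config.yaml` listens on port `13133` by default, so while the collector is running you can probe it (a quick optional check; adjust the port if you changed the extension's endpoint):
+
+```bash
+curl -s http://localhost:13133
+```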
+
+#### (Optional Step): Stop `otelcol`
+```bash
+kill "$(< otel-pid)"
+```
+
+
+### Step 2: Run your PHP application
+
+We will pass environment variables at runtime:
+
+```bash
+env OTEL_PHP_AUTOLOAD_ENABLED=true \
+ OTEL_SERVICE_NAME=<SERVICE_NAME> \
+ OTEL_TRACES_EXPORTER=otlp \
+ OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf \
+ OTEL_EXPORTER_OTLP_ENDPOINT=<COLLECTOR_ENDPOINT> \
+ OTEL_PROPAGATORS=baggage,tracecontext \
+ <your-run-command>
+```
+
+- `<COLLECTOR_ENDPOINT>` - Endpoint at which the collector is running, e.g. `http://localhost:4317`
+- `<your-run-command>` - Run command for your PHP application
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/QuickStart/php-linuxarm64-quickStart-instrumentApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/QuickStart/php-linuxarm64-quickStart-instrumentApplication.md
new file mode 100644
index 0000000000..2b28b1ab8b
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/QuickStart/php-linuxarm64-quickStart-instrumentApplication.md
@@ -0,0 +1,60 @@
+
+
+### Step 1: Set Up the Development Environment
+
+To configure your PHP application to send data, you need the OpenTelemetry PHP extension. Since the extension is built from source, you need the build tools, which can be installed using the command for your platform:
+
+**Linux**:
+```bash
+sudo apt-get install gcc make autoconf
+```
+
+**macOS (Homebrew)**:
+```bash
+brew install gcc make autoconf
+```
+
+
+
+### Step 2: Build the extension
+
+With our environment set up, we can install the extension using [PECL](https://pecl.php.net/):
+
+```bash
+pecl install opentelemetry
+```
+
+After successfully installing the OpenTelemetry extension, add it to the `php.ini` file of your project:
+
+```bash
+[opentelemetry]
+extension=opentelemetry.so
+```
+
+Verify that the extension is enabled by running:
+
+```bash
+php -m | grep opentelemetry
+```
+
+If the extension is enabled, the above command will output:
+
+```bash
+opentelemetry
+```
+
+
+
+### Step 3: Add the dependencies
+
+Add the dependencies required for automatic instrumentation using this command:
+
+```bash
+composer config allow-plugins.php-http/discovery false
+composer require \
+ open-telemetry/sdk \
+ open-telemetry/exporter-otlp \
+ php-http/guzzle7-adapter \
+ open-telemetry/transport-grpc
+```
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/QuickStart/php-linuxarm64-quickStart-runApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/QuickStart/php-linuxarm64-quickStart-runApplication.md
new file mode 100644
index 0000000000..587a1b4373
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/QuickStart/php-linuxarm64-quickStart-runApplication.md
@@ -0,0 +1,16 @@
+### Running your PHP application
+
+We will pass environment variables at runtime:
+
+```bash
+env OTEL_PHP_AUTOLOAD_ENABLED=true \
+ OTEL_SERVICE_NAME={{MYAPP}} \
+ OTEL_TRACES_EXPORTER=otlp \
+ OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf \
+ OTEL_EXPORTER_OTLP_ENDPOINT=https://ingest.{{REGION}}.signoz.cloud:443 \
+ OTEL_EXPORTER_OTLP_HEADERS=signoz-access-token={{SIGNOZ_INGESTION_KEY}} \
+ OTEL_PROPAGATORS=baggage,tracecontext \
+ <your-run-command>
+```
+
+- `<your-run-command>` - Run command for your PHP application
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-installOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-installOtelCollector.md
new file mode 100644
index 0000000000..cbabb8077b
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-installOtelCollector.md
@@ -0,0 +1,96 @@
+## Setup OpenTelemetry Binary as an agent
+
+
+### Step 1: Download otel-collector tar.gz
+```bash
+wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_arm64.tar.gz
+```
+
+
+### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
+```bash
+mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_arm64.tar.gz -C otelcol-contrib
+```
+
+
+### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the content below
+```yaml
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ hostmetrics:
+ collection_interval: 60s
+ scrapers:
+ cpu: {}
+ disk: {}
+ load: {}
+ filesystem: {}
+ memory: {}
+ network: {}
+ paging: {}
+ process:
+ mute_process_name_error: true
+ mute_process_exe_error: true
+ mute_process_io_error: true
+ processes: {}
+ prometheus:
+ config:
+ global:
+ scrape_interval: 60s
+ scrape_configs:
+ - job_name: otel-collector-binary
+ static_configs:
+ - targets:
+ # - localhost:8888
+processors:
+ batch:
+ send_batch_size: 1000
+ timeout: 10s
+ # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
+ resourcedetection:
+ detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
+ # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
+ timeout: 2s
+ system:
+ hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
+extensions:
+ health_check: {}
+ zpages: {}
+exporters:
+ otlp:
+ endpoint: "ingest.{{REGION}}.signoz.cloud:443"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
+ logging:
+ verbosity: normal
+service:
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8888
+ extensions: [health_check, zpages]
+ pipelines:
+ metrics:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+ metrics/internal:
+ receivers: [prometheus, hostmetrics]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ traces:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+ logs:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+```
+
+
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-instrumentApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-instrumentApplication.md
new file mode 100644
index 0000000000..a59e7cd63e
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-instrumentApplication.md
@@ -0,0 +1,62 @@
+
+
+After setting up the OTel Collector agent, follow the steps below to instrument your PHP application.
+
+### Step 1: Set Up the Development Environment
+
+To configure your PHP application to send data, you need the OpenTelemetry PHP extension. Since the extension is built from source, you need the build tools, which can be installed using the command for your platform:
+
+**Linux**:
+```bash
+sudo apt-get install gcc make autoconf
+```
+
+**macOS (Homebrew)**:
+```bash
+brew install gcc make autoconf
+```
+
+
+
+### Step 2: Build the extension
+
+With our environment set up, we can install the extension using [PECL](https://pecl.php.net/):
+
+```bash
+pecl install opentelemetry
+```
+
+After successfully installing the OpenTelemetry extension, add it to the `php.ini` file of your project:
+
+```bash
+[opentelemetry]
+extension=opentelemetry.so
+```
+
+Verify that the extension is enabled by running:
+
+```bash
+php -m | grep opentelemetry
+```
+
+If the extension is enabled, the above command will output:
+
+```bash
+opentelemetry
+```
+
+
+
+### Step 3: Add the dependencies
+
+Add the dependencies required for automatic instrumentation using this command:
+
+```bash
+composer config allow-plugins.php-http/discovery false
+composer require \
+ open-telemetry/sdk \
+ open-telemetry/exporter-otlp \
+ php-http/guzzle7-adapter \
+ open-telemetry/transport-grpc
+```
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-runApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-runApplication.md
new file mode 100644
index 0000000000..a11e47198f
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-runApplication.md
@@ -0,0 +1,41 @@
+
+
+Once you are done instrumenting your PHP application, you can run it using the commands below.
+
+
+
+### Step 1: Run OTel Collector
+ Run this command inside the `otelcol-contrib` directory that you created in the install Otel Collector step
+
+```bash
+./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
+```
+
+
+#### (Optional Step): View last 50 lines of `otelcol` logs
+```bash
+tail -f -n 50 otelcol-output.log
+```
+
+#### (Optional Step): Stop `otelcol`
+```bash
+kill "$(< otel-pid)"
+```
+
+
+### Step 2: Run your PHP application
+
+We will pass environment variables at runtime:
+
+```bash
+env OTEL_PHP_AUTOLOAD_ENABLED=true \
+ OTEL_SERVICE_NAME=<SERVICE_NAME> \
+ OTEL_TRACES_EXPORTER=otlp \
+ OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf \
+ OTEL_EXPORTER_OTLP_ENDPOINT=<COLLECTOR_ENDPOINT> \
+ OTEL_PROPAGATORS=baggage,tracecontext \
+ <your-run-command>
+```
+
+- `<COLLECTOR_ENDPOINT>` - Endpoint at which the collector is running, e.g. `http://localhost:4317`
+- `<your-run-command>` - Run command for your PHP application
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/QuickStart/php-macosamd64-quickStart-instrumentApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/QuickStart/php-macosamd64-quickStart-instrumentApplication.md
new file mode 100644
index 0000000000..2b28b1ab8b
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/QuickStart/php-macosamd64-quickStart-instrumentApplication.md
@@ -0,0 +1,60 @@
+
+
+### Step 1: Set Up the Development Environment
+
+To configure your PHP application to send data, you need the OpenTelemetry PHP extension. Since the extension is built from source, you need the build tools, which can be installed using the command for your platform:
+
+**Linux**:
+```bash
+sudo apt-get install gcc make autoconf
+```
+
+**macOS (Homebrew)**:
+```bash
+brew install gcc make autoconf
+```
+
+
+
+### Step 2: Build the extension
+
+With our environment set up, we can install the extension using [PECL](https://pecl.php.net/):
+
+```bash
+pecl install opentelemetry
+```
+
+After successfully installing the OpenTelemetry extension, add it to the `php.ini` file of your project:
+
+```bash
+[opentelemetry]
+extension=opentelemetry.so
+```
+
+Verify that the extension is enabled by running:
+
+```bash
+php -m | grep opentelemetry
+```
+
+If the extension is enabled, the above command will output:
+
+```bash
+opentelemetry
+```
+
+
+
+### Step 3: Add the dependencies
+
+Add the dependencies required for automatic instrumentation using this command:
+
+```bash
+composer config allow-plugins.php-http/discovery false
+composer require \
+ open-telemetry/sdk \
+ open-telemetry/exporter-otlp \
+ php-http/guzzle7-adapter \
+ open-telemetry/transport-grpc
+```
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/QuickStart/php-macosamd64-quickStart-runApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/QuickStart/php-macosamd64-quickStart-runApplication.md
new file mode 100644
index 0000000000..7b61210f9d
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/QuickStart/php-macosamd64-quickStart-runApplication.md
@@ -0,0 +1,16 @@
+### Running your PHP application
+
+We will pass environment variables at runtime:
+
+```bash
+env OTEL_PHP_AUTOLOAD_ENABLED=true \
+ OTEL_SERVICE_NAME={{MYAPP}} \
+ OTEL_TRACES_EXPORTER=otlp \
+ OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf \
+ OTEL_EXPORTER_OTLP_ENDPOINT=https://ingest.{{REGION}}.signoz.cloud:443 \
+ OTEL_EXPORTER_OTLP_HEADERS=signoz-access-token={{SIGNOZ_INGESTION_KEY}} \
+ OTEL_PROPAGATORS=baggage,tracecontext \
+ <your-run-command>
+```
+
+- `<your-run-command>` - Run command for your PHP application
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-installOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-installOtelCollector.md
new file mode 100644
index 0000000000..843e86a411
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-installOtelCollector.md
@@ -0,0 +1,96 @@
+## Setup OpenTelemetry Binary as an agent
+
+
+### Step 1: Download otel-collector tar.gz
+```bash
+wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_amd64.tar.gz
+```
+
+
+### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
+```bash
+mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_amd64.tar.gz -C otelcol-contrib
+```
+
+
+### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the content below
+```yaml
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ hostmetrics:
+ collection_interval: 60s
+ scrapers:
+ cpu: {}
+ disk: {}
+ load: {}
+ filesystem: {}
+ memory: {}
+ network: {}
+ paging: {}
+ process:
+ mute_process_name_error: true
+ mute_process_exe_error: true
+ mute_process_io_error: true
+ processes: {}
+ prometheus:
+ config:
+ global:
+ scrape_interval: 60s
+ scrape_configs:
+ - job_name: otel-collector-binary
+ static_configs:
+ - targets:
+ # - localhost:8888
+processors:
+ batch:
+ send_batch_size: 1000
+ timeout: 10s
+ # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
+ resourcedetection:
+ detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
+ # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
+ timeout: 2s
+ system:
+ hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
+extensions:
+ health_check: {}
+ zpages: {}
+exporters:
+ otlp:
+ endpoint: "ingest.{{REGION}}.signoz.cloud:443"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
+ logging:
+ verbosity: normal
+service:
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8888
+ extensions: [health_check, zpages]
+ pipelines:
+ metrics:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+ metrics/internal:
+ receivers: [prometheus, hostmetrics]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ traces:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+ logs:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+```
+
+
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-instrumentApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-instrumentApplication.md
new file mode 100644
index 0000000000..a59e7cd63e
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-instrumentApplication.md
@@ -0,0 +1,62 @@
+
+
+After setting up the OTel Collector agent, follow the steps below to instrument your PHP application.
+
+### Step 1: Set Up the Development Environment
+
+To configure your PHP application to send data, you need the OpenTelemetry PHP extension. Since the extension is built from source, you need the build tools, which can be installed using the command for your platform:
+
+**Linux**:
+```bash
+sudo apt-get install gcc make autoconf
+```
+
+**macOS (Homebrew)**:
+```bash
+brew install gcc make autoconf
+```
+
+
+
+### Step 2: Build the extension
+
+With our environment set up, we can install the extension using [PECL](https://pecl.php.net/):
+
+```bash
+pecl install opentelemetry
+```
+
+After successfully installing the OpenTelemetry extension, add it to the `php.ini` file of your project:
+
+```bash
+[opentelemetry]
+extension=opentelemetry.so
+```
+
+Verify that the extension is enabled by running:
+
+```bash
+php -m | grep opentelemetry
+```
+
+If the extension is enabled, the above command will output:
+
+```bash
+opentelemetry
+```
+
+
+
+### Step 3: Add the dependencies
+
+Add the dependencies required for automatic instrumentation using this command:
+
+```bash
+composer config allow-plugins.php-http/discovery false
+composer require \
+ open-telemetry/sdk \
+ open-telemetry/exporter-otlp \
+ php-http/guzzle7-adapter \
+ open-telemetry/transport-grpc
+```
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-runApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-runApplication.md
new file mode 100644
index 0000000000..a11e47198f
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-runApplication.md
@@ -0,0 +1,41 @@
+
+
+Once you are done instrumenting your PHP application, you can run it using the commands below.
+
+
+
+### Step 1: Run OTel Collector
+ Run this command inside the `otelcol-contrib` directory that you created in the install Otel Collector step
+
+```bash
+./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
+```
+
+
+#### (Optional Step): View last 50 lines of `otelcol` logs
+```bash
+tail -f -n 50 otelcol-output.log
+```
+
+#### (Optional Step): Stop `otelcol`
+```bash
+kill "$(< otel-pid)"
+```
+
+
+### Step 2: Run your PHP application
+
+We will pass environment variables at runtime:
+
+```bash
+env OTEL_PHP_AUTOLOAD_ENABLED=true \
+ OTEL_SERVICE_NAME=<SERVICE_NAME> \
+ OTEL_TRACES_EXPORTER=otlp \
+ OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf \
+ OTEL_EXPORTER_OTLP_ENDPOINT=<COLLECTOR_ENDPOINT> \
+ OTEL_PROPAGATORS=baggage,tracecontext \
+ <your-run-command>
+```
+
+- `<COLLECTOR_ENDPOINT>` - Endpoint at which the collector is running, e.g. `http://localhost:4317`
+- `<your-run-command>` - Run command for your PHP application
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/QuickStart/php-macosarm64-quickStart-instrumentApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/QuickStart/php-macosarm64-quickStart-instrumentApplication.md
new file mode 100644
index 0000000000..2b28b1ab8b
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/QuickStart/php-macosarm64-quickStart-instrumentApplication.md
@@ -0,0 +1,60 @@
+
+
+### Step 1: Set Up the Development Environment
+
+To configure your PHP application to send data, you need the OpenTelemetry PHP extension. Since the extension is built from source, you need the build tools, which can be installed using the command for your platform:
+
+**Linux**:
+```bash
+sudo apt-get install gcc make autoconf
+```
+
+**macOS (Homebrew)**:
+```bash
+brew install gcc make autoconf
+```
+
+
+
+### Step 2: Build the extension
+
+With our environment set up, we can install the extension using [PECL](https://pecl.php.net/):
+
+```bash
+pecl install opentelemetry
+```
+
+After successfully installing the OpenTelemetry extension, add it to the `php.ini` file of your project:
+
+```bash
+[opentelemetry]
+extension=opentelemetry.so
+```
+
+Verify that the extension is enabled by running:
+
+```bash
+php -m | grep opentelemetry
+```
+
+If the extension is enabled, the above command will output:
+
+```bash
+opentelemetry
+```
+
+
+
+### Step 3: Add the dependencies
+
+Add the dependencies required for automatic instrumentation using this command:
+
+```bash
+composer config allow-plugins.php-http/discovery false
+composer require \
+ open-telemetry/sdk \
+ open-telemetry/exporter-otlp \
+ php-http/guzzle7-adapter \
+ open-telemetry/transport-grpc
+```
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/QuickStart/php-macosarm64-quickStart-runApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/QuickStart/php-macosarm64-quickStart-runApplication.md
new file mode 100644
index 0000000000..587a1b4373
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/QuickStart/php-macosarm64-quickStart-runApplication.md
@@ -0,0 +1,16 @@
+### Running your PHP application
+
+We will pass environment variables at runtime:
+
+```bash
+env OTEL_PHP_AUTOLOAD_ENABLED=true \
+ OTEL_SERVICE_NAME={{MYAPP}} \
+ OTEL_TRACES_EXPORTER=otlp \
+ OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf \
+ OTEL_EXPORTER_OTLP_ENDPOINT=https://ingest.{{REGION}}.signoz.cloud:443 \
+ OTEL_EXPORTER_OTLP_HEADERS=signoz-access-token={{SIGNOZ_INGESTION_KEY}} \
+ OTEL_PROPAGATORS=baggage,tracecontext \
+ <your-run-command>
+```
+
+- `<your-run-command>` - Run command for your PHP application
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-installOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-installOtelCollector.md
new file mode 100644
index 0000000000..3a780bb8de
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-installOtelCollector.md
@@ -0,0 +1,96 @@
+## Setup OpenTelemetry Binary as an agent
+
+
+### Step 1: Download otel-collector tar.gz
+```bash
+wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_arm64.tar.gz
+```
+
+
+### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
+```bash
+mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_arm64.tar.gz -C otelcol-contrib
+```
+
+
+### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the content below
+```yaml
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ hostmetrics:
+ collection_interval: 60s
+ scrapers:
+ cpu: {}
+ disk: {}
+ load: {}
+ filesystem: {}
+ memory: {}
+ network: {}
+ paging: {}
+ process:
+ mute_process_name_error: true
+ mute_process_exe_error: true
+ mute_process_io_error: true
+ processes: {}
+ prometheus:
+ config:
+ global:
+ scrape_interval: 60s
+ scrape_configs:
+ - job_name: otel-collector-binary
+ static_configs:
+ - targets:
+ # - localhost:8888
+processors:
+ batch:
+ send_batch_size: 1000
+ timeout: 10s
+ # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
+ resourcedetection:
+ detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
+ # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
+ timeout: 2s
+ system:
+ hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
+extensions:
+ health_check: {}
+ zpages: {}
+exporters:
+ otlp:
+ endpoint: "ingest.{{REGION}}.signoz.cloud:443"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
+ logging:
+ verbosity: normal
+service:
+ telemetry:
+ metrics:
+ address: 0.0.0.0:8888
+ extensions: [health_check, zpages]
+ pipelines:
+ metrics:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+ metrics/internal:
+ receivers: [prometheus, hostmetrics]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ traces:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+ logs:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+```
+
+
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-instrumentApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-instrumentApplication.md
new file mode 100644
index 0000000000..a59e7cd63e
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-instrumentApplication.md
@@ -0,0 +1,62 @@
+
+
+After setting up the OTel Collector agent, follow the steps below to instrument your PHP application.
+
+### Step 1: Set Up the Development Environment
+
+To configure your PHP application to send data, you need the OpenTelemetry PHP extension. Since the extension is built from source, you need the build tools, which can be installed using the command for your platform:
+
+**Linux**:
+```bash
+sudo apt-get install gcc make autoconf
+```
+
+**macOS (Homebrew)**:
+```bash
+brew install gcc make autoconf
+```
+
+
+
+### Step 2: Build the extension
+
+With our environment set up, we can install the extension using [PECL](https://pecl.php.net/):
+
+```bash
+pecl install opentelemetry
+```
+
+After successfully installing the OpenTelemetry extension, add it to the `php.ini` file of your project:
+
+```bash
+[opentelemetry]
+extension=opentelemetry.so
+```
+
+Verify that the extension is enabled by running:
+
+```bash
+php -m | grep opentelemetry
+```
+
+If the extension is enabled, the above command will output:
+
+```bash
+opentelemetry
+```
+
+
+
+### Step 3: Add the dependencies
+
+Add the dependencies required for automatic instrumentation using this command:
+
+```bash
+composer config allow-plugins.php-http/discovery false
+composer require \
+ open-telemetry/sdk \
+ open-telemetry/exporter-otlp \
+ php-http/guzzle7-adapter \
+ open-telemetry/transport-grpc
+```
diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-runApplication.md b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-runApplication.md
new file mode 100644
index 0000000000..a11e47198f
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-runApplication.md
@@ -0,0 +1,41 @@
+
+
+Once you are done instrumenting your PHP application, you can run it using the commands below.
+
+
+
+### Step 1: Run OTel Collector
+ Run this command inside the `otelcol-contrib` directory that you created in the install Otel Collector step
+
+```bash
+./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
+```
+
+
+#### (Optional Step): View last 50 lines of `otelcol` logs
+```bash
+tail -f -n 50 otelcol-output.log
+```
+
+#### (Optional Step): Stop `otelcol`
+```bash
+kill "$(< otel-pid)"
+```
+
+
+### Step 2: Run your PHP application
+
+We will pass environment variables at runtime:
+
+```bash
+env OTEL_PHP_AUTOLOAD_ENABLED=true \
+ OTEL_SERVICE_NAME=<SERVICE_NAME> \
+ OTEL_TRACES_EXPORTER=otlp \
+ OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf \
+ OTEL_EXPORTER_OTLP_ENDPOINT=<COLLECTOR_ENDPOINT> \
+ OTEL_PROPAGATORS=baggage,tracecontext \
+ <your-run-command>
+```
+
+- `<COLLECTOR_ENDPOINT>` - Endpoint at which the collector is running, e.g. `http://localhost:4317`
+- `<your-run-command>` - Run command for your PHP application
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts b/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts
index 7bf505f30d..485a33382c 100644
--- a/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts
+++ b/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts
@@ -403,6 +403,38 @@ import APM_javascript_reactjs_macOsARM64_quickStart_runApplication from '../Modu
import APM_javascript_reactjs_macOsARM64_recommendedSteps_setupOtelCollector from '../Modules/APM/Javascript/md-docs/ReactJS/MacOsARM64/Recommended/reactjs-macosarm64-recommended-installOtelCollector.md';
import APM_javascript_reactjs_macOsARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Javascript/md-docs/ReactJS/MacOsARM64/Recommended/reactjs-macosarm64-recommended-instrumentApplication.md';
import APM_javascript_reactjs_macOsARM64_recommendedSteps_runApplication from '../Modules/APM/Javascript/md-docs/ReactJS/MacOsARM64/Recommended/reactjs-macosarm64-recommended-runApplication.md';
+// PHP-Kubernetes
+import APM_php_kubernetes_recommendedSteps_setupOtelCollector from '../Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-installOtelCollector.md';
+import APM_php_kubernetes_recommendedSteps_instrumentApplication from '../Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-instrumentApplication.md';
+import APM_php_kubernetes_recommendedSteps_runApplication from '../Modules/APM/Php/md-docs/Kubernetes/php-kubernetes-runApplication.md';
+// PHP-LinuxAMD64-quickstart
+import APM_php_linuxAMD64_quickStart_instrumentApplication from '../Modules/APM/Php/md-docs/LinuxAMD64/QuickStart/php-linuxamd64-quickStart-instrumentApplication.md';
+import APM_php_linuxAMD64_quickStart_runApplication from '../Modules/APM/Php/md-docs/LinuxAMD64/QuickStart/php-linuxamd64-quickStart-runApplication.md';
+// PHP-LinuxAMD64-recommended
+import APM_php_linuxAMD64_recommendedSteps_setupOtelCollector from '../Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-installOtelCollector.md';
+import APM_php_linuxAMD64_recommendedSteps_instrumentApplication from '../Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-instrumentApplication.md';
+import APM_php_linuxAMD64_recommendedSteps_runApplication from '../Modules/APM/Php/md-docs/LinuxAMD64/Recommended/php-linuxamd64-recommended-runApplication.md';
+// PHP-LinuxARM64-quickstart
+import APM_php_linuxARM64_quickStart_instrumentApplication from '../Modules/APM/Php/md-docs/LinuxARM64/QuickStart/php-linuxarm64-quickStart-instrumentApplication.md';
+import APM_php_linuxARM64_quickStart_runApplication from '../Modules/APM/Php/md-docs/LinuxARM64/QuickStart/php-linuxarm64-quickStart-runApplication.md';
+// PHP-LinuxARM64-recommended
+import APM_php_linuxARM64_recommendedSteps_setupOtelCollector from '../Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-installOtelCollector.md';
+import APM_php_linuxARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-instrumentApplication.md';
+import APM_php_linuxARM64_recommendedSteps_runApplication from '../Modules/APM/Php/md-docs/LinuxARM64/Recommended/php-linuxarm64-recommended-runApplication.md';
+// PHP-MacOsAMD64-quickstart
+import APM_php_macOsAMD64_quickStart_instrumentApplication from '../Modules/APM/Php/md-docs/MacOsAMD64/QuickStart/php-macosamd64-quickStart-instrumentApplication.md';
+import APM_php_macOsAMD64_quickStart_runApplication from '../Modules/APM/Php/md-docs/MacOsAMD64/QuickStart/php-macosamd64-quickStart-runApplication.md';
+// PHP-MacOsAMD64-recommended
+import APM_php_macOsAMD64_recommendedSteps_setupOtelCollector from '../Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-installOtelCollector.md';
+import APM_php_macOsAMD64_recommendedSteps_instrumentApplication from '../Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-instrumentApplication.md';
+import APM_php_macOsAMD64_recommendedSteps_runApplication from '../Modules/APM/Php/md-docs/MacOsAMD64/Recommended/php-macosamd64-recommended-runApplication.md';
+// PHP-MacOsARM64-quickstart
+import APM_php_macOsARM64_quickStart_instrumentApplication from '../Modules/APM/Php/md-docs/MacOsARM64/QuickStart/php-macosarm64-quickStart-instrumentApplication.md';
+import APM_php_macOsARM64_quickStart_runApplication from '../Modules/APM/Php/md-docs/MacOsARM64/QuickStart/php-macosarm64-quickStart-runApplication.md';
+// PHP-MacOsARM64-recommended
+import APM_php_macOsARM64_recommendedSteps_setupOtelCollector from '../Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-installOtelCollector.md';
+import APM_php_macOsARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-instrumentApplication.md';
+import APM_php_macOsARM64_recommendedSteps_runApplication from '../Modules/APM/Php/md-docs/MacOsARM64/Recommended/php-macosarm64-recommended-runApplication.md';
/// ////// Javascript Done
/// ///// Python Start
// Django
@@ -575,7 +607,6 @@ import APM_python_other_macOsARM64_recommendedSteps_setupOtelCollector from '../
import APM_python_other_macOsARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Python/md-docs/Others/MacOsARM64/Recommended/others-macosarm64-recommended-instrumentApplication.md';
import APM_python_other_macOsARM64_recommendedSteps_runApplication from '../Modules/APM/Python/md-docs/Others/MacOsARM64/Recommended/others-macosarm64-recommended-runApplication.md';
// ----------------------------------------------------------------------------
-/// ////// Go Done
/// ///// ROR Start
// ROR-Kubernetes
import APM_rails_kubernetes_recommendedSteps_setupOtelCollector from '../Modules/APM/RubyOnRails/md-docs/Kubernetes/ror-kubernetes-installOtelCollector.md';
@@ -1546,4 +1577,36 @@ export const ApmDocFilePaths = {
APM_swift_macOsARM64_recommendedSteps_setupOtelCollector,
APM_swift_macOsARM64_recommendedSteps_instrumentApplication,
APM_swift_macOsARM64_recommendedSteps_runApplication,
+
+ APM_php_kubernetes_recommendedSteps_setupOtelCollector,
+ APM_php_kubernetes_recommendedSteps_instrumentApplication,
+ APM_php_kubernetes_recommendedSteps_runApplication,
+
+ APM_php_linuxAMD64_quickStart_instrumentApplication,
+ APM_php_linuxAMD64_quickStart_runApplication,
+
+ APM_php_linuxAMD64_recommendedSteps_setupOtelCollector,
+ APM_php_linuxAMD64_recommendedSteps_instrumentApplication,
+ APM_php_linuxAMD64_recommendedSteps_runApplication,
+
+ APM_php_linuxARM64_quickStart_instrumentApplication,
+ APM_php_linuxARM64_quickStart_runApplication,
+
+ APM_php_linuxARM64_recommendedSteps_setupOtelCollector,
+ APM_php_linuxARM64_recommendedSteps_instrumentApplication,
+ APM_php_linuxARM64_recommendedSteps_runApplication,
+
+ APM_php_macOsAMD64_quickStart_instrumentApplication,
+ APM_php_macOsAMD64_quickStart_runApplication,
+
+ APM_php_macOsAMD64_recommendedSteps_setupOtelCollector,
+ APM_php_macOsAMD64_recommendedSteps_instrumentApplication,
+ APM_php_macOsAMD64_recommendedSteps_runApplication,
+
+ APM_php_macOsARM64_quickStart_instrumentApplication,
+ APM_php_macOsARM64_quickStart_runApplication,
+
+ APM_php_macOsARM64_recommendedSteps_setupOtelCollector,
+ APM_php_macOsARM64_recommendedSteps_instrumentApplication,
+ APM_php_macOsARM64_recommendedSteps_runApplication,
};
diff --git a/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts b/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts
index 77f1210858..517cc38171 100644
--- a/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts
+++ b/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts
@@ -132,6 +132,11 @@ const supportedLanguages = [
id: 'swift',
imgURL: `/Logos/swift.png`,
},
+ {
+ name: 'php',
+ id: 'php',
+ imgURL: `/Logos/php.png`,
+ },
];
export const defaultLogsType = {
@@ -293,7 +298,8 @@ export const getSupportedFrameworks = ({
(moduleID === ModulesMap.APM && dataSourceName === '.NET') ||
(moduleID === ModulesMap.APM && dataSourceName === 'rust') ||
(moduleID === ModulesMap.APM && dataSourceName === 'elixir') ||
- (moduleID === ModulesMap.APM && dataSourceName === 'swift')
+ (moduleID === ModulesMap.APM && dataSourceName === 'swift') ||
+ (moduleID === ModulesMap.APM && dataSourceName === 'php')
) {
return [];
}
@@ -322,7 +328,8 @@ export const hasFrameworks = ({
(moduleID === ModulesMap.APM && dataSourceName === '.NET') ||
(moduleID === ModulesMap.APM && dataSourceName === 'rust') ||
(moduleID === ModulesMap.APM && dataSourceName === 'elixir') ||
- (moduleID === ModulesMap.APM && dataSourceName === 'swift')
+ (moduleID === ModulesMap.APM && dataSourceName === 'swift') ||
+ (moduleID === ModulesMap.APM && dataSourceName === 'php')
) {
return false;
}
diff --git a/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx b/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx
index 7395102d4c..3e9276f596 100644
--- a/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx
+++ b/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx
@@ -279,9 +279,6 @@ function PendingInvitesContainer(): JSX.Element {
-
- {t('invite_link_share_manually')}
-
}
type="primary"
diff --git a/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx b/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx
index 386786f70c..e7b00756f5 100644
--- a/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx
+++ b/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx
@@ -1,11 +1,7 @@
import { Select, Spin } from 'antd';
import { getAggregateKeys } from 'api/queryBuilder/getAttributeKeys';
// ** Constants
-import {
- idDivider,
- QueryBuilderKeys,
- selectValueDivider,
-} from 'constants/queryBuilder';
+import { idDivider, QueryBuilderKeys } from 'constants/queryBuilder';
import { DEBOUNCE_DELAY } from 'constants/queryBuilderFilterConfig';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import useDebounce from 'hooks/useDebounce';
@@ -83,11 +79,7 @@ export const GroupByFilter = memo(function GroupByFilter({
dataType={item.dataType || ''}
/>
),
- value: `${transformStringWithPrefix({
- str: item.key,
- prefix: item.type || '',
- condition: !item.isColumn,
- })}${selectValueDivider}${item.id}`,
+ value: `${item.id}`,
})) || [];
setOptionsData(options);
@@ -135,7 +127,8 @@ export const GroupByFilter = memo(function GroupByFilter({
const keys = await getAttributeKeys();
const groupByValues: BaseAutocompleteData[] = values.map((item) => {
- const [currentValue, id] = item.value.split(selectValueDivider);
+ const id = item.value;
+ const currentValue = item.value.split(idDivider)[0];
if (id && id.includes(idDivider)) {
const attribute = keys.find((item) => item.id === id);
@@ -174,11 +167,7 @@ export const GroupByFilter = memo(function GroupByFilter({
condition: !item.isColumn,
}),
)}`,
- value: `${transformStringWithPrefix({
- str: item.key,
- prefix: item.type || '',
- condition: !item.isColumn,
- })}${selectValueDivider}${item.id}`,
+ value: `${item.id}`,
}),
);
diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts
index c549a6fd62..ec7eba3973 100644
--- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts
+++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts
@@ -5,7 +5,7 @@ import { parse } from 'papaparse';
import { orderByValueDelimiter } from '../OrderByFilter/utils';
// eslint-disable-next-line no-useless-escape
-export const tagRegexp = /^\s*(.*?)\s*(IN|NOT_IN|LIKE|NOT_LIKE|REGEX|NOT_REGEX|=|!=|EXISTS|NOT_EXISTS|CONTAINS|NOT_CONTAINS|>=|>|<=|<|HAS|NHAS)\s*(.*)$/g;
+export const tagRegexp = /^\s*(.*?)\s*(\bIN\b|\bNOT_IN\b|\bLIKE\b|\bNOT_LIKE\b|\bREGEX\b|\bNOT_REGEX\b|=|!=|\bEXISTS\b|\bNOT_EXISTS\b|\bCONTAINS\b|\bNOT_CONTAINS\b|>=|>|<=|<|\bHAS\b|\bNHAS\b)\s*(.*)$/gi;
export function isInNInOperator(value: string): boolean {
return value === OPERATORS.IN || value === OPERATORS.NIN;
@@ -25,8 +25,8 @@ export function getTagToken(tag: string): ITagToken {
const [, matchTagKey, matchTagOperator, matchTagValue] = match;
return {
tagKey: matchTagKey,
- tagOperator: matchTagOperator,
- tagValue: isInNInOperator(matchTagOperator)
+ tagOperator: matchTagOperator.toUpperCase(),
+ tagValue: isInNInOperator(matchTagOperator.toUpperCase())
? parse(matchTagValue).data.flat()
: matchTagValue,
} as ITagToken;
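Note: with the `\b` word boundaries and the `i` flag, lower-case operators now tokenize, and `getTagToken` upper-cases the match so downstream checks such as `isInNInOperator` keep comparing against the canonical constants. A self-contained sketch of the pattern (operator list trimmed for brevity):

```typescript
// Sketch: case-insensitive operator matching with normalization.
const tagRegexp = /^\s*(.*?)\s*(\bIN\b|\bNOT_IN\b|=|!=|\bCONTAINS\b)\s*(.*)$/gi;

function parseTag(tag: string): { key: string; op: string; value: string } | null {
	tagRegexp.lastIndex = 0; // the global flag makes exec() stateful
	const match = tagRegexp.exec(tag);
	if (!match) return null;
	const [, key, op, value] = match;
	return { key, op: op.toUpperCase(), value };
}

// parseTag('service.name in frontend,api')
// -> { key: 'service.name', op: 'IN', value: 'frontend,api' }
```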
diff --git a/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.styles.scss b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.styles.scss
new file mode 100644
index 0000000000..9d10445703
--- /dev/null
+++ b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.styles.scss
@@ -0,0 +1,20 @@
+.resourceAttributesFilter-container {
+ display: flex;
+ align-items: center;
+ justify-content: stretch;
+ flex-wrap: wrap;
+ gap: 8px;
+ margin-bottom: 16px;
+
+ .resource-attributes-selector {
+ flex: 1;
+ }
+
+ .environment-selector {
+ min-width: 200px;
+ }
+
+ .ant-form-item {
+ margin-bottom: 0;
+ }
+}
diff --git a/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx
index a61a0ce0ee..4211291742 100644
--- a/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx
+++ b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx
@@ -1,10 +1,17 @@
+import './ResourceAttributesFilter.styles.scss';
+
import { CloseCircleFilled } from '@ant-design/icons';
import { Button, Select, Spin } from 'antd';
import useResourceAttribute, {
isResourceEmpty,
} from 'hooks/useResourceAttribute';
-import { convertMetricKeyToTrace } from 'hooks/useResourceAttribute/utils';
-import { ReactNode, useMemo } from 'react';
+import {
+ convertMetricKeyToTrace,
+ getEnvironmentTagKeys,
+ getEnvironmentTagValues,
+} from 'hooks/useResourceAttribute/utils';
+import { ReactNode, useEffect, useMemo, useState } from 'react';
+import { SelectOption } from 'types/common/select';
import { popupContainer } from 'utils/selectPopupContainer';
import { v4 as uuid } from 'uuid';
@@ -22,60 +29,129 @@ function ResourceAttributesFilter({
handleClearAll,
handleFocus,
handleChange,
+ handleEnvironmentChange,
selectedQuery,
optionsData,
loading,
} = useResourceAttribute();
- const isEmpty = useMemo(
- () => isResourceEmpty(queries, staging, selectedQuery),
- [queries, selectedQuery, staging],
+ const [environments, setEnvironments] = useState<
+ SelectOption[]
+ >([]);
+
+ const [selectedEnvironments, setSelectedEnvironments] = useState<string[]>([]);
+
+ const queriesExcludingEnvironment = useMemo(
+ () =>
+ queries.filter(
+ (query) => query.tagKey !== 'resource_deployment_environment',
+ ),
+ [queries],
);
- return (
-
-
- {queries.map((query) => (
-
- ))}
- {staging.map((query, idx) => (
-
- {idx === 0 ? convertMetricKeyToTrace(query) : query}
-
- ))}
-
-
- Loading...
-
- ) : (
-
- No resource attributes available to filter. Please refer docs to send
- attributes.
-
- )
- }
- />
+ const isEmpty = useMemo(
+ () => isResourceEmpty(queriesExcludingEnvironment, staging, selectedQuery),
+ [queriesExcludingEnvironment, selectedQuery, staging],
+ );
- {queries.length || staging.length || selectedQuery.length ? (
- } type="text" />
- ) : null}
-
+ useEffect(() => {
+ const resourceDeploymentEnvironmentQuery = queries.filter(
+ (query) => query.tagKey === 'resource_deployment_environment',
+ );
+
+ if (resourceDeploymentEnvironmentQuery?.length > 0) {
+ setSelectedEnvironments(resourceDeploymentEnvironmentQuery[0].tagValue);
+ } else {
+ setSelectedEnvironments([]);
+ }
+ }, [queries]);
+
+ useEffect(() => {
+ getEnvironmentTagKeys().then((tagKeys) => {
+ if (tagKeys && Array.isArray(tagKeys) && tagKeys.length > 0) {
+ getEnvironmentTagValues().then((tagValues) => {
+ setEnvironments(tagValues);
+ });
+ }
+ });
+ }, []);
+
+ return (
+
+
+
+ {environments.map((opt) => (
+
+ {opt.label}
+
+ ))}
+
+
+
+
+
+
+ {queriesExcludingEnvironment.map((query) => (
+
+ ))}
+ {staging.map((query, idx) => (
+
+ {idx === 0 ? convertMetricKeyToTrace(query) : query}
+
+ ))}
+
+
+ Loading...
+
+ ) : (
+
+ No resource attributes available to filter. Please refer to the docs to
+ learn how to send attributes.
+
+ )
+ }
+ />
+
+ {queries.length || staging.length || selectedQuery.length ? (
+ }
+ type="text"
+ />
+ ) : null}
+
+
+
);
}
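Note: the reworked component treats the URL-backed `queries` array as the single source of truth: the new environment Select is populated from `resource_deployment_environment` and mirrored back whenever `queries` changes. A minimal sketch of that sync, assuming the query shape below:

```typescript
import { useEffect, useState } from 'react';

interface IResourceAttribute {
	tagKey: string;
	operator: string;
	tagValue: string[];
}

const ENV_KEY = 'resource_deployment_environment';

// Sketch: derive the environment dropdown's value from the shared
// queries array instead of keeping independent local state.
function useSelectedEnvironments(queries: IResourceAttribute[]): string[] {
	const [selected, setSelected] = useState<string[]>([]);

	useEffect(() => {
		const envQuery = queries.find((q) => q.tagKey === ENV_KEY);
		setSelected(envQuery ? envQuery.tagValue : []);
	}, [queries]);

	return selected;
}
```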
diff --git a/frontend/src/container/ResourceAttributesFilter/components/QueryChip/QueryChip.tsx b/frontend/src/container/ResourceAttributesFilter/components/QueryChip/QueryChip.tsx
index 363e6d5143..b2babd78b5 100644
--- a/frontend/src/container/ResourceAttributesFilter/components/QueryChip/QueryChip.tsx
+++ b/frontend/src/container/ResourceAttributesFilter/components/QueryChip/QueryChip.tsx
@@ -12,7 +12,10 @@ function QueryChip({ queryData, onClose }: IQueryChipProps): JSX.Element {
{convertMetricKeyToTrace(queryData.tagKey)}
{queryData.operator}
-
+
{queryData.tagValue.join(', ')}
diff --git a/frontend/src/container/ResourceAttributesFilter/styles.ts b/frontend/src/container/ResourceAttributesFilter/styles.ts
index c1dcd863f2..8764247385 100644
--- a/frontend/src/container/ResourceAttributesFilter/styles.ts
+++ b/frontend/src/container/ResourceAttributesFilter/styles.ts
@@ -7,9 +7,10 @@ export const SearchContainer = styled.div`
display: flex;
align-items: center;
gap: 0.2rem;
- padding: 0.2rem;
- margin: 1rem 0;
- border: 1px solid #ccc5;
+ padding: 0 0.2rem;
+ border: 1px solid #454c58;
+ box-sizing: border-box;
+ border-radius: 3px;
`;
export const QueryChipContainer = styled.span`
diff --git a/frontend/src/hooks/Integrations/useGetIntegrationStatus.ts b/frontend/src/hooks/Integrations/useGetIntegrationStatus.ts
index af58f63996..56849d2515 100644
--- a/frontend/src/hooks/Integrations/useGetIntegrationStatus.ts
+++ b/frontend/src/hooks/Integrations/useGetIntegrationStatus.ts
@@ -8,13 +8,12 @@ import {
export const useGetIntegrationStatus = ({
integrationId,
- enabled,
}: GetIntegrationPayloadProps): UseQueryResult<
AxiosResponse,
AxiosError
> =>
useQuery, AxiosError>({
- queryKey: ['Integration', integrationId, Date.now()],
+ queryKey: ['integration-connection-status', integrationId],
queryFn: () => getIntegrationStatus({ integrationId }),
- enabled,
+ refetchInterval: 5000,
});
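Note: swapping the `Date.now()` query key and the caller-managed `enabled` flag for a stable key plus `refetchInterval` hands the polling loop to react-query. A sketch of the pattern in isolation (fetchStatus is a hypothetical stand-in for getIntegrationStatus):

```typescript
import { useQuery, UseQueryResult } from 'react-query';

declare function fetchStatus(id: string): Promise<unknown>;

// Sketch: a stable queryKey lets react-query cache and dedupe requests,
// while refetchInterval replaces the manual setInterval the detail page
// used to run (see the IntegrationDetailPage diff below).
function usePolledStatus(integrationId: string): UseQueryResult<unknown> {
	return useQuery(
		['integration-connection-status', integrationId],
		() => fetchStatus(integrationId),
		{ refetchInterval: 5000 }, // poll every 5s while mounted
	);
}
```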
diff --git a/frontend/src/hooks/queryBuilder/useOperators.ts b/frontend/src/hooks/queryBuilder/useOperators.ts
index 63f4a9222a..11cc797667 100644
--- a/frontend/src/hooks/queryBuilder/useOperators.ts
+++ b/frontend/src/hooks/queryBuilder/useOperators.ts
@@ -1,4 +1,7 @@
-import { QUERY_BUILDER_OPERATORS_BY_TYPES } from 'constants/queryBuilder';
+import {
+ OPERATORS,
+ QUERY_BUILDER_OPERATORS_BY_TYPES,
+} from 'constants/queryBuilder';
import { getRemovePrefixFromKey } from 'container/QueryBuilder/filters/QueryBuilderSearch/utils';
import { useMemo } from 'react';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
@@ -16,9 +19,14 @@ export const useOperators = (
): IOperators =>
useMemo(() => {
const currentKey = keys?.find((el) => el.key === getRemovePrefixFromKey(key));
+ const strippedKey = key.split(' ')[0];
+
+ // eslint-disable-next-line no-nested-ternary
return currentKey?.dataType
? QUERY_BUILDER_OPERATORS_BY_TYPES[
currentKey.dataType as keyof typeof QUERY_BUILDER_OPERATORS_BY_TYPES
]
+ : strippedKey.endsWith('[*]') && strippedKey.startsWith('body.')
+ ? [OPERATORS.HAS, OPERATORS.NHAS]
: QUERY_BUILDER_OPERATORS_BY_TYPES.universal;
}, [keys, key]);
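Note: keys like `body.requests[*]` address arrays inside the JSON log body and have no resolvable data type, so they fall back to the membership operators instead of the universal set. A sketch of the branch, assuming OPERATORS.HAS / OPERATORS.NHAS are the literal strings 'HAS' / 'NHAS':

```typescript
const OPERATORS = { HAS: 'HAS', NHAS: 'NHAS' } as const;

// Sketch: special-case JSON array paths in the log body.
function operatorsForKey(key: string): string[] {
	const strippedKey = key.split(' ')[0]; // drop any trailing type/dataType suffix

	if (strippedKey.startsWith('body.') && strippedKey.endsWith('[*]')) {
		return [OPERATORS.HAS, OPERATORS.NHAS]; // only membership checks make sense
	}
	return ['=', '!=', 'IN', 'NOT_IN']; // stand-in for the universal operator set
}

// operatorsForKey('body.requests[*]') -> ['HAS', 'NHAS']
```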
diff --git a/frontend/src/hooks/queryBuilder/useTag.ts b/frontend/src/hooks/queryBuilder/useTag.ts
index 268a01e0c6..419aaaedc9 100644
--- a/frontend/src/hooks/queryBuilder/useTag.ts
+++ b/frontend/src/hooks/queryBuilder/useTag.ts
@@ -74,7 +74,14 @@ export const useTag = (
const handleAddTag = useCallback(
(value: string): void => {
const { tagKey } = getTagToken(value);
- const [key, id] = tagKey.split('-');
+ const parts = tagKey.split('-');
+ // treat only the last segment as the id, so a hyphenated value like `hello-world` still becomes `body CONTAINS hello-world`
+ let id = parts[parts.length - 1];
+ let key = parts.slice(0, -1).join('-');
+ if (parts.length === 1) {
+ id = '';
+ [key] = parts;
+ }
if (id === 'custom') {
const customValue = whereClauseConfig
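Note: because suggestion values encode `key-id` with a hyphen, splitting on the first `-` mangled any key or value that itself contains hyphens. The fix treats only the last segment as the id, as in this sketch:

```typescript
// Sketch: split 'key-id' on the last hyphen only.
function splitTagKey(tagKey: string): { key: string; id: string } {
	const parts = tagKey.split('-');
	if (parts.length === 1) {
		return { key: parts[0], id: '' }; // no id suffix at all
	}
	return {
		key: parts.slice(0, -1).join('-'),
		id: parts[parts.length - 1],
	};
}

// splitTagKey('status-code-a1b2c3') -> { key: 'status-code', id: 'a1b2c3' }
// splitTagKey('helloworld')         -> { key: 'helloworld', id: '' }
```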
diff --git a/frontend/src/hooks/useResourceAttribute/ResourceProvider.tsx b/frontend/src/hooks/useResourceAttribute/ResourceProvider.tsx
index e027c70b8f..8a3de793e8 100644
--- a/frontend/src/hooks/useResourceAttribute/ResourceProvider.tsx
+++ b/frontend/src/hooks/useResourceAttribute/ResourceProvider.tsx
@@ -52,6 +52,7 @@ function ResourceProvider({ children }: Props): JSX.Element {
? `?resourceAttribute=${encode(JSON.stringify(queries))}`
: '',
});
+
setQueries(queries);
},
[pathname],
@@ -62,12 +63,14 @@ function ResourceProvider({ children }: Props): JSX.Element {
onSelectTagKey: () => {
handleLoading(true);
GetTagKeys()
- .then((tagKeys) =>
+ .then((tagKeys) => {
+ const options = mappingWithRoutesAndKeys(pathname, tagKeys);
+
setOptionsData({
- options: mappingWithRoutesAndKeys(pathname, tagKeys),
+ options,
mode: undefined,
- }),
- )
+ });
+ })
.finally(() => {
handleLoading(false);
});
@@ -96,6 +99,7 @@ function ResourceProvider({ children }: Props): JSX.Element {
}
const generatedQuery = createQuery([...staging, selectedQuery]);
+
if (generatedQuery) {
dispatchQueries([...queries, generatedQuery]);
}
@@ -127,6 +131,29 @@ function ResourceProvider({ children }: Props): JSX.Element {
[optionsData.mode, send],
);
+ const handleEnvironmentChange = useCallback(
+ (environments: string[]): void => {
+ const environmentStaging = ['resource_deployment_environment', 'IN'];
+
+ const queriesCopy = queries.filter(
+ (query) => query.tagKey !== 'resource_deployment_environment',
+ );
+
+ if (environments && Array.isArray(environments) && environments.length > 0) {
+ const generatedQuery = createQuery([...environmentStaging, environments]);
+
+ if (generatedQuery) {
+ dispatchQueries([...queriesCopy, generatedQuery]);
+ }
+ } else {
+ dispatchQueries([...queriesCopy]);
+ }
+
+ send('RESET');
+ },
+ [dispatchQueries, queries, send],
+ );
+
const handleClose = useCallback(
(id: string): void => {
dispatchQueries(queries.filter((queryData) => queryData.id !== id));
@@ -159,12 +186,14 @@ function ResourceProvider({ children }: Props): JSX.Element {
handleFocus,
loading,
handleChange,
+ handleEnvironmentChange,
selectedQuery,
optionsData,
}),
[
handleBlur,
handleChange,
+ handleEnvironmentChange,
handleClearAll,
handleClose,
handleFocus,
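Note: `handleEnvironmentChange` rewrites the environment clause atomically: any existing `resource_deployment_environment` query is stripped first, then a fresh `IN` query is appended only when environments are selected. A pure-function sketch of those semantics (the id is assigned via uuid in the real code):

```typescript
interface IResourceAttribute {
	id: string;
	tagKey: string;
	operator: string;
	tagValue: string[];
}

const ENV_KEY = 'resource_deployment_environment';

// Sketch: replace-don't-merge semantics for the environment clause.
function withEnvironments(
	queries: IResourceAttribute[],
	environments: string[],
): IResourceAttribute[] {
	const rest = queries.filter((q) => q.tagKey !== ENV_KEY);
	if (environments.length === 0) {
		return rest; // clearing the dropdown removes the clause entirely
	}
	return [
		...rest,
		{ id: 'env', tagKey: ENV_KEY, operator: 'IN', tagValue: environments },
	];
}
```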
diff --git a/frontend/src/hooks/useResourceAttribute/types.ts b/frontend/src/hooks/useResourceAttribute/types.ts
index 422a0555ba..cce06c5cd1 100644
--- a/frontend/src/hooks/useResourceAttribute/types.ts
+++ b/frontend/src/hooks/useResourceAttribute/types.ts
@@ -28,4 +28,5 @@ export interface IResourceAttributeProps {
handleChange: (value: string) => void;
selectedQuery: string[];
optionsData: OptionsData;
+ handleEnvironmentChange: (environments: string[]) => void;
}
diff --git a/frontend/src/hooks/useResourceAttribute/utils.ts b/frontend/src/hooks/useResourceAttribute/utils.ts
index 8926621e32..52dc85c1c4 100644
--- a/frontend/src/hooks/useResourceAttribute/utils.ts
+++ b/frontend/src/hooks/useResourceAttribute/utils.ts
@@ -109,12 +109,43 @@ export const GetTagKeys = async (): Promise => {
if (!payload || !payload?.data) {
return [];
}
+ return payload.data
+ .filter((tagKey: string) => tagKey !== 'resource_deployment_environment')
+ .map((tagKey: string) => ({
+ label: convertMetricKeyToTrace(tagKey),
+ value: tagKey,
+ }));
+};
+
+export const getEnvironmentTagKeys = async (): Promise => {
+ const { payload } = await getResourceAttributesTagKeys({
+ metricName: 'signoz_calls_total',
+ match: 'resource_deployment_environment',
+ });
+ if (!payload || !payload?.data) {
+ return [];
+ }
return payload.data.map((tagKey: string) => ({
label: convertMetricKeyToTrace(tagKey),
value: tagKey,
}));
};
+export const getEnvironmentTagValues = async (): Promise => {
+ const { payload } = await getResourceAttributesTagValues({
+ tagKey: 'resource_deployment_environment',
+ metricName: 'signoz_calls_total',
+ });
+
+ if (!payload || !payload?.data) {
+ return [];
+ }
+ return payload.data.map((tagValue: string) => ({
+ label: tagValue,
+ value: tagValue,
+ }));
+};
+
export const GetTagValues = async (tagKey: string): Promise => {
const { payload } = await getResourceAttributesTagValues({
tagKey,
@@ -132,6 +163,23 @@ export const GetTagValues = async (tagKey: string): Promise => {
export const createQuery = (
selectedItems: Array = [],
+): IResourceAttribute | null => {
+ if (selectedItems.length === 3) {
+ return {
+ id: uuid().slice(0, 8),
+ tagKey: selectedItems[0] as string,
+ operator: selectedItems[1] as string,
+ tagValue: selectedItems[2] as string[],
+ };
+ }
+ return null;
+};
+
+export const updateQuery = (
+ queryKey: string,
+ selectedItems: Array = [],
): IResourceAttribute | null => {
if (selectedItems.length === 3) {
return {
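Note: `createQuery` accepts only a complete `[tagKey, operator, tagValue[]]` triple and returns null for anything shorter, which is what lets callers pass a staging array straight through while it is still being built. A worked sketch of that contract (makeQuery is a hypothetical stand-in; the real function also assigns a short uuid):

```typescript
interface IResourceAttribute {
	tagKey: string;
	operator: string;
	tagValue: string[];
}

// Sketch: a query materializes only once the triple is complete.
function makeQuery(
	selected: Array<string | string[]>,
): IResourceAttribute | null {
	if (selected.length !== 3) return null; // still staging
	return {
		tagKey: selected[0] as string,
		operator: selected[1] as string,
		tagValue: selected[2] as string[],
	};
}

// makeQuery(['resource_deployment_environment', 'IN', ['prod', 'staging']])
// -> { tagKey: '...', operator: 'IN', tagValue: ['prod', 'staging'] }
// makeQuery(['resource_deployment_environment', 'IN']) -> null
```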
diff --git a/frontend/src/index.tsx b/frontend/src/index.tsx
index b95631c107..570db8c1da 100644
--- a/frontend/src/index.tsx
+++ b/frontend/src/index.tsx
@@ -3,6 +3,7 @@ import 'styles.scss';
import * as Sentry from '@sentry/react';
import AppRoutes from 'AppRoutes';
+import { AxiosError } from 'axios';
import { ThemeProvider } from 'hooks/useDarkMode';
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
import { createRoot } from 'react-dom/client';
@@ -16,6 +17,17 @@ const queryClient = new QueryClient({
defaultOptions: {
queries: {
refetchOnWindowFocus: false,
+ retry(failureCount, error): boolean {
+ if (
+ // when manually throwing errors, make sure to set error.response.status
+ error instanceof AxiosError &&
+ error.response?.status &&
+ (error.response.status >= 400 && error.response.status <= 499)
+ ) {
+ return false;
+ }
+ return failureCount < 2;
+ },
},
},
});
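Note: the retry predicate above gives up immediately on 4xx responses, since client errors will not heal on retry, and retries everything else at most twice. The same logic in isolation:

```typescript
import { AxiosError } from 'axios';

// Sketch: retry only non-4xx failures, at most twice.
function shouldRetry(failureCount: number, error: unknown): boolean {
	if (
		error instanceof AxiosError &&
		error.response?.status &&
		error.response.status >= 400 &&
		error.response.status <= 499
	) {
		return false; // client error: retrying won't help
	}
	return failureCount < 2;
}
```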
diff --git a/frontend/src/lib/uPlotLib/getUplotChartOptions.ts b/frontend/src/lib/uPlotLib/getUplotChartOptions.ts
index 50f6c5fbc4..0b281506f6 100644
--- a/frontend/src/lib/uPlotLib/getUplotChartOptions.ts
+++ b/frontend/src/lib/uPlotLib/getUplotChartOptions.ts
@@ -12,6 +12,7 @@ import { Dimensions } from 'hooks/useDimensions';
import { convertValue } from 'lib/getConvertedValue';
import _noop from 'lodash-es/noop';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
+import { Query } from 'types/api/queryBuilder/queryBuilderData';
import uPlot from 'uplot';
import onClickPlugin, { OnClickPluginOpts } from './plugins/onClickPlugin';
@@ -40,6 +41,7 @@ export interface GetUPlotChartOptions {
maxTimeScale?: number;
softMin: number | null;
softMax: number | null;
+ currentQuery?: Query;
}
export const getUPlotChartOptions = ({
@@ -59,6 +61,7 @@ export const getUPlotChartOptions = ({
softMax,
softMin,
panelType,
+ currentQuery,
}: GetUPlotChartOptions): uPlot.Options => {
const timeScaleProps = getXAxisScale(minTimeScale, maxTimeScale);
@@ -223,6 +226,7 @@ export const getUPlotChartOptions = ({
widgetMetaData: apiResponse?.data.result,
graphsVisibilityStates,
panelType,
+ currentQuery,
}),
axes: getAxes(isDarkMode, yAxisUnit),
};
diff --git a/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts b/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts
index 4ec3677dfb..b06e5bff63 100644
--- a/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts
+++ b/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts
@@ -3,6 +3,7 @@ import { themeColors } from 'constants/theme';
import dayjs from 'dayjs';
import customParseFormat from 'dayjs/plugin/customParseFormat';
import getLabelName from 'lib/getLabelName';
+import { get } from 'lodash-es';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { placement } from '../placement';
@@ -68,7 +69,18 @@ const generateTooltipContent = (
const dataIngested = quantity[idx];
const label = getLabelName(metric, queryName || '', legend || '');
- const color = generateColor(label, themeColors.chartcolors);
+ let color = generateColor(label, themeColors.chartcolors);
+
+ // in case of billing graph pick colors from the series options
+ if (isBillingUsageGraphs) {
+ let clr;
+ series.forEach((item) => {
+ if (item.label === label) {
+ clr = get(item, '_fill');
+ }
+ });
+ color = clr ?? color;
+ }
let tooltipItemLabel = label;
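Note: for the billing usage graph the series colors are fixed when the series are built, so the tooltip looks the color up from the matching series' internal `_fill` instead of re-hashing the label. A sketch of the lookup (using find rather than the forEach loop above; `_fill` is a uPlot-internal field, hence the lodash get):

```typescript
import { get } from 'lodash-es';
import uPlot from 'uplot';

// Sketch: prefer the series' own fill color when one exists,
// fall back to the hash-derived label color otherwise.
function tooltipColor(
	label: string,
	series: uPlot.Series[],
	hashedColor: string,
): string {
	const match = series.find((s) => s.label === label);
	return (match && get(match, '_fill')) || hashedColor;
}
```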
diff --git a/frontend/src/lib/uPlotLib/utils/getSeriesData.ts b/frontend/src/lib/uPlotLib/utils/getSeriesData.ts
index cf60a632cb..574b8dc1de 100644
--- a/frontend/src/lib/uPlotLib/utils/getSeriesData.ts
+++ b/frontend/src/lib/uPlotLib/utils/getSeriesData.ts
@@ -3,6 +3,7 @@ import { PANEL_TYPES } from 'constants/queryBuilder';
import { themeColors } from 'constants/theme';
import getLabelName from 'lib/getLabelName';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
+import { Query } from 'types/api/queryBuilder/queryBuilderData';
import { QueryData } from 'types/api/widgets/getQuery';
import { drawStyles, lineInterpolations } from './constants';
@@ -31,6 +32,7 @@ const getSeries = ({
widgetMetaData,
graphsVisibilityStates,
panelType,
+ currentQuery,
}: GetSeriesProps): uPlot.Options['series'] => {
const configurations: uPlot.Series[] = [
{ label: 'Timestamp', stroke: 'purple' },
@@ -40,13 +42,15 @@ const getSeries = ({
const newGraphVisibilityStates = graphsVisibilityStates?.slice(1);
for (let i = 0; i < seriesList?.length; i += 1) {
- const { metric = {}, queryName = '', legend = '' } = widgetMetaData[i] || {};
+ const { metric = {}, queryName = '', legend: lgd } = widgetMetaData[i] || {};
- const label = getLabelName(
- metric,
- queryName || '', // query
- legend || '',
- );
+ const newLegend =
+ currentQuery?.builder.queryData.find((item) => item.queryName === queryName)
+ ?.legend || '';
+
+ const legend = newLegend || lgd || '';
+
+ const label = getLabelName(metric, queryName || '', legend);
const color = generateColor(label, themeColors.chartcolors);
@@ -87,6 +91,7 @@ export type GetSeriesProps = {
widgetMetaData: QueryData[];
graphsVisibilityStates?: boolean[];
panelType?: PANEL_TYPES;
+ currentQuery?: Query;
};
export default getSeries;
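Note: legend resolution now prefers the legend typed into the current, unsaved query over the one stored in widget metadata, so edits preview immediately without saving. The precedence in isolation:

```typescript
// Sketch: freshest legend wins; empty strings fall through.
function resolveLegend(
	queryName: string,
	savedLegend: string | undefined,
	currentQueryLegends: Record<string, string | undefined>,
): string {
	return currentQueryLegends[queryName] || savedLegend || '';
}

// resolveLegend('A', 'saved legend', { A: 'edited legend' }) -> 'edited legend'
// resolveLegend('A', 'saved legend', {}) -> 'saved legend'
```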
diff --git a/frontend/src/pages/AlertList/index.tsx b/frontend/src/pages/AlertList/index.tsx
index 336c399a2f..33f3ada0f9 100644
--- a/frontend/src/pages/AlertList/index.tsx
+++ b/frontend/src/pages/AlertList/index.tsx
@@ -12,6 +12,11 @@ function AllAlertList(): JSX.Element {
children: ,
},
// {
+ // label: 'Planned Downtime',
+ // key: 'Planned Downtime',
+ // // children: ,
+ // },
+ // {
// label: 'Map Alert Channels',
// key = 'Map Alert Channels',
// children: ,
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx
index 6083489b58..ec81d51db6 100644
--- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx
@@ -1,7 +1,8 @@
import './IntegrationDetailPage.styles.scss';
import { Button, Tabs, TabsProps, Typography } from 'antd';
-import { Drum, Hammer, Table2 } from 'lucide-react';
+import ConfigureIcon from 'assets/Integrations/ConfigureIcon';
+import { CableCar, Group } from 'lucide-react';
import { IntegrationDetailedProps } from 'types/api/integrations/types';
import Configure from './IntegrationDetailContentTabs/Configure';
@@ -24,7 +25,7 @@ function IntegrationDetailContent(
}
+ icon={}
>
Overview
@@ -43,7 +44,7 @@ function IntegrationDetailContent(
}
+ icon={}
>
Configure
@@ -56,7 +57,7 @@ function IntegrationDetailContent(
}
+ icon={}
>
Data Collected
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx
index ede3b41137..92a5e0c823 100644
--- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx
@@ -1,6 +1,6 @@
import './IntegrationDetailContentTabs.styles.scss';
-import { Button, Tooltip, Typography } from 'antd';
+import { Button, Typography } from 'antd';
import cx from 'classnames';
import { MarkdownRenderer } from 'components/MarkdownRenderer/MarkdownRenderer';
import { useState } from 'react';
@@ -21,18 +21,18 @@ function Configure(props: ConfigurationProps): JSX.Element {
{configuration.map((config, index) => (
-
-
-
+
))}
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx
index a3c387dc3a..1c605ec863 100644
--- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx
@@ -59,7 +59,7 @@ function DataCollected(props: DataCollectedProps): JSX.Element {
index % 2 === 0 ? 'table-row-dark' : ''
}
dataSource={logsData}
- pagination={{ pageSize: 3 }}
+ pagination={{ pageSize: 20 }}
className="logs-section-table"
/>
@@ -74,7 +74,7 @@ function DataCollected(props: DataCollectedProps): JSX.Element {
index % 2 === 0 ? 'table-row-dark' : ''
}
dataSource={metricsData}
- pagination={{ pageSize: 3 }}
+ pagination={{ pageSize: 20 }}
className="metrics-section-table"
/>
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss
index 8340d0d4c0..81dcb6bf59 100644
--- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss
@@ -4,7 +4,7 @@
.integration-detail-overview-left-container {
display: flex;
flex-direction: column;
- width: 25%;
+ width: 30%;
gap: 26px;
border-right: 1px solid var(--bg-slate-500);
padding: 16px 0;
@@ -185,13 +185,14 @@
.configure-menu {
display: flex;
flex-direction: column;
- width: 25%;
+ width: 30%;
padding: 16px 16px 0px 0px;
border-right: 1px solid var(--bg-slate-500);
gap: 8px;
.configure-menu-item {
padding: 4px 8px;
+ height: auto;
text-align: start;
color: var(--bg-vanilla-100);
font-family: Inter;
@@ -199,6 +200,10 @@
font-style: normal;
font-weight: 400;
line-height: 18px; /* 128.571% */
+
+ .configure-text {
+ text-wrap: pretty;
+ }
}
.configure-menu-item:hover {
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx
index 34f5e612bf..cab49391f5 100644
--- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx
@@ -1,7 +1,7 @@
/* eslint-disable no-nested-ternary */
import './IntegrationDetailPage.styles.scss';
-import { Button, Modal, Typography } from 'antd';
+import { Button, Modal, Tooltip, Typography } from 'antd';
import installIntegration from 'api/Integrations/installIntegration';
import { SOMETHING_WENT_WRONG } from 'constants/api';
import dayjs from 'dayjs';
@@ -9,7 +9,7 @@ import { useNotifications } from 'hooks/useNotifications';
import { ArrowLeftRight, Check } from 'lucide-react';
import { useState } from 'react';
import { useMutation } from 'react-query';
-import { IntegrationStatusProps } from 'types/api/integrations/types';
+import { IntegrationConnectionStatus } from 'types/api/integrations/types';
import TestConnection, { ConnectionStates } from './TestConnection';
@@ -20,8 +20,9 @@ interface IntegrationDetailHeaderProps {
icon: string;
refetchIntegrationDetails: () => void;
connectionState: ConnectionStates;
- connectionData: IntegrationStatusProps['connection_status'];
+ connectionData: IntegrationConnectionStatus;
}
+// eslint-disable-next-line sonarjs/cognitive-complexity
function IntegrationDetailHeader(
props: IntegrationDetailHeaderProps,
): JSX.Element {
@@ -154,19 +155,42 @@ function IntegrationDetailHeader(
Last received from
-
- {latestData.last_received_from}
-
+
+
+
+ {latestData.last_received_from}
+
+
Last received at
-
- {latestData.last_received_ts_ms
- ? dayjs(latestData.last_received_ts_ms).format('DD MMM YYYY HH:mm')
- : ''}
-
+
+
+
+ {latestData.last_received_ts_ms
+ ? dayjs(latestData.last_received_ts_ms).format('DD MMM YYYY HH:mm')
+ : ''}
+
+
>
) : connectionState === ConnectionStates.TestingConnection ? (
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss
index d9982c3aab..b7630491ae 100644
--- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss
@@ -53,9 +53,17 @@
.loading-integration-details {
display: flex;
- height: 400px;
- justify-content: center;
- align-items: center;
+ flex-direction: column;
+ gap: 16px;
+
+ .skeleton-1 {
+ height: 125px;
+ width: 100%;
+ }
+ .skeleton-2 {
+ height: 250px;
+ width: 100%;
+ }
}
.all-integrations-btn {
@@ -254,6 +262,7 @@
border-radius: 4px;
border: 1px solid rgba(218, 85, 101, 0.2);
background: rgba(218, 85, 101, 0.06);
+ gap: 32px;
.unintall-integration-bar-text {
display: flex;
@@ -429,6 +438,15 @@
.data-info {
display: flex;
justify-content: space-between;
+ align-items: center;
+
+ .connection-line {
+ border: 1px dashed var(--bg-slate-200);
+ min-width: 20px;
+ height: 0px;
+ flex-grow: 1;
+ margin: 0px 8px;
+ }
.last-data {
color: var(--bg-vanilla-400);
@@ -447,6 +465,7 @@
font-style: normal;
font-weight: 400;
line-height: 18px; /* 150% */
+ max-width: 320px;
}
}
.testingConnection {
@@ -622,6 +641,9 @@
.connection-content {
.data-info {
+ .connection-line {
+ border: 1px dashed var(--bg-vanilla-400);
+ }
.last-data {
color: var(--bg-slate-400);
}
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx
index 3d498a07d8..88be0dc3a3 100644
--- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx
+++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx
@@ -4,12 +4,11 @@
import './IntegrationDetailPage.styles.scss';
import { Color } from '@signozhq/design-tokens';
-import { Button, Typography } from 'antd';
+import { Button, Skeleton, Typography } from 'antd';
import { useGetIntegration } from 'hooks/Integrations/useGetIntegration';
import { useGetIntegrationStatus } from 'hooks/Integrations/useGetIntegrationStatus';
import { defaultTo } from 'lodash-es';
import { ArrowLeft, MoveUpRight, RotateCw } from 'lucide-react';
-import { useEffect } from 'react';
import { isCloudUser } from 'utils/app';
import { handleContactSupport } from '../utils';
@@ -41,11 +40,9 @@ function IntegrationDetailPage(props: IntegrationDetailPageProps): JSX.Element {
const {
data: integrationStatus,
- refetch: refetchStatus,
isLoading: isStatusLoading,
} = useGetIntegrationStatus({
integrationId: selectedIntegration,
- enabled: false,
});
const loading = isLoading || isFetching || isRefetching || isStatusLoading;
@@ -54,23 +51,11 @@ function IntegrationDetailPage(props: IntegrationDetailPageProps): JSX.Element {
const connectionStatus = getConnectionStatesFromConnectionStatus(
integrationData?.installation,
defaultTo(
- integrationStatus?.data.data.connection_status,
+ integrationStatus?.data.data,
defaultTo(integrationData?.connection_status, { logs: null, metrics: null }),
),
);
- useEffect(() => {
- // we should once get data on load and then keep polling every 5 seconds
- refetchStatus();
- const timer = setInterval(() => {
- refetchStatus();
- }, 5000);
-
- return (): void => {
- clearInterval(timer);
- };
- }, [refetchStatus]);
-
return (
) : isError ? (
@@ -128,10 +114,10 @@ function IntegrationDetailPage(props: IntegrationDetailPageProps): JSX.Element {
description={defaultTo(integrationData?.description, '')}
icon={defaultTo(integrationData?.icon, '')}
connectionState={connectionStatus}
- connectionData={defaultTo(
- integrationStatus?.data.data.connection_status,
- { logs: null, metrics: null },
- )}
+ connectionData={defaultTo(integrationStatus?.data.data, {
+ logs: null,
+ metrics: null,
+ })}
refetchIntegrationDetails={refetch}
/>
(
- null,
+ const urlQuery = useUrlQuery();
+ const history = useHistory();
+ const location = useLocation();
+
+ const selectedIntegration = useMemo(() => urlQuery.get('integration'), [
+ urlQuery,
+ ]);
+
+ const setSelectedIntegration = useCallback(
+ (integration: string | null) => {
+ if (integration) {
+ urlQuery.set('integration', integration);
+ } else {
+ urlQuery.set('integration', '');
+ }
+ const generatedUrl = `${location.pathname}?${urlQuery.toString()}`;
+ history.push(generatedUrl);
+ },
+ [history, location.pathname, urlQuery],
);
- const [activeDetailTab, setActiveDetailTab] = useState(null);
+ const [activeDetailTab, setActiveDetailTab] = useState(
+ 'overview',
+ );
const [searchTerm, setSearchTerm] = useState('');
return (
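Note: moving the selected integration out of component state and into the `integration` URL param makes detail views linkable and reload-safe. A self-contained sketch of the same read/write pair, built directly on react-router v5 (the diff uses the project's useUrlQuery wrapper):

```typescript
import { useCallback, useMemo } from 'react';
import { useHistory, useLocation } from 'react-router-dom';

// Sketch: URL-backed selection state for the integrations list.
function useSelectedIntegration(): [string | null, (id: string | null) => void] {
	const history = useHistory();
	const location = useLocation();
	const params = useMemo(() => new URLSearchParams(location.search), [
		location.search,
	]);

	const selected = params.get('integration');

	const setSelected = useCallback(
		(id: string | null): void => {
			params.set('integration', id ?? '');
			history.push(`${location.pathname}?${params.toString()}`);
		},
		[history, location.pathname, params],
	);

	return [selected, setSelected];
}
```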
diff --git a/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss b/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss
index 9465594ccb..acba2781df 100644
--- a/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss
+++ b/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss
@@ -1,10 +1,4 @@
.logs-module-container {
- // margin: 0 -1rem; // as we have added a margin of 0 1rem components container, have to adjust the margin with negative to style the logs explorer as we want
-
- // .ant-tabs-content-holder {
- // margin: 0 -1rem;
- // }
-
flex: 1;
display: flex;
flex-direction: column;
@@ -54,4 +48,4 @@
}
}
}
-}
\ No newline at end of file
+}
diff --git a/frontend/src/pages/Pipelines/Pipelines.styles.scss b/frontend/src/pages/Pipelines/Pipelines.styles.scss
index 8521aab75e..78578006ee 100644
--- a/frontend/src/pages/Pipelines/Pipelines.styles.scss
+++ b/frontend/src/pages/Pipelines/Pipelines.styles.scss
@@ -2,4 +2,8 @@
.ant-tabs-content {
padding: 0 16px;
}
+
+ .ant-tabs-tabpane-hidden {
+ display: none !important;
+ }
}
diff --git a/frontend/src/pages/Services/Metrics.test.tsx b/frontend/src/pages/Services/Metrics.test.tsx
index 37c13ee84c..fcafd76466 100644
--- a/frontend/src/pages/Services/Metrics.test.tsx
+++ b/frontend/src/pages/Services/Metrics.test.tsx
@@ -6,9 +6,11 @@ describe('Services', () => {
test('Should render the component', () => {
render();
- const inputBox = screen.getByRole('combobox');
+ const inputBox = screen.getByTestId('resource-attributes-filter');
expect(inputBox).toBeInTheDocument();
+ expect(screen.getByTestId('resource-environment-filter')).toBeInTheDocument();
+
const application = screen.getByRole('columnheader', {
name: /application search/i,
});
diff --git a/frontend/src/types/api/alerts/def.ts b/frontend/src/types/api/alerts/def.ts
index af3a4bc912..c773cb78a2 100644
--- a/frontend/src/types/api/alerts/def.ts
+++ b/frontend/src/types/api/alerts/def.ts
@@ -6,6 +6,8 @@ export const defaultMatchType = '1';
// default eval window
export const defaultEvalWindow = '5m0s';
+export const defaultFrequency = '1m0s';
+
// default compare op: above
export const defaultCompareOp = '1';
@@ -14,6 +16,7 @@ export interface AlertDef {
alertType?: string;
alert?: string;
ruleType?: string;
+ frequency?: string;
condition: RuleCondition;
labels?: Labels;
annotations?: Labels;
diff --git a/frontend/src/types/api/dashboard/getAll.ts b/frontend/src/types/api/dashboard/getAll.ts
index e616ee28ea..ba23e55186 100644
--- a/frontend/src/types/api/dashboard/getAll.ts
+++ b/frontend/src/types/api/dashboard/getAll.ts
@@ -55,6 +55,7 @@ export interface Dashboard {
}
export interface DashboardData {
+ uuid?: string;
description?: string;
tags?: string[];
name?: string;
diff --git a/frontend/src/types/api/integrations/types.ts b/frontend/src/types/api/integrations/types.ts
index ae18b73caf..b9f5e55480 100644
--- a/frontend/src/types/api/integrations/types.ts
+++ b/frontend/src/types/api/integrations/types.ts
@@ -65,21 +65,19 @@ export interface GetIntegrationProps {
data: IntegrationDetailedProps;
}
-export interface IntegrationStatusProps {
- connection_status: {
- logs: {
- last_received_ts_ms: number;
- last_received_from: string;
- } | null;
- metrics: {
- last_received_ts_ms: number;
- last_received_from: string;
- } | null;
- };
+export interface IntegrationConnectionStatus {
+ logs: {
+ last_received_ts_ms: number;
+ last_received_from: string;
+ } | null;
+ metrics: {
+ last_received_ts_ms: number;
+ last_received_from: string;
+ } | null;
}
export interface GetIntegrationStatusProps {
- data: IntegrationStatusProps;
+ data: IntegrationConnectionStatus;
}
export interface GetIntegrationPayloadProps {
diff --git a/frontend/webpack.config.prod.js b/frontend/webpack.config.prod.js
index a2b3ecb40e..cf5816f24d 100644
--- a/frontend/webpack.config.prod.js
+++ b/frontend/webpack.config.prod.js
@@ -79,6 +79,7 @@ if (process.env.BUNDLE_ANALYSER === 'true') {
const config = {
mode: 'production',
+ devtool: 'source-map',
entry: resolve(__dirname, './src/index.tsx'),
output: {
path: resolve(__dirname, './build'),
diff --git a/pkg/query-service/agentConf/db.go b/pkg/query-service/agentConf/db.go
index ffbc2f53a8..04ab780db6 100644
--- a/pkg/query-service/agentConf/db.go
+++ b/pkg/query-service/agentConf/db.go
@@ -151,7 +151,7 @@ func (r *Repo) insertConfig(
// allowing empty elements for logs - use case is deleting all pipelines
if len(elements) == 0 && c.ElementType != ElementTypeLogPipelines {
- zap.S().Error("insert config called with no elements ", c.ElementType)
+ zap.L().Error("insert config called with no elements ", zap.String("ElementType", string(c.ElementType)))
return model.BadRequest(fmt.Errorf("config must have atleast one element"))
}
@@ -159,7 +159,7 @@ func (r *Repo) insertConfig(
// the version can not be set by the user, we want to auto-assign the versions
// in a monotonically increasing order starting with 1. hence, we reject insert
// requests with version anything other than 0. here, 0 indicates un-assigned
- zap.S().Error("invalid version assignment while inserting agent config", c.Version, c.ElementType)
+ zap.L().Error("invalid version assignment while inserting agent config", zap.Int("version", c.Version), zap.String("ElementType", string(c.ElementType)))
return model.BadRequest(fmt.Errorf(
"user defined versions are not supported in the agent config",
))
@@ -167,7 +167,7 @@ func (r *Repo) insertConfig(
configVersion, err := r.GetLatestVersion(ctx, c.ElementType)
if err != nil && err.Type() != model.ErrorNotFound {
- zap.S().Error("failed to fetch latest config version", err)
+ zap.L().Error("failed to fetch latest config version", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to fetch latest config version"))
}
@@ -212,7 +212,7 @@ func (r *Repo) insertConfig(
c.DeployResult)
if dbErr != nil {
- zap.S().Error("error in inserting config version: ", zap.Error(dbErr))
+ zap.L().Error("error in inserting config version: ", zap.Error(dbErr))
return model.InternalError(errors.Wrap(dbErr, "failed to insert ingestion rule"))
}
@@ -258,7 +258,7 @@ func (r *Repo) updateDeployStatus(ctx context.Context,
_, err := r.db.ExecContext(ctx, updateQuery, status, result, lastHash, lastconf, version, string(elementType))
if err != nil {
- zap.S().Error("failed to update deploy status", err)
+ zap.L().Error("failed to update deploy status", zap.Error(err))
return model.BadRequest(fmt.Errorf("failed to update deploy status"))
}
@@ -276,7 +276,7 @@ func (r *Repo) updateDeployStatusByHash(
_, err := r.db.ExecContext(ctx, updateQuery, status, result, confighash)
if err != nil {
- zap.S().Error("failed to update deploy status", err)
+ zap.L().Error("failed to update deploy status", zap.Error(err))
return model.InternalError(errors.Wrap(err, "failed to update deploy status"))
}
diff --git a/pkg/query-service/agentConf/manager.go b/pkg/query-service/agentConf/manager.go
index 0fdab4e990..c9a7335e0b 100644
--- a/pkg/query-service/agentConf/manager.go
+++ b/pkg/query-service/agentConf/manager.go
@@ -224,19 +224,19 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
configVersion, err := GetConfigVersion(ctx, typ, version)
if err != nil {
- zap.S().Debug("failed to fetch config version during redeploy", err)
+ zap.L().Error("failed to fetch config version during redeploy", zap.Error(err))
return model.WrapApiError(err, "failed to fetch details of the config version")
}
if configVersion == nil || (configVersion != nil && configVersion.LastConf == "") {
- zap.S().Debug("config version has no conf yaml", configVersion)
+ zap.L().Debug("config version has no conf yaml", zap.Any("configVersion", configVersion))
return model.BadRequest(fmt.Errorf("the config version can not be redeployed"))
}
switch typ {
case ElementTypeSamplingRules:
var config *tsp.Config
if err := yaml.Unmarshal([]byte(configVersion.LastConf), &config); err != nil {
- zap.S().Error("failed to read last conf correctly", err)
+ zap.L().Debug("failed to read last conf correctly", zap.Error(err))
return model.BadRequest(fmt.Errorf("failed to read the stored config correctly"))
}
@@ -248,7 +248,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
opamp.AddToTracePipelineSpec("signoz_tail_sampling")
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
- zap.S().Error("failed to call agent config update for trace processor:", err)
+ zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to deploy the config"))
}
@@ -256,7 +256,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
case ElementTypeDropRules:
var filterConfig *filterprocessor.Config
if err := yaml.Unmarshal([]byte(configVersion.LastConf), &filterConfig); err != nil {
- zap.S().Error("failed to read last conf correctly", err)
+ zap.L().Error("failed to read last conf correctly", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to read the stored config correctly"))
}
processorConf := map[string]interface{}{
@@ -266,7 +266,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
opamp.AddToMetricsPipelineSpec("filter")
configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate)
if err != nil {
- zap.S().Error("failed to call agent config update for trace processor:", err)
+ zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return err
}
@@ -292,13 +292,13 @@ func UpsertFilterProcessor(ctx context.Context, version int, config *filterproce
opamp.AddToMetricsPipelineSpec("filter")
configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate)
if err != nil {
- zap.S().Error("failed to call agent config update for trace processor:", err)
+ zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return err
}
processorConfYaml, yamlErr := yaml.Marshal(config)
if yamlErr != nil {
- zap.S().Warnf("unexpected error while transforming processor config to yaml", yamlErr)
+ zap.L().Warn("unexpected error while transforming processor config to yaml", zap.Error(yamlErr))
}
m.updateDeployStatus(ctx, ElementTypeDropRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml))
@@ -317,7 +317,7 @@ func (m *Manager) OnConfigUpdate(agentId string, hash string, err error) {
message := "Deployment was successful"
defer func() {
- zap.S().Info(status, zap.String("agentId", agentId), zap.String("agentResponse", message))
+ zap.L().Info(status, zap.String("agentId", agentId), zap.String("agentResponse", message))
}()
if err != nil {
@@ -343,13 +343,13 @@ func UpsertSamplingProcessor(ctx context.Context, version int, config *tsp.Confi
opamp.AddToTracePipelineSpec("signoz_tail_sampling")
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
- zap.S().Error("failed to call agent config update for trace processor:", err)
+ zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return err
}
processorConfYaml, yamlErr := yaml.Marshal(config)
if yamlErr != nil {
- zap.S().Warnf("unexpected error while transforming processor config to yaml", yamlErr)
+ zap.L().Warn("unexpected error while transforming processor config to yaml", zap.Error(yamlErr))
}
m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml))
diff --git a/pkg/query-service/app/clickhouseReader/options.go b/pkg/query-service/app/clickhouseReader/options.go
index 0defced7ed..d92b5ee38f 100644
--- a/pkg/query-service/app/clickhouseReader/options.go
+++ b/pkg/query-service/app/clickhouseReader/options.go
@@ -106,7 +106,7 @@ func defaultConnector(cfg *namespaceConfig) (clickhouse.Conn, error) {
options.DialTimeout = cfg.DialTimeout
}
- zap.S().Infof("Connecting to Clickhouse at %s, Secure: %t, MaxIdleConns: %d, MaxOpenConns: %d, DialTimeout: %s", options.Addr, options.TLS != nil, options.MaxIdleConns, options.MaxOpenConns, options.DialTimeout)
+ zap.L().Info("Connecting to Clickhouse", zap.String("at", options.Addr[0]), zap.Int("MaxIdleConns", options.MaxIdleConns), zap.Int("MaxOpenConns", options.MaxOpenConns), zap.Duration("DialTimeout", options.DialTimeout))
db, err := clickhouse.Open(options)
if err != nil {
return nil, err
diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go
index df41572155..a1b12d9415 100644
--- a/pkg/query-service/app/clickhouseReader/reader.go
+++ b/pkg/query-service/app/clickhouseReader/reader.go
@@ -44,6 +44,7 @@ import (
"go.uber.org/zap"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
+ "go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/logs"
"go.signoz.io/signoz/pkg/query-service/app/services"
"go.signoz.io/signoz/pkg/query-service/auth"
@@ -72,6 +73,7 @@ const (
signozSampleLocalTableName = "samples_v2"
signozSampleTableName = "distributed_samples_v2"
signozTSTableName = "distributed_time_series_v2"
+ signozTSTableNameV4 = "distributed_time_series_v4"
signozTSTableNameV41Day = "distributed_time_series_v4_1day"
minTimespanForProgressiveSearch = time.Hour
@@ -139,8 +141,7 @@ func NewReader(
db, err := initialize(options)
if err != nil {
- zap.S().Error("failed to initialize ClickHouse: ", err)
- os.Exit(1)
+ zap.L().Fatal("failed to initialize ClickHouse", zap.Error(err))
}
return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster)
@@ -156,8 +157,8 @@ func NewReaderFromClickhouseConnection(
) *ClickHouseReader {
alertManager, err := am.New("")
if err != nil {
- zap.S().Errorf("msg: failed to initialize alert manager: ", "/t error:", err)
- zap.S().Errorf("msg: check if the alert manager URL is correctly set and valid")
+ zap.L().Error("failed to initialize alert manager", zap.Error(err))
+ zap.L().Error("check if the alert manager URL is correctly set and valid")
os.Exit(1)
}
@@ -345,20 +346,6 @@ func (r *ClickHouseReader) Start(readerReady chan bool) {
reloadReady.Close()
- // ! commented the alert manager can now
- // call query service to do this
- // channels, apiErrorObj := r.GetChannels()
-
- // if apiErrorObj != nil {
- // zap.S().Errorf("Not able to read channels from DB")
- // }
- // for _, channel := range *channels {
- // apiErrorObj = r.LoadChannel(&channel)
- // if apiErrorObj != nil {
- // zap.S().Errorf("Not able to load channel with id=%d loaded from DB", channel.Id, channel.Data)
- // }
- // }
-
<-cancel
return nil
@@ -442,14 +429,14 @@ func (r *ClickHouseReader) LoadChannel(channel *model.ChannelItem) *model.ApiErr
response, err := http.Post(constants.GetAlertManagerApiPrefix()+"v1/receivers", "application/json", bytes.NewBuffer([]byte(channel.Data)))
if err != nil {
- zap.S().Errorf("Error in getting response of API call to alertmanager/v1/receivers\n", err)
+ zap.L().Error("Error in getting response of API call to alertmanager/v1/receivers", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
responseData, _ := io.ReadAll(response.Body)
- err := fmt.Errorf("Error in getting 2xx response in API call to alertmanager/v1/receivers\n Status: %s \n Data: %s", response.Status, string(responseData))
- zap.S().Error(err)
+ err := fmt.Errorf("Error in getting 2xx response in API call to alertmanager/v1/receivers")
+ zap.L().Error("Error in getting 2xx response in API call to alertmanager/v1/receivers", zap.String("Status", response.Status), zap.String("Data", string(responseData)))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -466,17 +453,15 @@ func (r *ClickHouseReader) GetChannel(id string) (*model.ChannelItem, *model.Api
stmt, err := r.localDB.Preparex(query)
- zap.S().Info(query, idInt)
-
if err != nil {
- zap.S().Debug("Error in preparing sql query for GetChannel : ", err)
+ zap.L().Error("Error in preparing sql query for GetChannel", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
err = stmt.Get(&channel, idInt)
if err != nil {
- zap.S().Debug(fmt.Sprintf("Error in getting channel with id=%d : ", idInt), err)
+ zap.L().Error("Error in getting channel with id", zap.Int("id", idInt), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -502,14 +487,14 @@ func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError {
{
stmt, err := tx.Prepare(`DELETE FROM notification_channels WHERE id=$1;`)
if err != nil {
- zap.S().Errorf("Error in preparing statement for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback()
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
defer stmt.Close()
if _, err := stmt.Exec(idInt); err != nil {
- zap.S().Errorf("Error in Executing prepared statement for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -523,7 +508,7 @@ func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError {
err = tx.Commit()
if err != nil {
- zap.S().Errorf("Error in committing transaction for DELETE command to notification_channels\n", err)
+ zap.L().Error("Error in committing transaction for DELETE command to notification_channels", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -539,10 +524,10 @@ func (r *ClickHouseReader) GetChannels() (*[]model.ChannelItem, *model.ApiError)
err := r.localDB.Select(&channels, query)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -607,7 +592,7 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re
// check if channel type is supported in the current user plan
if err := r.featureFlags.CheckFeature(fmt.Sprintf("ALERT_CHANNEL_%s", strings.ToUpper(channel_type))); err != nil {
- zap.S().Warn("an unsupported feature was blocked", err)
+ zap.L().Warn("an unsupported feature was blocked", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("unsupported feature. please upgrade your plan to access this feature")}
}
@@ -617,14 +602,14 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re
stmt, err := tx.Prepare(`UPDATE notification_channels SET updated_at=$1, type=$2, data=$3 WHERE id=$4;`)
if err != nil {
- zap.S().Errorf("Error in preparing statement for UPDATE to notification_channels\n", err)
+ zap.L().Error("Error in preparing statement for UPDATE to notification_channels", zap.Error(err))
tx.Rollback()
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
defer stmt.Close()
if _, err := stmt.Exec(time.Now(), channel_type, string(receiverString), idInt); err != nil {
- zap.S().Errorf("Error in Executing prepared statement for UPDATE to notification_channels\n", err)
+ zap.L().Error("Error in Executing prepared statement for UPDATE to notification_channels", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -638,7 +623,7 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re
err = tx.Commit()
if err != nil {
- zap.S().Errorf("Error in committing transaction for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -652,7 +637,7 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *
// check if channel type is supported in the current user plan
if err := r.featureFlags.CheckFeature(fmt.Sprintf("ALERT_CHANNEL_%s", strings.ToUpper(channel_type))); err != nil {
- zap.S().Warn("an unsupported feature was blocked", err)
+ zap.L().Warn("an unsupported feature was blocked", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("unsupported feature. please upgrade your plan to access this feature")}
}
@@ -666,14 +651,14 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *
{
stmt, err := tx.Prepare(`INSERT INTO notification_channels (created_at, updated_at, name, type, data) VALUES($1,$2,$3,$4,$5);`)
if err != nil {
- zap.S().Errorf("Error in preparing statement for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback()
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
defer stmt.Close()
if _, err := stmt.Exec(time.Now(), time.Now(), receiver.Name, channel_type, string(receiverString)); err != nil {
- zap.S().Errorf("Error in Executing prepared statement for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -687,7 +672,7 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *
err = tx.Commit()
if err != nil {
- zap.S().Errorf("Error in committing transaction for INSERT to notification_channels\n", err)
+ zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -740,10 +725,10 @@ func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, erro
rows, err := r.db.Query(ctx, query)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, fmt.Errorf("Error in processing sql query")
}
@@ -771,7 +756,7 @@ func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context, skipConfig
rows, err := r.db.Query(ctx, query)
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -872,7 +857,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
query += subQuery
args = append(args, argsSubQuery...)
if errStatus != nil {
- zap.S().Error("Error in processing sql query: ", errStatus)
+ zap.L().Error("Error in processing sql query", zap.Error(errStatus))
return
}
err := r.db.QueryRow(
@@ -886,19 +871,19 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
}
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return
}
subQuery, argsSubQuery, errStatus = buildQueryWithTagParams(ctx, tags)
if errStatus != nil {
- zap.S().Error("Error building query with tag params: ", err)
+ zap.L().Error("Error building query with tag params", zap.Error(errStatus))
return
}
query += subQuery
args = append(args, argsSubQuery...)
err = r.db.QueryRow(ctx, errorQuery, args...).Scan(&numErrors)
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return
}
@@ -964,11 +949,11 @@ func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *
query += " GROUP BY time ORDER BY time DESC"
err := r.db.Select(ctx, &serviceOverviewItems, query, args...)
- zap.S().Debug(query)
+ zap.L().Debug("running query", zap.String("query", query))
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
- return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+ zap.L().Error("Error in processing sql query", zap.Error(err))
+ return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
serviceErrorItems := []model.ServiceErrorItem{}
@@ -992,10 +977,8 @@ func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *
query += " GROUP BY time ORDER BY time DESC"
err = r.db.Select(ctx, &serviceErrorItems, query, args...)
- zap.S().Debug(query)
-
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -1131,10 +1114,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY serviceName"
var dBResponse []model.DBResponseServiceName
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1148,10 +1131,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY httpCode"
var dBResponse []model.DBResponseHttpCode
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1165,10 +1148,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY httpRoute"
var dBResponse []model.DBResponseHttpRoute
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1182,10 +1165,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY httpUrl"
var dBResponse []model.DBResponseHttpUrl
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1199,10 +1182,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY httpMethod"
var dBResponse []model.DBResponseHttpMethod
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1216,10 +1199,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY httpHost"
var dBResponse []model.DBResponseHttpHost
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1233,10 +1216,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY name"
var dBResponse []model.DBResponseOperation
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1250,10 +1233,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY component"
var dBResponse []model.DBResponseComponent
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1266,10 +1249,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += query
var dBResponse []model.DBResponseTotal
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
@@ -1277,10 +1260,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery2 += query
var dBResponse2 []model.DBResponseTotal
err = r.db.Select(ctx, &dBResponse2, finalQuery2, args...)
- zap.S().Info(finalQuery2)
+ zap.L().Info(finalQuery2)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
if len(dBResponse) > 0 && len(dBResponse2) > 0 {
@@ -1302,9 +1285,9 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += query
var dBResponse []model.DBResponseMinMax
err = r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
if len(dBResponse) > 0 {
@@ -1317,10 +1300,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " ORDER BY durationNano LIMIT 1"
var dBResponse []model.DBResponseTotal
err = r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
@@ -1329,10 +1312,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " ORDER BY durationNano DESC LIMIT 1"
var dBResponse2 []model.DBResponseTotal
err = r.db.Select(ctx, &dBResponse2, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
}
if len(dBResponse) > 0 {
@@ -1348,10 +1331,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY rpcMethod"
var dBResponse []model.DBResponseRPCMethod
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1366,10 +1349,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
finalQuery += " GROUP BY responseStatusCode"
var dBResponse []model.DBResponseStatusCodeMethod
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
- zap.S().Info(finalQuery)
+ zap.L().Info(finalQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
@@ -1494,10 +1477,10 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo
projectionOptQuery := "SET allow_experimental_projection_optimization = 1"
err := r.db.Exec(ctx, projectionOptQuery)
- zap.S().Info(projectionOptQuery)
+ zap.L().Info(projectionOptQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if queryParams.Order == constants.Descending {
@@ -1532,10 +1515,10 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo
}
}
- zap.S().Info(baseQuery)
+ zap.L().Info(baseQuery)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -1772,10 +1755,10 @@ func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model
finalQuery += query
err := r.db.Select(ctx, &tagFilters, finalQuery, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
tagFiltersResult := model.TagFilters{
@@ -1894,10 +1877,10 @@ func (r *ClickHouseReader) GetTagValues(ctx context.Context, queryParams *model.
args = append(args, clickhouse.Named("limit", queryParams.Limit))
err := r.db.Select(ctx, &tagValues, finalQuery, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -1956,10 +1939,8 @@ func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *mo
}
err := r.db.Select(ctx, &topOperationsItems, query, args...)
- zap.S().Debug(query)
-
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -1988,10 +1969,10 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetU
err := r.db.Select(ctx, &usageItems, query, namedArgs...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, fmt.Errorf("Error in processing sql query")
}
@@ -2016,14 +1997,14 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spa
err := r.db.Select(ctx, &searchScanResponses, query, traceId)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
- return nil, fmt.Errorf("Error in processing sql query")
+ zap.L().Error("Error in processing sql query", zap.Error(err))
+ return nil, fmt.Errorf("error in processing sql query")
}
end := time.Now()
- zap.S().Debug("getTraceSQLQuery took: ", end.Sub(start))
+ zap.L().Debug("getTraceSQLQuery took: ", zap.Duration("duration", end.Sub(start)))
searchSpansResult := []model.SearchSpansResult{{
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
Events: make([][]interface{}, len(searchScanResponses)),
@@ -2039,7 +2020,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spa
searchSpanResponses = append(searchSpanResponses, jsonItem)
}
end = time.Now()
- zap.S().Debug("getTraceSQLQuery unmarshal took: ", end.Sub(start))
+ zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start)))
err = r.featureFlags.CheckFeature(model.SmartTraceDetail)
smartAlgoEnabled := err == nil
@@ -2050,7 +2031,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spa
return nil, err
}
end = time.Now()
- zap.S().Debug("smartTraceAlgo took: ", end.Sub(start))
+ zap.L().Debug("smartTraceAlgo took: ", zap.Duration("duration", end.Sub(start)))
} else {
for i, item := range searchSpanResponses {
spanEvents := item.GetValues()
@@ -2097,12 +2078,12 @@ func (r *ClickHouseReader) GetDependencyGraph(ctx context.Context, queryParams *
query += filterQuery + " GROUP BY src, dest;"
args = append(args, filterArgs...)
- zap.S().Debug(query, args)
+ zap.L().Debug("GetDependencyGraph query", zap.String("query", query), zap.Any("args", args))
err := r.db.Select(ctx, &response, query, args...)
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, fmt.Errorf("error in processing sql query %w", err)
}
@@ -2250,10 +2231,10 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query
err := r.db.Select(ctx, &SpanAggregatesDBResponseItems, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -2336,7 +2317,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
go func(tableName string) {
_, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration)
if dbErr != nil {
- zap.S().Error(fmt.Errorf("Error in inserting to ttl_status table: %s", dbErr.Error()))
+ zap.L().Error("Error in inserting to ttl_status table", zap.Error(dbErr))
return
}
req := fmt.Sprintf(
@@ -2348,32 +2329,32 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
- zap.S().Error(fmt.Errorf("Error in setting cold storage: %s", err.Err.Error()))
+ zap.L().Error("Error in setting cold storage", zap.Error(err))
statusItem, err := r.checkTTLStatusItem(ctx, tableName)
if err == nil {
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}
return
}
req += fmt.Sprint(" SETTINGS distributed_ddl_task_timeout = -1;")
- zap.S().Debugf("Executing TTL request: %s\n", req)
+ zap.L().Error("Executing TTL request: ", zap.String("request", req))
statusItem, _ := r.checkTTLStatusItem(ctx, tableName)
if err := r.db.Exec(context.Background(), req); err != nil {
- zap.S().Error(fmt.Errorf("Error in executing set TTL query: %s", err.Error()))
+ zap.L().Error("Error in executing set TTL query", zap.Error(err))
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
return
}
_, dbErr = r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusSuccess, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}(tableName)
@@ -2391,7 +2372,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
go func(tableName string) {
_, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration)
if dbErr != nil {
- zap.S().Error(fmt.Errorf("Error in inserting to ttl_status table: %s", dbErr.Error()))
+ zap.L().Error("Error in inserting to ttl_status table", zap.Error(dbErr))
return
}
req := fmt.Sprintf(
@@ -2404,32 +2385,32 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
- zap.S().Error(fmt.Errorf("Error in setting cold storage: %s", err.Err.Error()))
+ zap.L().Error("Error in setting cold storage", zap.Error(err))
statusItem, err := r.checkTTLStatusItem(ctx, tableName)
if err == nil {
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}
return
}
req += fmt.Sprint(" SETTINGS distributed_ddl_task_timeout = -1")
- zap.S().Debugf("Executing TTL request: %s\n", req)
+ zap.L().Info("Executing TTL request: ", zap.String("request", req))
statusItem, _ := r.checkTTLStatusItem(ctx, tableName)
if err := r.db.Exec(ctx, req); err != nil {
- zap.S().Error(fmt.Errorf("error while setting ttl. Err=%v", err))
+ zap.L().Error("error while setting ttl.", zap.Error(err))
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
return
}
_, dbErr = r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusSuccess, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}(tableName)
@@ -2445,7 +2426,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
go func(tableName string) {
_, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration)
if dbErr != nil {
- zap.S().Error(fmt.Errorf("error in inserting to ttl_status table: %s", dbErr.Error()))
+ zap.L().Error("error in inserting to ttl_status table", zap.Error(dbErr))
return
}
req := fmt.Sprintf(
@@ -2458,32 +2439,32 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
- zap.S().Error(fmt.Errorf("error in setting cold storage: %s", err.Err.Error()))
+ zap.L().Error("error in setting cold storage", zap.Error(err))
statusItem, err := r.checkTTLStatusItem(ctx, tableName)
if err == nil {
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}
return
}
req += fmt.Sprint(" SETTINGS distributed_ddl_task_timeout = -1")
- zap.S().Debugf("Executing TTL request: %s\n", req)
+ zap.L().Info("Executing TTL request: ", zap.String("request", req))
statusItem, _ := r.checkTTLStatusItem(ctx, tableName)
if err := r.db.Exec(ctx, req); err != nil {
- zap.S().Error(fmt.Errorf("error while setting ttl. Err=%v", err))
+ zap.L().Error("error while setting ttl", zap.Error(err))
_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
return
}
_, dbErr = r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusSuccess, statusItem.Id)
if dbErr != nil {
- zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
return
}
}(tableName)
@@ -2499,7 +2480,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, numberOfTransactionsStore int) {
_, err := r.localDB.Exec("DELETE FROM ttl_status WHERE transaction_id NOT IN (SELECT distinct transaction_id FROM ttl_status ORDER BY created_at DESC LIMIT ?)", numberOfTransactionsStore)
if err != nil {
- zap.S().Debug("Error in processing ttl_status delete sql query: ", err)
+ zap.L().Error("Error in processing ttl_status delete sql query", zap.Error(err))
}
}
@@ -2509,12 +2490,12 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, tableName str
query := `SELECT id, status, ttl, cold_storage_ttl FROM ttl_status WHERE table_name = ? ORDER BY created_at DESC`
- zap.S().Info(query, tableName)
+ zap.L().Info("checkTTLStatusItem query", zap.String("query", query), zap.String("tableName", tableName))
stmt, err := r.localDB.Preparex(query)
if err != nil {
- zap.S().Debug("Error preparing query for checkTTLStatusItem: ", err)
+ zap.L().Error("Error preparing query for checkTTLStatusItem", zap.Error(err))
return model.TTLStatusItem{}, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -2524,7 +2505,7 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, tableName str
return model.TTLStatusItem{}, nil
}
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return model.TTLStatusItem{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing ttl_status check sql query")}
}
return statusItem[0], nil
@@ -2565,9 +2546,9 @@ func (r *ClickHouseReader) setColdStorage(ctx context.Context, tableName string,
if len(coldStorageVolume) > 0 {
policyReq := fmt.Sprintf("ALTER TABLE %s ON CLUSTER %s MODIFY SETTING storage_policy='tiered'", tableName, r.cluster)
- zap.S().Debugf("Executing Storage policy request: %s\n", policyReq)
+ zap.L().Info("Executing Storage policy request: ", zap.String("request", policyReq))
if err := r.db.Exec(ctx, policyReq); err != nil {
- zap.S().Error(fmt.Errorf("error while setting storage policy. Err=%v", err))
+ zap.L().Error("error while setting storage policy", zap.Error(err))
return &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while setting storage policy. Err=%v", err)}
}
}
@@ -2580,12 +2561,10 @@ func (r *ClickHouseReader) GetDisks(ctx context.Context) (*[]model.DiskItem, *mo
query := "SELECT name,type FROM system.disks"
if err := r.db.Select(ctx, &diskItems, query); err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting disks. Err=%v", err)}
}
- zap.S().Infof("Got response: %+v\n", diskItems)
-
return &diskItems, nil
}
@@ -2603,7 +2582,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa
parseTTL := func(queryResp string) (int, int) {
- zap.S().Debugf("Parsing TTL from: %s", queryResp)
+ zap.L().Info("Parsing TTL from: ", zap.String("queryResp", queryResp))
deleteTTLExp := regexp.MustCompile(`toIntervalSecond\(([0-9]*)\)`)
moveTTLExp := regexp.MustCompile(`toIntervalSecond\(([0-9]*)\) TO VOLUME`)
@@ -2638,7 +2617,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
- zap.S().Error(fmt.Errorf("error while getting ttl. Err=%v", err))
+ zap.L().Error("error while getting ttl", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2656,7 +2635,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
- zap.S().Error(fmt.Errorf("error while getting ttl. Err=%v", err))
+ zap.L().Error("error while getting ttl", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2674,7 +2653,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
- zap.S().Error(fmt.Errorf("error while getting ttl. Err=%v", err))
+ zap.L().Error("error while getting ttl", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2796,7 +2775,7 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li
args = append(args, argsSubQuery...)
if errStatus != nil {
- zap.S().Error("Error in processing tags: ", errStatus)
+ zap.L().Error("Error in processing tags", zap.Error(errStatus))
return nil, errStatus
}
query = query + " GROUP BY groupID"
@@ -2824,10 +2803,10 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li
}
err := r.db.Select(ctx, &getErrorResponses, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -2856,15 +2835,15 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C
args = append(args, argsSubQuery...)
if errStatus != nil {
- zap.S().Error("Error in processing tags: ", errStatus)
+ zap.L().Error("Error in processing tags", zap.Error(errStatus))
return 0, errStatus
}
err := r.db.QueryRow(ctx, query, args...).Scan(&errorCount)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return 0, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -2874,7 +2853,7 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C
func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) {
if queryParams.ErrorID == "" {
- zap.S().Debug("errorId missing from params")
+ zap.L().Error("errorId missing from params")
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("ErrorID missing from params")}
}
var getErrorWithSpanReponse []model.ErrorWithSpan
@@ -2883,10 +2862,10 @@ func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams
args := []interface{}{clickhouse.Named("errorID", queryParams.ErrorID), clickhouse.Named("groupID", queryParams.GroupID), clickhouse.Named("timestamp", strconv.FormatInt(queryParams.Timestamp.UnixNano(), 10))}
err := r.db.Select(ctx, &getErrorWithSpanReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -2907,10 +2886,10 @@ func (r *ClickHouseReader) GetErrorFromGroupID(ctx context.Context, queryParams
err := r.db.Select(ctx, &getErrorWithSpanReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
@@ -2925,7 +2904,7 @@ func (r *ClickHouseReader) GetErrorFromGroupID(ctx context.Context, queryParams
func (r *ClickHouseReader) GetNextPrevErrorIDs(ctx context.Context, queryParams *model.GetErrorParams) (*model.NextPrevErrorIDs, *model.ApiError) {
if queryParams.ErrorID == "" {
- zap.S().Debug("errorId missing from params")
+ zap.L().Error("errorId missing from params")
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("ErrorID missing from params")}
}
var err *model.ApiError
@@ -2934,12 +2913,12 @@ func (r *ClickHouseReader) GetNextPrevErrorIDs(ctx context.Context, queryParams
}
getNextPrevErrorIDsResponse.NextErrorID, getNextPrevErrorIDsResponse.NextTimestamp, err = r.getNextErrorID(ctx, queryParams)
if err != nil {
- zap.S().Debug("Unable to get next error ID due to err: ", err)
+ zap.L().Error("Unable to get next error ID due to err: ", zap.Error(err))
return nil, err
}
getNextPrevErrorIDsResponse.PrevErrorID, getNextPrevErrorIDsResponse.PrevTimestamp, err = r.getPrevErrorID(ctx, queryParams)
if err != nil {
- zap.S().Debug("Unable to get prev error ID due to err: ", err)
+ zap.L().Error("Unable to get prev error ID due to err: ", zap.Error(err))
return nil, err
}
return &getNextPrevErrorIDsResponse, nil
@@ -2955,17 +2934,17 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getNextErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getNextErrorIDReponse) == 0 {
- zap.S().Info("NextErrorID not found")
+ zap.L().Info("NextErrorID not found")
return "", time.Time{}, nil
} else if len(getNextErrorIDReponse) == 1 {
- zap.S().Info("NextErrorID found")
+ zap.L().Info("NextErrorID found")
return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil
} else {
if getNextErrorIDReponse[0].Timestamp.UnixNano() == getNextErrorIDReponse[1].Timestamp.UnixNano() {
@@ -2976,10 +2955,10 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getNextErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getNextErrorIDReponse) == 0 {
@@ -2990,26 +2969,26 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getNextErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getNextErrorIDReponse) == 0 {
- zap.S().Info("NextErrorID not found")
+ zap.L().Info("NextErrorID not found")
return "", time.Time{}, nil
} else {
- zap.S().Info("NextErrorID found")
+ zap.L().Info("NextErrorID found")
return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil
}
} else {
- zap.S().Info("NextErrorID found")
+ zap.L().Info("NextErrorID found")
return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil
}
} else {
- zap.S().Info("NextErrorID found")
+ zap.L().Info("NextErrorID found")
return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil
}
}
@@ -3024,17 +3003,17 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getPrevErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getPrevErrorIDReponse) == 0 {
- zap.S().Info("PrevErrorID not found")
+ zap.L().Info("PrevErrorID not found")
return "", time.Time{}, nil
} else if len(getPrevErrorIDReponse) == 1 {
- zap.S().Info("PrevErrorID found")
+ zap.L().Info("PrevErrorID found")
return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil
} else {
if getPrevErrorIDReponse[0].Timestamp.UnixNano() == getPrevErrorIDReponse[1].Timestamp.UnixNano() {
@@ -3045,10 +3024,10 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getPrevErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getPrevErrorIDReponse) == 0 {
@@ -3059,26 +3038,26 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
err := r.db.Select(ctx, &getPrevErrorIDReponse, query, args...)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
}
if len(getPrevErrorIDReponse) == 0 {
- zap.S().Info("PrevErrorID not found")
+ zap.L().Info("PrevErrorID not found")
return "", time.Time{}, nil
} else {
- zap.S().Info("PrevErrorID found")
+ zap.L().Info("PrevErrorID found")
return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil
}
} else {
- zap.S().Info("PrevErrorID found")
+ zap.L().Info("PrevErrorID found")
return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil
}
} else {
- zap.S().Info("PrevErrorID found")
+ zap.L().Info("PrevErrorID found")
return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil
}
}
@@ -3109,7 +3088,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteTagKey(ctx context.Context, para
}
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -3148,7 +3127,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteTagValue(ctx context.Context, pa
}
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -3178,7 +3157,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteMetricNames(ctx context.Context,
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", matchText))
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -3196,7 +3175,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteMetricNames(ctx context.Context,
}
func (r *ClickHouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*model.Series, string, error) {
- zap.S().Error("GetMetricResultEE is not implemented for opensource version")
+ zap.L().Error("GetMetricResultEE is not implemented for opensource version")
return nil, "", fmt.Errorf("GetMetricResultEE is not implemented for opensource version")
}
@@ -3205,12 +3184,12 @@ func (r *ClickHouseReader) GetMetricResult(ctx context.Context, query string) ([
defer utils.Elapsed("GetMetricResult")()
- zap.S().Infof("Executing metric result query: %s", query)
+ zap.L().Info("Executing metric result query: ", zap.String("query", query))
rows, err := r.db.Query(ctx, query)
if err != nil {
- zap.S().Debug("Error in processing query: ", err)
+ zap.L().Error("Error in processing query", zap.Error(err))
return nil, err
}
@@ -3287,7 +3266,7 @@ func (r *ClickHouseReader) GetMetricResult(ctx context.Context, query string) ([
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
}
default:
- zap.S().Errorf("invalid var found in metric builder query result", v, colName)
+ zap.L().Error("invalid var found in metric builder query result", zap.Any("v", v), zap.String("colName", colName))
}
}
sort.Strings(groupBy)
@@ -3455,8 +3434,7 @@ func (r *ClickHouseReader) GetTagsInfoInLastHeartBeatInterval(ctx context.Contex
err := r.db.Select(ctx, &tagTelemetryDataList, queryStr)
if err != nil {
- zap.S().Info(queryStr)
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query: ", zap.Error(err))
return nil, err
}
@@ -3513,7 +3491,7 @@ func (r *ClickHouseReader) GetDashboardsInfo(ctx context.Context) (*model.Dashbo
var dashboardsData []dashboards.Dashboard
err := r.localDB.Select(&dashboardsData, query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return &dashboardsInfo, err
}
totalDashboardsWithPanelAndName := 0
@@ -3599,14 +3577,14 @@ func (r *ClickHouseReader) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo
var alertsData []string
err := r.localDB.Select(&alertsData, query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return &alertsInfo, err
}
for _, alert := range alertsData {
var rule rules.GettableRule
err = json.Unmarshal([]byte(alert), &rule)
if err != nil {
- zap.S().Errorf("msg:", "invalid rule data", "\t err:", err)
+ zap.L().Error("invalid rule data", zap.Error(err))
continue
}
if rule.AlertType == "LOGS_BASED_ALERT" {
@@ -3622,6 +3600,24 @@ func (r *ClickHouseReader) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo
return &alertsInfo, nil
}
+func (r *ClickHouseReader) GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error) {
+ savedViewsInfo := model.SavedViewsInfo{}
+ savedViews, err := explorer.GetViews()
+ if err != nil {
+ zap.S().Debug("Error in fetching saved views info: ", err)
+ return &savedViewsInfo, err
+ }
+ savedViewsInfo.TotalSavedViews = len(savedViews)
+ for _, view := range savedViews {
+ if view.SourcePage == "traces" {
+ savedViewsInfo.TracesSavedViews += 1
+ } else if view.SourcePage == "logs" {
+ savedViewsInfo.LogsSavedViews += 1
+ }
+ }
+ return &savedViewsInfo, nil
+}
+
func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError) {
// response will contain top level fields from the otel log model
response := model.GetFieldsResponse{
@@ -3817,7 +3813,6 @@ func (r *ClickHouseReader) GetLogs(ctx context.Context, params *model.LogsFilter
}
query = fmt.Sprintf("%s order by %s %s limit %d", query, params.OrderBy, params.Order, params.Limit)
- zap.S().Debug(query)
err = r.db.Select(ctx, &response, query)
if err != nil {
return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
@@ -3877,7 +3872,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
case <-ctx.Done():
done := true
client.Done <- &done
- zap.S().Debug("closing go routine : " + client.Name)
+ zap.L().Debug("closing go routine : " + client.Name)
return
case <-ticker.C:
// get the new 100 logs as anything more older won't make sense
@@ -3889,11 +3884,10 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
tmpQuery = fmt.Sprintf("%s and id > '%s'", tmpQuery, idStart)
}
tmpQuery = fmt.Sprintf("%s order by timestamp desc, id desc limit 100", tmpQuery)
- zap.S().Debug(tmpQuery)
response := []model.SignozLog{}
err := r.db.Select(ctx, &response, tmpQuery)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while getting logs", zap.Error(err))
client.Error <- err
return
}
@@ -3902,7 +3896,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
case <-ctx.Done():
done := true
client.Done <- &done
- zap.S().Debug("closing go routine while sending logs : " + client.Name)
+ zap.L().Debug("closing go routine while sending logs : " + client.Name)
return
default:
client.Logs <- &response[i]
@@ -3967,7 +3961,6 @@ func (r *ClickHouseReader) AggregateLogs(ctx context.Context, params *model.Logs
query = fmt.Sprintf("%s GROUP BY ts_start_interval ORDER BY ts_start_interval", query)
}
- zap.S().Debug(query)
err = r.db.Select(ctx, &logAggregatesDBResponseItems, query)
if err != nil {
return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
@@ -4006,10 +3999,10 @@ func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string)
var result model.DashboardVar
rows, err := r.db.Query(ctx, query)
- zap.S().Info(query)
+ zap.L().Info(query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, err
}
@@ -4052,7 +4045,7 @@ func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, req
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4101,7 +4094,7 @@ func (r *ClickHouseReader) GetMetricAttributeKeys(ctx context.Context, req *v3.F
}
rows, err = r.db.Query(ctx, query, req.AggregateAttribute, common.PastDayRoundOff(), fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4137,7 +4130,7 @@ func (r *ClickHouseReader) GetMetricAttributeValues(ctx context.Context, req *v3
rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, req.AggregateAttribute, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), common.PastDayRoundOff())
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4159,7 +4152,7 @@ func (r *ClickHouseReader) GetLatencyMetricMetadata(ctx context.Context, metricN
query := fmt.Sprintf("SELECT DISTINCT(temporality) from %s.%s WHERE metric_name='%s' AND JSONExtractString(labels, 'service_name') = '%s'", signozMetricDBName, signozTSTableName, metricName, serviceName)
rows, err := r.db.Query(ctx, query, metricName)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4178,7 +4171,7 @@ func (r *ClickHouseReader) GetLatencyMetricMetadata(ctx context.Context, metricN
query = fmt.Sprintf("SELECT DISTINCT(JSONExtractString(labels, 'le')) as le from %s.%s WHERE metric_name='%s' AND JSONExtractString(labels, 'service_name') = '%s' ORDER BY le", signozMetricDBName, signozTSTableName, metricName, serviceName)
rows, err = r.db.Query(ctx, query, metricName)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4194,7 +4187,7 @@ func (r *ClickHouseReader) GetLatencyMetricMetadata(ctx context.Context, metricN
// ideally this should not happen but we have seen ClickHouse
// returning empty string for some values
if err != nil {
- zap.S().Error("error while parsing le value: ", err)
+ zap.L().Error("error while parsing le value", zap.Error(err))
continue
}
if math.IsInf(le, 0) {
@@ -4216,7 +4209,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se
query := fmt.Sprintf("SELECT DISTINCT temporality, description, type, unit, is_monotonic from %s.%s WHERE metric_name=$1", signozMetricDBName, signozTSTableNameV41Day)
rows, err := r.db.Query(ctx, query, metricName)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while fetching metric metadata", zap.Error(err))
return nil, fmt.Errorf("error while fetching metric metadata: %s", err.Error())
}
defer rows.Close()
@@ -4235,7 +4228,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se
query = fmt.Sprintf("SELECT DISTINCT(JSONExtractString(labels, 'le')) as le from %s.%s WHERE metric_name=$1 AND type = 'Histogram' AND JSONExtractString(labels, 'service_name') = $2 ORDER BY le", signozMetricDBName, signozTSTableNameV41Day)
rows, err = r.db.Query(ctx, query, metricName, serviceName)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4251,7 +4244,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se
// ideally this should not happen but we have seen ClickHouse
// returning empty string for some values
if err != nil {
- zap.S().Error("error while parsing le value: ", err)
+ zap.L().Error("error while parsing le value", zap.Error(err))
continue
}
if math.IsInf(le, 0) {
@@ -4271,6 +4264,67 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se
}, nil
}
+func (r *ClickHouseReader) GetLatestReceivedMetric(
+ ctx context.Context, metricNames []string,
+) (*model.MetricStatus, *model.ApiError) {
+ if len(metricNames) < 1 {
+ return nil, nil
+ }
+
+ quotedMetricNames := []string{}
+ for _, m := range metricNames {
+ quotedMetricNames = append(quotedMetricNames, fmt.Sprintf(`'%s'`, m))
+ }
+ commaSeparatedMetricNames := strings.Join(quotedMetricNames, ", ")
+
+ query := fmt.Sprintf(`
+ SELECT metric_name, labels, unix_milli
+ from %s.%s
+ where metric_name in (
+ %s
+ )
+ order by unix_milli desc
+ limit 1
+ `, signozMetricDBName, signozTSTableNameV4, commaSeparatedMetricNames,
+ )
+
+ rows, err := r.db.Query(ctx, query)
+ if err != nil {
+ return nil, model.InternalError(fmt.Errorf(
+ "couldn't query clickhouse for received metrics status: %w", err,
+ ))
+ }
+ defer rows.Close()
+
+ var result *model.MetricStatus
+
+ if rows.Next() {
+
+ result = &model.MetricStatus{}
+ var labelsJson string
+
+ err := rows.Scan(
+ &result.MetricName,
+ &labelsJson,
+ &result.LastReceivedTsMillis,
+ )
+ if err != nil {
+ return nil, model.InternalError(fmt.Errorf(
+ "couldn't scan metric status row: %w", err,
+ ))
+ }
+
+ err = json.Unmarshal([]byte(labelsJson), &result.LastReceivedLabels)
+ if err != nil {
+ return nil, model.InternalError(fmt.Errorf(
+ "couldn't unmarshal metric labels json: %w", err,
+ ))
+ }
+ }
+
+ return result, nil
+}
+
func isColumn(tableStatement, attrType, field, datType string) bool {
// value of attrType will be `resource` or `tag`, if `tag` change it to `attribute`
name := utils.GetClickhouseColumnName(attrType, datType, field)
@@ -4324,7 +4378,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v
query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, tagDataType from %s.%s WHERE %s limit $2", r.logsDB, r.logsTagAttributeTable, where)
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4378,7 +4432,7 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
}
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4490,7 +4544,7 @@ func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.Fi
}
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4551,7 +4605,7 @@ func readRow(vars []interface{}, columnNames []string) ([]string, map[string]str
var metric map[string]string
err := json.Unmarshal([]byte(*v), &metric)
if err != nil {
- zap.S().Errorf("unexpected error encountered %v", err)
+ zap.L().Error("unexpected error encountered", zap.Error(err))
}
for key, val := range metric {
groupBy = append(groupBy, val)
@@ -4607,7 +4661,7 @@ func readRow(vars []interface{}, columnNames []string) ([]string, map[string]str
groupAttributes[colName] = fmt.Sprintf("%v", *v)
default:
- zap.S().Errorf("unsupported var type %v found in query builder query result for column %s", v, colName)
+ zap.L().Error("unsupported var type found in query builder query result", zap.Any("v", v), zap.String("colName", colName))
}
}
return groupBy, groupAttributes, groupAttributesArray, point
@@ -4705,7 +4759,7 @@ func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query stri
rows, err := r.db.Query(ctx, query)
if err != nil {
- zap.S().Errorf("error while reading time series result %v", err)
+ zap.L().Error("error while reading time series result", zap.Error(err))
return nil, err
}
defer rows.Close()
@@ -4730,7 +4784,7 @@ func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([
rows, err := r.db.Query(ctx, query)
if err != nil {
- zap.S().Errorf("error while reading time series result %v", err)
+ zap.L().Error("error while reading time series result", zap.Error(err))
return nil, err
}
defer rows.Close()
@@ -4873,7 +4927,7 @@ func (r *ClickHouseReader) GetTraceAggregateAttributes(ctx context.Context, req
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4914,7 +4968,7 @@ func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.Fi
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4968,7 +5022,7 @@ func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3.
query = fmt.Sprintf("SELECT DISTINCT stringTagValue from %s.%s WHERE tagKey = $1 AND stringTagValue ILIKE $2 AND tagType=$3 limit $4", r.TraceDB, r.spanAttributeTable)
rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), req.TagType, req.Limit)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4984,7 +5038,7 @@ func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3.
query = fmt.Sprintf("SELECT DISTINCT float64TagValue from %s.%s where tagKey = $1 AND toString(float64TagValue) ILIKE $2 AND tagType=$3 limit $4", r.TraceDB, r.spanAttributeTable)
rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), req.TagType, req.Limit)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -5018,7 +5072,7 @@ func (r *ClickHouseReader) GetSpanAttributeKeys(ctx context.Context) (map[string
rows, err = r.db.Query(ctx, query)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while executing query", zap.Error(err))
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -5056,7 +5110,7 @@ func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, tim
case <-ctx.Done():
done := true
client.Done <- &done
- zap.S().Debug("closing go routine : " + client.Name)
+ zap.L().Debug("closing go routine : " + client.Name)
return
case <-ticker.C:
// get the new 100 logs as anything more older won't make sense
@@ -5071,7 +5125,7 @@ func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, tim
response := []model.SignozLog{}
err := r.db.Select(ctx, &response, tmpQuery)
if err != nil {
- zap.S().Error(err)
+ zap.L().Error("Error while getting logs", zap.Error(err))
client.Error <- err
return
}
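
The hunks above all follow one migration pattern: replace zap's sugared global logger (`zap.S()`) with the strongly-typed one (`zap.L()`), moving errors, durations, and query strings out of the message text and into typed fields. A minimal, self-contained sketch of the before/after shape (the `doWork` helper is hypothetical, not code from this repository):

```go
package main

import (
	"errors"
	"time"

	"go.uber.org/zap"
)

// doWork is a hypothetical stand-in for a ClickHouse query call.
func doWork() error { return errors.New("connection refused") }

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	zap.ReplaceGlobals(logger) // zap.L() and zap.S() now return this logger

	start := time.Now()
	if err := doWork(); err != nil {
		// Before: sugared logger; the error is stringified into the message.
		zap.S().Error("Error in processing sql query: ", err)

		// After: strongly-typed logger; the error travels as a typed field
		// that log backends can index and filter on.
		zap.L().Error("Error in processing sql query", zap.Error(err))
	}
	zap.L().Info("query took", zap.Duration("duration", time.Since(start)))
}
```

The structured form keeps the message string constant, so log pipelines can aggregate on it while filtering on the `error` or `duration` fields separately.
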
diff --git a/pkg/query-service/app/dashboards/model.go b/pkg/query-service/app/dashboards/model.go
index 698b697279..c69f30a6bd 100644
--- a/pkg/query-service/app/dashboards/model.go
+++ b/pkg/query-service/app/dashboards/model.go
@@ -25,12 +25,14 @@ import (
var db *sqlx.DB
// Used for mapping job, instance from grafana
-var instanceEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.instance}}\\\"")
-var nodeEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.node}}\\\"")
-var jobEQRE = regexp.MustCompile("job(?s)=(?s)\\\"{{.job}}\\\"")
-var instanceRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.instance}}\\\"")
-var nodeRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.node}}\\\"")
-var jobRERE = regexp.MustCompile("job(?s)=~(?s)\\\"{{.job}}\\\"")
+var (
+ instanceEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.instance}}\\\"")
+ nodeEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.node}}\\\"")
+ jobEQRE = regexp.MustCompile("job(?s)=(?s)\\\"{{.job}}\\\"")
+ instanceRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.instance}}\\\"")
+ nodeRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.node}}\\\"")
+ jobRERE = regexp.MustCompile("job(?s)=~(?s)\\\"{{.job}}\\\"")
+)
// InitDB sets up the connection pool global variable.
func InitDB(dataSourceName string) (*sqlx.DB, error) {
@@ -188,10 +190,13 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf
dash.UpdateBy = &userEmail
dash.UpdateSlug()
dash.Uuid = uuid.New().String()
+ if data["uuid"] != nil {
+ dash.Uuid = data["uuid"].(string)
+ }
mapData, err := json.Marshal(dash.Data)
if err != nil {
- zap.S().Errorf("Error in marshalling data field in dashboard: ", dash, err)
+ zap.L().Error("Error in marshalling data field in dashboard: ", zap.Any("dashboard", dash), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -207,11 +212,10 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf
dash.Uuid, dash.CreatedAt, userEmail, dash.UpdatedAt, userEmail, mapData)
if err != nil {
- zap.S().Errorf("Error in inserting dashboard data: ", dash, err)
+ zap.L().Error("Error in inserting dashboard data: ", zap.Any("dashboard", dash), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
lastInsertId, err := result.LastInsertId()
-
if err != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -242,7 +246,7 @@ func DeleteDashboard(ctx context.Context, uuid string, fm interfaces.FeatureLook
dashboard, dErr := GetDashboard(ctx, uuid)
if dErr != nil {
- zap.S().Errorf("Error in getting dashboard: ", uuid, dErr)
+ zap.L().Error("Error in getting dashboard: ", zap.String("uuid", uuid), zap.Any("error", dErr))
return dErr
}
@@ -255,7 +259,6 @@ func DeleteDashboard(ctx context.Context, uuid string, fm interfaces.FeatureLook
query := `DELETE FROM dashboards WHERE uuid=?`
result, err := db.Exec(query, uuid)
-
if err != nil {
return &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -293,7 +296,7 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
mapData, err := json.Marshal(data)
if err != nil {
- zap.S().Errorf("Error in marshalling data field in dashboard: ", data, err)
+ zap.L().Error("Error in marshalling data field in dashboard: ", zap.Any("data", data), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
}
@@ -334,7 +337,7 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
dashboard.UpdatedAt, userEmail, mapData, dashboard.Uuid)
if err != nil {
- zap.S().Errorf("Error in inserting dashboard data: ", data, err)
+ zap.L().Error("Error in inserting dashboard data", zap.Any("data", data), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
if existingCount != newCount {
@@ -355,7 +358,7 @@ func LockUnlockDashboard(ctx context.Context, uuid string, lock bool) *model.Api
_, err := db.Exec(query, uuid)
if err != nil {
- zap.S().Errorf("Error in updating dashboard: ", uuid, err)
+ zap.L().Error("Error in updating dashboard", zap.String("uuid", uuid), zap.Error(err))
return &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -367,10 +370,10 @@ func updateFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiErro
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
- zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
+ zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
default:
- zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
+ zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
}
}
@@ -394,10 +397,10 @@ func checkFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiError
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
- zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
+ zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
default:
- zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
+ zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
}
}
@@ -419,7 +422,6 @@ func (d *Dashboard) UpdateSlug() {
}
func IsPostDataSane(data *map[string]interface{}) error {
-
val, ok := (*data)["title"]
if !ok || val == nil {
return fmt.Errorf("title not found in post data")
@@ -533,13 +535,13 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
if template.Type == "query" {
if template.Datasource == nil {
- zap.S().Warnf("Skipping panel %d as it has no datasource", templateIdx)
+ zap.L().Warn("Skipping panel as it has no datasource", zap.Int("templateIdx", templateIdx))
continue
}
// Skip if the source is not prometheus
source, stringOk := template.Datasource.(string)
if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") {
- zap.S().Warnf("Skipping template %d as it is not prometheus", templateIdx)
+ zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx))
continue
}
var result model.Datasource
@@ -551,12 +553,12 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
}
}
if result.Type != "prometheus" && result.Type != "" {
- zap.S().Warnf("Skipping template %d as it is not prometheus", templateIdx)
+ zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx))
continue
}
if !stringOk && !structOk {
- zap.S().Warnf("Didn't recognize source, skipping")
+ zap.L().Warn("Didn't recognize source, skipping")
continue
}
typ = "QUERY"
@@ -627,13 +629,13 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
continue
}
if panel.Datasource == nil {
- zap.S().Warnf("Skipping panel %d as it has no datasource", idx)
+ zap.L().Warn("Skipping panel as it has no datasource", zap.Int("idx", idx))
continue
}
// Skip if the datasource is not prometheus
source, stringOk := panel.Datasource.(string)
if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") {
- zap.S().Warnf("Skipping panel %d as it is not prometheus", idx)
+ zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx))
continue
}
var result model.Datasource
@@ -645,12 +647,12 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
}
}
if result.Type != "prometheus" && result.Type != "" {
- zap.S().Warnf("Skipping panel %d as it is not prometheus", idx)
+ zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx))
continue
}
if !stringOk && !structOk {
- zap.S().Warnf("Didn't recognize source, skipping")
+ zap.L().Warn("Didn't recognize source, skipping")
continue
}
diff --git a/pkg/query-service/app/dashboards/provision.go b/pkg/query-service/app/dashboards/provision.go
index 6f60dc50fe..fb97a960c1 100644
--- a/pkg/query-service/app/dashboards/provision.go
+++ b/pkg/query-service/app/dashboards/provision.go
@@ -10,55 +10,70 @@ import (
"go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/interfaces"
+ "go.signoz.io/signoz/pkg/query-service/model"
)
func readCurrentDir(dir string, fm interfaces.FeatureLookup) error {
file, err := os.Open(dir)
if err != nil {
- zap.S().Errorf("failed opening directory: %s", err)
- return err
+ zap.L().Warn("failed opening directory", zap.Error(err))
+ return nil
}
defer file.Close()
list, _ := file.Readdirnames(0) // 0 to read all files and folders
for _, filename := range list {
- zap.S().Info("Provisioning dashboard: ", filename)
+ zap.L().Info("Provisioning dashboard: ", zap.String("filename", filename))
// using filepath.Join for platform specific path creation
// which is equivalent to "dir+/+filename" (on unix based systems) but cleaner
plan, err := os.ReadFile(filepath.Join(dir, filename))
if err != nil {
- zap.S().Errorf("Creating Dashboards: Error in reading json fron file: %s\t%s", filename, err)
+ zap.L().Error("Creating Dashboards: Error in reading json fron file", zap.String("filename", filename), zap.Error(err))
continue
}
var data map[string]interface{}
err = json.Unmarshal(plan, &data)
if err != nil {
- zap.S().Errorf("Creating Dashboards: Error in unmarshalling json from file: %s\t%s", filename, err)
+ zap.L().Error("Creating Dashboards: Error in unmarshalling json from file", zap.String("filename", filename), zap.Error(err))
continue
}
err = IsPostDataSane(&data)
if err != nil {
- zap.S().Infof("Creating Dashboards: Error in file: %s\t%s", filename, err)
+ zap.L().Info("Creating Dashboards: Error in file", zap.String("filename", filename), zap.Error(err))
continue
}
- _, apiErr := GetDashboard(context.Background(), data["uuid"].(string))
- if apiErr == nil {
- zap.S().Infof("Creating Dashboards: Error in file: %s\t%s", filename, "Dashboard already present in database")
+ id := data["uuid"]
+ if id == nil {
+ _, apiErr := CreateDashboard(context.Background(), data, fm)
+ if apiErr != nil {
+ zap.L().Error("Creating Dashboards: Error in file", zap.String("filename", filename), zap.Error(apiErr.Err))
+ }
continue
}
- _, apiErr = CreateDashboard(context.Background(), data, fm)
+ apiErr := upsertDashboard(id.(string), data, filename, fm)
if apiErr != nil {
- zap.S().Errorf("Creating Dashboards: Error in file: %s\t%s", filename, apiErr.Err)
- continue
+ zap.L().Error("Creating Dashboards: Error upserting dashboard", zap.String("filename", filename), zap.Error(apiErr.Err))
}
-
}
return nil
}
+func upsertDashboard(uuid string, data map[string]interface{}, filename string, fm interfaces.FeatureLookup) *model.ApiError {
+ _, apiErr := GetDashboard(context.Background(), uuid)
+ if apiErr == nil {
+ zap.S().Infof("Creating Dashboards: Already exists: %s\t%s", filename, "Dashboard already present in database, Updating dashboard")
+ _, apiErr := UpdateDashboard(context.Background(), uuid, data, fm)
+ return apiErr
+ }
+
+ zap.S().Infof("Creating Dashboards: UUID not found: %s\t%s", filename, "Dashboard not present in database, Creating dashboard")
+ _, apiErr = CreateDashboard(context.Background(), data, fm)
+ return apiErr
+}
+
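
With this change, provisioning becomes idempotent for files that declare a `uuid`: restarts update the existing dashboard instead of logging "already present" and moving on. A rough sketch of the routing decision (the payload is hypothetical, and `fm` comes from the caller as in `readCurrentDir`):

```go
// Sketch only: mirrors the branch in readCurrentDir above.
data := map[string]interface{}{
	"title": "Host Metrics",                         // required by IsPostDataSane
	"uuid":  "0f1e2d3c-4b5a-6978-8796-a5b4c3d2e1f0", // optional stable identity
}
if id, ok := data["uuid"].(string); ok {
	// Stable identity: update the dashboard if it exists, create it otherwise.
	_ = upsertDashboard(id, data, "host-metrics.json", fm)
} else {
	// No uuid: every provisioning run creates a fresh dashboard.
	_, _ = CreateDashboard(context.Background(), data, fm)
}
```
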
func LoadDashboardFiles(fm interfaces.FeatureLookup) error {
dashboardsPath := constants.GetOrDefaultEnv("DASHBOARDS_PATH", "./config/dashboards")
return readCurrentDir(dashboardsPath, fm)
diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go
index e8200635f7..c025345cef 100644
--- a/pkg/query-service/app/http_handler.go
+++ b/pkg/query-service/app/http_handler.go
@@ -9,6 +9,7 @@ import (
"io"
"net/http"
"regexp"
+ "slices"
"strconv"
"strings"
"sync"
@@ -206,7 +207,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
hasUsers, err := aH.appDao.GetUsersWithOpts(context.Background(), 1)
if err.Error() != "" {
// raise warning but no panic as this is a recoverable condition
- zap.S().Warnf("unexpected error while fetch user count while initializing base api handler", err.Error())
+ zap.L().Warn("unexpected error while fetch user count while initializing base api handler", zap.Error(err))
}
if len(hasUsers) != 0 {
// first user is already created, we can mark the app ready for general use.
@@ -272,7 +273,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
Data: data,
})
if err != nil {
- zap.S().Error("msg", "error marshalling json response", "err", err)
+ zap.L().Error("error marshalling json response", zap.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -302,7 +303,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
if n, err := w.Write(b); err != nil {
- zap.S().Error("msg", "error writing response", "bytesWritten", n, "err", err)
+ zap.L().Error("error writing response", zap.Int("bytesWritten", n), zap.Error(err))
}
}
@@ -313,7 +314,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) {
Data: data,
})
if err != nil {
- zap.S().Error("msg", "error marshalling json response", "err", err)
+ zap.L().Error("error marshalling json response", zap.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -321,7 +322,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
if n, err := w.Write(b); err != nil {
- zap.S().Error("msg", "error writing response", "bytesWritten", n, "err", err)
+ zap.L().Error("error writing response", zap.Int("bytesWritten", n), zap.Error(err))
}
}
@@ -566,7 +567,7 @@ func (aH *APIHandler) addTemporality(ctx context.Context, qp *v3.QueryRangeParam
var err error
if aH.preferDelta {
- zap.S().Debug("fetching metric temporality")
+ zap.L().Debug("fetching metric temporality")
metricNameToTemporality, err = aH.reader.FetchTemporality(ctx, metricNames)
if err != nil {
return err
@@ -594,7 +595,7 @@ func (aH *APIHandler) QueryRangeMetricsV2(w http.ResponseWriter, r *http.Request
metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
if apiErrorObj != nil {
- zap.S().Errorf(apiErrorObj.Err.Error())
+ zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@@ -1129,7 +1130,7 @@ func (aH *APIHandler) testRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("Error in getting req body in test rule API\n", err)
+ zap.L().Error("Error in getting req body in test rule API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1172,7 +1173,7 @@ func (aH *APIHandler) patchRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("msg: error in getting req body of patch rule API\n", "\t error:", err)
+ zap.L().Error("error in getting req body of patch rule API\n", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1193,7 +1194,7 @@ func (aH *APIHandler) editRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("msg: error in getting req body of edit rule API\n", "\t error:", err)
+ zap.L().Error("error in getting req body of edit rule API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1244,14 +1245,14 @@ func (aH *APIHandler) testChannel(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("Error in getting req body of testChannel API\n", err)
+ zap.L().Error("Error in getting req body of testChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
receiver := &am.Receiver{}
if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer
- zap.S().Errorf("Error in parsing req body of testChannel API\n", err)
+ zap.L().Error("Error in parsing req body of testChannel API\n", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1271,14 +1272,14 @@ func (aH *APIHandler) editChannel(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("Error in getting req body of editChannel API\n", err)
+ zap.L().Error("Error in getting req body of editChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
receiver := &am.Receiver{}
if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer
- zap.S().Errorf("Error in parsing req body of editChannel API\n", err)
+ zap.L().Error("Error in parsing req body of editChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1299,14 +1300,14 @@ func (aH *APIHandler) createChannel(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("Error in getting req body of createChannel API\n", err)
+ zap.L().Error("Error in getting req body of createChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
receiver := &am.Receiver{}
if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer
- zap.S().Errorf("Error in parsing req body of createChannel API\n", err)
+ zap.L().Error("Error in parsing req body of createChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1346,7 +1347,7 @@ func (aH *APIHandler) createRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("Error in getting req body for create rule API\n", err)
+ zap.L().Error("Error in getting req body for create rule API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1373,7 +1374,7 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request)
return
}
- // zap.S().Info(query, apiError)
+ // zap.L().Info(query, apiError)
ctx := r.Context()
if to := r.FormValue("timeout"); to != "" {
@@ -1395,7 +1396,7 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request)
}
if res.Err != nil {
- zap.S().Error(res.Err)
+ zap.L().Error("error in query range metrics", zap.Error(res.Err))
}
if res.Err != nil {
@@ -1428,7 +1429,7 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) {
return
}
- // zap.S().Info(query, apiError)
+ // zap.L().Info(query, apiError)
ctx := r.Context()
if to := r.FormValue("timeout"); to != "" {
@@ -1450,7 +1451,7 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) {
}
if res.Err != nil {
- zap.S().Error(res.Err)
+ zap.L().Error("error in query range metrics", zap.Error(res.Err))
}
if res.Err != nil {
@@ -2044,7 +2045,7 @@ func (aH *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) listUsers(w http.ResponseWriter, r *http.Request) {
users, err := dao.DB().GetUsers(context.Background())
if err != nil {
- zap.S().Debugf("[listUsers] Failed to query list of users, err: %v", err)
+ zap.L().Error("[listUsers] Failed to query list of users", zap.Error(err))
RespondError(w, err, nil)
return
}
@@ -2061,7 +2062,7 @@ func (aH *APIHandler) getUser(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
user, err := dao.DB().GetUser(ctx, id)
if err != nil {
- zap.S().Debugf("[getUser] Failed to query user, err: %v", err)
+ zap.L().Error("[getUser] Failed to query user", zap.Error(err))
RespondError(w, err, "Failed to get user")
return
}
@@ -2091,7 +2092,7 @@ func (aH *APIHandler) editUser(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
old, apiErr := dao.DB().GetUser(ctx, id)
if apiErr != nil {
- zap.S().Debugf("[editUser] Failed to query user, err: %v", err)
+ zap.L().Error("[editUser] Failed to query user", zap.Error(err))
RespondError(w, apiErr, nil)
return
}
@@ -2175,7 +2176,7 @@ func (aH *APIHandler) patchUserFlag(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
b, err := io.ReadAll(r.Body)
if err != nil {
- zap.S().Errorf("failed read user flags from http request for userId ", userId, "with error: ", err)
+ zap.L().Error("failed read user flags from http request for userId ", zap.String("userId", userId), zap.Error(err))
RespondError(w, model.BadRequestStr("received user flags in invalid format"), nil)
return
}
@@ -2183,7 +2184,7 @@ func (aH *APIHandler) patchUserFlag(w http.ResponseWriter, r *http.Request) {
err = json.Unmarshal(b, &flags)
if err != nil {
- zap.S().Errorf("failed parsing user flags for userId ", userId, "with error: ", err)
+ zap.L().Error("failed parsing user flags for userId ", zap.String("userId", userId), zap.Error(err))
RespondError(w, model.BadRequestStr("received user flags in invalid format"), nil)
return
}
@@ -2347,7 +2348,7 @@ func (aH *APIHandler) resetPassword(w http.ResponseWriter, r *http.Request) {
}
if err := auth.ResetPassword(context.Background(), req); err != nil {
- zap.S().Debugf("resetPassword failed, err: %v\n", err)
+ zap.L().Error("resetPassword failed", zap.Error(err))
if aH.HandleError(w, err, http.StatusInternalServerError) {
return
}
@@ -2362,10 +2363,9 @@ func (aH *APIHandler) changePassword(w http.ResponseWriter, r *http.Request) {
return
}
- if err := auth.ChangePassword(context.Background(), req); err != nil {
- if aH.HandleError(w, err, http.StatusInternalServerError) {
- return
- }
+ if apiErr := auth.ChangePassword(context.Background(), req); apiErr != nil {
+ RespondError(w, apiErr, nil)
+ return
}
aH.WriteJSON(w, r, map[string]string{"data": "password changed successfully"})
@@ -2391,7 +2391,7 @@ func (aH *APIHandler) HandleError(w http.ResponseWriter, err error, statusCode i
return false
}
if statusCode == http.StatusInternalServerError {
- zap.S().Error("HTTP handler, Internal Server Error", zap.Error(err))
+ zap.L().Error("HTTP handler, Internal Server Error", zap.Error(err))
}
structuredResp := structuredResponse{
Errors: []structuredError{
@@ -2481,11 +2481,25 @@ func (ah *APIHandler) GetIntegrationConnectionStatus(
w http.ResponseWriter, r *http.Request,
) {
integrationId := mux.Vars(r)["integrationId"]
+ isInstalled, apiErr := ah.IntegrationsController.IsIntegrationInstalled(
+ r.Context(), integrationId,
+ )
+ if apiErr != nil {
+ RespondError(w, apiErr, "failed to check if integration is installed")
+ return
+ }
+
+ // Do not spend resources calculating connection status unless installed.
+ if !isInstalled {
+ ah.Respond(w, &integrations.IntegrationConnectionStatus{})
+ return
+ }
+
connectionTests, apiErr := ah.IntegrationsController.GetIntegrationConnectionTests(
r.Context(), integrationId,
)
if apiErr != nil {
- RespondError(w, apiErr, "Failed to fetch integration connection tests")
+ RespondError(w, apiErr, "failed to fetch integration connection tests")
return
}
@@ -2511,65 +2525,150 @@ func (ah *APIHandler) calculateConnectionStatus(
connectionTests *integrations.IntegrationConnectionTests,
lookbackSeconds int64,
) (*integrations.IntegrationConnectionStatus, *model.ApiError) {
+ // Calculate connection status for signals in parallel
+
result := &integrations.IntegrationConnectionStatus{}
+ errors := []*model.ApiError{}
+ var resultLock sync.Mutex
- if connectionTests.Logs != nil {
- qrParams := &v3.QueryRangeParamsV3{
- Start: time.Now().UnixMilli() - (lookbackSeconds * 1000),
- End: time.Now().UnixMilli(),
- CompositeQuery: &v3.CompositeQuery{
- PanelType: v3.PanelTypeList,
- QueryType: v3.QueryTypeBuilder,
- BuilderQueries: map[string]*v3.BuilderQuery{
- "A": {
- PageSize: 1,
- Filters: connectionTests.Logs,
- QueryName: "A",
- DataSource: v3.DataSourceLogs,
- Expression: "A",
- AggregateOperator: v3.AggregateOperatorNoOp,
- },
- },
- },
- }
- queryRes, err, _ := ah.querier.QueryRange(
- ctx, qrParams, map[string]v3.AttributeKey{},
+ var wg sync.WaitGroup
+
+ // Calculate logs connection status
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ logsConnStatus, apiErr := ah.calculateLogsConnectionStatus(
+ ctx, connectionTests.Logs, lookbackSeconds,
)
- if err != nil {
- return nil, model.InternalError(fmt.Errorf(
- "could not query for integration connection status: %w", err,
- ))
- }
- if len(queryRes) > 0 && queryRes[0].List != nil && len(queryRes[0].List) > 0 {
- lastLog := queryRes[0].List[0]
+ resultLock.Lock()
+ defer resultLock.Unlock()
+
+ if apiErr != nil {
+ errors = append(errors, apiErr)
+ } else {
+ result.Logs = logsConnStatus
+ }
+ }()
+
+ // Calculate metrics connection status
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ if connectionTests.Metrics == nil || len(connectionTests.Metrics) < 1 {
+ return
+ }
+
+ statusForLastReceivedMetric, apiErr := ah.reader.GetLatestReceivedMetric(
+ ctx, connectionTests.Metrics,
+ )
+
+ resultLock.Lock()
+ defer resultLock.Unlock()
+
+ if apiErr != nil {
+ errors = append(errors, apiErr)
+
+ } else if statusForLastReceivedMetric != nil {
resourceSummaryParts := []string{}
- lastLogResourceAttribs := lastLog.Data["resources_string"]
- if lastLogResourceAttribs != nil {
- resourceAttribs, ok := lastLogResourceAttribs.(*map[string]string)
- if !ok {
- return nil, model.InternalError(fmt.Errorf(
- "could not cast log resource attribs",
- ))
+ for k, v := range statusForLastReceivedMetric.LastReceivedLabels {
+ interestingLabels := []string{
+ "container_name", "host_name", "node_name",
+ "pod_name", "deployment_name", "cluster_name",
+ "namespace_name", "job_name", "service_name",
}
- for k, v := range *resourceAttribs {
+ isInterestingKey := !strings.HasPrefix(k, "_") && slices.ContainsFunc(
+ interestingLabels, func(l string) bool { return strings.Contains(k, l) },
+ )
+ if isInterestingKey {
resourceSummaryParts = append(resourceSummaryParts, fmt.Sprintf(
"%s=%s", k, v,
))
}
}
- lastLogResourceSummary := strings.Join(resourceSummaryParts, ", ")
- result.Logs = &integrations.SignalConnectionStatus{
- LastReceivedTsMillis: lastLog.Timestamp.UnixMilli(),
- LastReceivedFrom: lastLogResourceSummary,
+ result.Metrics = &integrations.SignalConnectionStatus{
+ LastReceivedFrom: strings.Join(resourceSummaryParts, ", "),
+ LastReceivedTsMillis: statusForLastReceivedMetric.LastReceivedTsMillis,
}
}
+ }()
+
+ wg.Wait()
+
+ if len(errors) > 0 {
+ return nil, errors[0]
}
return result, nil
}
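
The rewritten function fans each signal's check out to its own goroutine and funnels results and errors back through a mutex; only the first collected error is surfaced, matching the `errors[0]` return above. The skeleton of that pattern, stripped of the query logic:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	checks := []func() (string, error){
		func() (string, error) { return "logs ok", nil },
		func() (string, error) { return "metrics ok", nil },
	}

	var (
		mu      sync.Mutex
		wg      sync.WaitGroup
		results []string
		errs    []error
	)

	for _, check := range checks {
		wg.Add(1)
		go func(check func() (string, error)) {
			defer wg.Done()
			res, err := check()

			mu.Lock() // serialize writes to the shared slices
			defer mu.Unlock()
			if err != nil {
				errs = append(errs, err)
				return
			}
			results = append(results, res)
		}(check)
	}

	wg.Wait()
	if len(errs) > 0 {
		fmt.Println("first error:", errs[0]) // same policy as the handler above
		return
	}
	fmt.Println(results)
}
```
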
+func (ah *APIHandler) calculateLogsConnectionStatus(
+ ctx context.Context,
+ logsConnectionTest *v3.FilterSet,
+ lookbackSeconds int64,
+) (*integrations.SignalConnectionStatus, *model.ApiError) {
+ if logsConnectionTest == nil {
+ return nil, nil
+ }
+
+ qrParams := &v3.QueryRangeParamsV3{
+ Start: time.Now().UnixMilli() - (lookbackSeconds * 1000),
+ End: time.Now().UnixMilli(),
+ CompositeQuery: &v3.CompositeQuery{
+ PanelType: v3.PanelTypeList,
+ QueryType: v3.QueryTypeBuilder,
+ BuilderQueries: map[string]*v3.BuilderQuery{
+ "A": {
+ PageSize: 1,
+ Filters: logsConnectionTest,
+ QueryName: "A",
+ DataSource: v3.DataSourceLogs,
+ Expression: "A",
+ AggregateOperator: v3.AggregateOperatorNoOp,
+ },
+ },
+ },
+ }
+ queryRes, err, _ := ah.querier.QueryRange(
+ ctx, qrParams, map[string]v3.AttributeKey{},
+ )
+ if err != nil {
+ return nil, model.InternalError(fmt.Errorf(
+ "could not query for integration connection status: %w", err,
+ ))
+ }
+ if len(queryRes) > 0 && queryRes[0].List != nil && len(queryRes[0].List) > 0 {
+ lastLog := queryRes[0].List[0]
+
+ resourceSummaryParts := []string{}
+ lastLogResourceAttribs := lastLog.Data["resources_string"]
+ if lastLogResourceAttribs != nil {
+ resourceAttribs, ok := lastLogResourceAttribs.(*map[string]string)
+ if !ok {
+ return nil, model.InternalError(fmt.Errorf(
+ "could not cast log resource attribs",
+ ))
+ }
+ for k, v := range *resourceAttribs {
+ resourceSummaryParts = append(resourceSummaryParts, fmt.Sprintf(
+ "%s=%s", k, v,
+ ))
+ }
+ }
+ lastLogResourceSummary := strings.Join(resourceSummaryParts, ", ")
+
+ return &integrations.SignalConnectionStatus{
+ LastReceivedTsMillis: lastLog.Timestamp.UnixMilli(),
+ LastReceivedFrom: lastLogResourceSummary,
+ }, nil
+ }
+
+ return nil, nil
+}
+
func (ah *APIHandler) InstallIntegration(
w http.ResponseWriter, r *http.Request,
) {
@@ -2710,10 +2809,10 @@ func (aH *APIHandler) tailLogs(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "data: %v\n\n", buf.String())
flusher.Flush()
case <-client.Done:
- zap.S().Debug("done!")
+ zap.L().Debug("done!")
return
case err := <-client.Error:
- zap.S().Error("error occured!", err)
+ zap.L().Error("error occured", zap.Error(err))
return
}
}
@@ -2864,7 +2963,7 @@ func (ah *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
postable []logparsingpipeline.PostablePipeline,
) (*logparsingpipeline.PipelinesResponse, *model.ApiError) {
if len(postable) == 0 {
- zap.S().Warnf("found no pipelines in the http request, this will delete all the pipelines")
+ zap.L().Warn("found no pipelines in the http request, this will delete all the pipelines")
}
for _, p := range postable {
@@ -3304,7 +3403,7 @@ func (aH *APIHandler) QueryRangeV3Format(w http.ResponseWriter, r *http.Request)
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
- zap.S().Errorf(apiErrorObj.Err.Error())
+ zap.L().Error("error parsing query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@@ -3379,11 +3478,11 @@ func sendQueryResultEvents(r *http.Request, result []*v3.Result, queryRangeParam
dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
- zap.S().Errorf("error while matching the referrer: %v", err)
+ zap.L().Error("error while matching the referrer", zap.Error(err))
}
alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
- zap.S().Errorf("error while matching the referrer: %v", err)
+ zap.L().Error("error while matching the alert: ", zap.Error(err))
}
if alertMatched || dashboardMatched {
@@ -3394,22 +3493,60 @@ func sendQueryResultEvents(r *http.Request, result []*v3.Result, queryRangeParam
if err == nil {
signozLogsUsed, signozMetricsUsed, signozTracesUsed := telemetry.GetInstance().CheckSigNozSignals(queryRangeParams)
if signozLogsUsed || signozMetricsUsed || signozTracesUsed {
+
if dashboardMatched {
+ var dashboardID, widgetID string
+ var dashboardIDMatch, widgetIDMatch []string
+ dashboardIDRegex, err := regexp.Compile(`/dashboard/([a-f0-9\-]+)/`)
+ if err == nil {
+ dashboardIDMatch = dashboardIDRegex.FindStringSubmatch(referrer)
+ } else {
+ zap.S().Errorf("error while matching the dashboardIDRegex: %v", err)
+ }
+ widgetIDRegex, err := regexp.Compile(`widgetId=([a-f0-9\-]+)`)
+ if err == nil {
+ widgetIDMatch = widgetIDRegex.FindStringSubmatch(referrer)
+ } else {
+ zap.S().Errorf("error while matching the widgetIDRegex: %v", err)
+ }
+
+ if len(dashboardIDMatch) > 1 {
+ dashboardID = dashboardIDMatch[1]
+ }
+
+ if len(widgetIDMatch) > 1 {
+ widgetID = widgetIDMatch[1]
+ }
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_SUCCESSFUL_DASHBOARD_PANEL_QUERY, map[string]interface{}{
"queryType": queryRangeParams.CompositeQuery.QueryType,
"panelType": queryRangeParams.CompositeQuery.PanelType,
"tracesUsed": signozTracesUsed,
"logsUsed": signozLogsUsed,
"metricsUsed": signozMetricsUsed,
+ "dashboardId": dashboardID,
+ "widgetId": widgetID,
}, userEmail)
}
if alertMatched {
+ var alertID string
+ var alertIDMatch []string
+ alertIDRegex, err := regexp.Compile(`ruleId=(\d+)`)
+ if err != nil {
+ zap.S().Errorf("error while matching the alertIDRegex: %v", err)
+ } else {
+ alertIDMatch = alertIDRegex.FindStringSubmatch(referrer)
+ }
+
+ if len(alertIDMatch) > 1 {
+ alertID = alertIDMatch[1]
+ }
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_SUCCESSFUL_ALERT_QUERY, map[string]interface{}{
"queryType": queryRangeParams.CompositeQuery.QueryType,
"panelType": queryRangeParams.CompositeQuery.PanelType,
"tracesUsed": signozTracesUsed,
"logsUsed": signozLogsUsed,
"metricsUsed": signozMetricsUsed,
+ "alertId": alertID,
}, userEmail)
}
}
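
`FindStringSubmatch` returns the full match at index 0 and capture groups from index 1, hence the `len(match) > 1` guards before each read. A standalone check against a made-up referrer:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical referrer; in the handler it comes from the request.
	referrer := "https://example.signoz.io/dashboard/83b3f09c-9e4c-4a31-8d04-6a2a26dc3f12/edit?widgetId=9f0a1b2c-3d4e"

	dashboardIDRegex := regexp.MustCompile(`/dashboard/([a-f0-9\-]+)/`)
	widgetIDRegex := regexp.MustCompile(`widgetId=([a-f0-9\-]+)`)

	if m := dashboardIDRegex.FindStringSubmatch(referrer); len(m) > 1 {
		fmt.Println("dashboardId:", m[1]) // m[0] is the whole match, m[1] the capture
	}
	if m := widgetIDRegex.FindStringSubmatch(referrer); len(m) > 1 {
		fmt.Println("widgetId:", m[1])
	}
}
```
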
@@ -3422,7 +3559,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
- zap.S().Errorf(apiErrorObj.Err.Error())
+ zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@@ -3431,7 +3568,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
temporalityErr := aH.addTemporality(r.Context(), queryRangeParams)
if temporalityErr != nil {
- zap.S().Errorf("Error while adding temporality for metrics: %v", temporalityErr)
+ zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
@@ -3447,7 +3584,7 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
- zap.S().Errorf(apiErrorObj.Err.Error())
+ zap.L().Error("error parsing query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@@ -3508,10 +3645,10 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "data: %v\n\n", buf.String())
flusher.Flush()
case <-client.Done:
- zap.S().Debug("done!")
+ zap.L().Debug("done!")
return
case err := <-client.Error:
- zap.S().Error("error occurred!", err)
+ zap.L().Error("error occurred", zap.Error(err))
fmt.Fprintf(w, "event: error\ndata: %v\n\n", err.Error())
flusher.Flush()
return
@@ -3588,7 +3725,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
- zap.S().Errorf(apiErrorObj.Err.Error())
+ zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@@ -3596,7 +3733,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
// add temporality for each metric
temporalityErr := aH.populateTemporality(r.Context(), queryRangeParams)
if temporalityErr != nil {
- zap.S().Errorf("Error while adding temporality for metrics: %v", temporalityErr)
+ zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
@@ -3640,12 +3777,12 @@ func postProcessResult(result []*v3.Result, queryRangeParams *v3.QueryRangeParam
expression, err := govaluate.NewEvaluableExpressionWithFunctions(query.Expression, evalFuncs())
// This shouldn't happen here, because it should have been caught earlier in validation
if err != nil {
- zap.S().Errorf("error in expression: %s", err.Error())
+ zap.L().Error("error in expression", zap.Error(err))
return nil, err
}
formulaResult, err := processResults(result, expression)
if err != nil {
- zap.S().Errorf("error in expression: %s", err.Error())
+ zap.L().Error("error in expression", zap.Error(err))
return nil, err
}
formulaResult.QueryName = query.QueryName
diff --git a/pkg/query-service/app/integrations/builtin.go b/pkg/query-service/app/integrations/builtin.go
index a612e45ed3..cf98b3ff9d 100644
--- a/pkg/query-service/app/integrations/builtin.go
+++ b/pkg/query-service/app/integrations/builtin.go
@@ -4,6 +4,7 @@ import (
"context"
"embed"
"strings"
+ "unicode"
"encoding/base64"
"encoding/json"
@@ -133,6 +134,14 @@ func readBuiltInIntegration(dirpath string) (
}
integration.Id = "builtin-" + integration.Id
+ if len(integration.DataCollected.Metrics) > 0 {
+ metricsForConnTest := []string{}
+ for _, collectedMetric := range integration.DataCollected.Metrics {
+ promName := toPromMetricName(collectedMetric.Name)
+ metricsForConnTest = append(metricsForConnTest, promName)
+ }
+ integration.ConnectionTests.Metrics = metricsForConnTest
+ }
return &integration, nil
}
@@ -223,3 +232,34 @@ func readFileIfUri(maybeFileUri string, basedir string) (interface{}, error) {
return nil, fmt.Errorf("unsupported file type %s", maybeFileUri)
}
+
+// copied from signoz clickhouse exporter's `sanitize` which
+// in turn is copied from prometheus-go-metric-exporter
+//
+// replaces non-alphanumeric characters with underscores in s.
+func toPromMetricName(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+
+ // Note: No length limit for label keys because Prometheus doesn't
+ // define a length limit, thus we should NOT be truncating label keys.
+ // See https://github.com/orijtech/prometheus-go-metrics-exporter/issues/4.
+
+ s = strings.Map(func(r rune) rune {
+ // sanitizeRune converts anything that is not a letter or digit to an underscore
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ // Everything else turns into an underscore
+ return '_'
+ }, s)
+
+ if unicode.IsDigit(rune(s[0])) {
+ s = "key" + "_" + s
+ }
+ if s[0] == '_' {
+ s = "key" + s
+ }
+ return s
+}
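
A quick sanity check of the expected mappings, assuming `toPromMetricName` is in scope (for example dropped into a `_test.go` file in this package, with `import "testing"`); the inputs are illustrative:

```go
func TestToPromMetricName(t *testing.T) {
	cases := map[string]string{
		"postgresql.bgwriter.buffers.allocated": "postgresql_bgwriter_buffers_allocated",
		"redis.cmd.latency":                     "redis_cmd_latency",
		"2xx.responses":                         "key_2xx_responses", // leading digit gets a "key" prefix
		"_internal":                             "key_internal",      // so does a leading underscore
	}
	for in, want := range cases {
		if got := toPromMetricName(in); got != want {
			t.Errorf("toPromMetricName(%q) = %q, want %q", in, got, want)
		}
	}
}
```

This mapping is what keeps the connection-test names in step with the sanitized names the metrics are actually stored under.
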
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/assets/pipelines/log-parser.json b/pkg/query-service/app/integrations/builtin_integrations/postgres/assets/pipelines/log-parser.json
deleted file mode 100644
index 776565861c..0000000000
--- a/pkg/query-service/app/integrations/builtin_integrations/postgres/assets/pipelines/log-parser.json
+++ /dev/null
@@ -1,33 +0,0 @@
-{
- "id": "parse-default-postgres-access-log",
- "name": "Parse default postgres access log",
- "alias": "parse-default-postgres-access-log",
- "description": "Parse standard postgres access log",
- "enabled": true,
- "filter": {
- "op": "AND",
- "items": [
- {
- "key": {
- "type": "tag",
- "key": "source",
- "dataType": "string"
- },
- "op": "=",
- "value": "postgres"
- }
- ]
- },
- "config": [
- {
- "type": "grok_parser",
- "id": "parse-body-grok",
- "enabled": true,
- "orderId": 1,
- "name": "Parse Body",
- "parse_to": "attributes",
- "pattern": "%{GREEDYDATA}",
- "parse_from": "body"
- }
- ]
-}
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-logs.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-logs.md
new file mode 100644
index 0000000000..f048ec63b0
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-logs.md
@@ -0,0 +1,111 @@
+### Collect Postgres Logs
+
+You can configure Postgres logs collection by providing the required collector config to your collector.
+
+#### Create collector config file
+
+Save the following config for collecting postgres logs in a file named `postgres-logs-collection-config.yaml`
+
+```yaml
+receivers:
+ filelog/postgresql:
+ include: ["${env:POSTGRESQL_LOG_FILE}"]
+ operators:
+ # Parse default postgresql text log format.
+ # `log_line_prefix` postgres setting defaults to '%m [%p] ' which logs the timestamp and the process ID
+ # See https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-LOG-LINE-PREFIX for more details
+ - type: regex_parser
+ if: body matches '^(?P<ts>\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.?[0-9]*? [A-Z]*) \\[(?P<pid>[0-9]+)\\] (?P<log_level>[A-Z]*). (?P<message>.*)$'
+ parse_from: body
+ regex: '^(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.?[0-9]*? [A-Z]*) \[(?P<pid>[0-9]+)\] (?P<log_level>[A-Z]*). (?P<message>.*)$'
+ timestamp:
+ parse_from: attributes.ts
+ layout: '%Y-%m-%d %H:%M:%S %Z'
+ severity:
+ parse_from: attributes.log_level
+ mapping:
+ debug:
+ - DEBUG1
+ - DEBUG2
+ - DEBUG3
+ - DEBUG4
+ - DEBUG5
+ info:
+ - INFO
+ - LOG
+ - NOTICE
+ - DETAIL
+ warning: WARNING
+ error: ERROR
+ fatal:
+ - FATAL
+ - PANIC
+ on_error: send
+ - type: move
+ if: attributes.message != nil
+ from: attributes.message
+ to: body
+ - type: remove
+ if: attributes.log_level != nil
+ field: attributes.log_level
+ - type: remove
+ if: attributes.ts != nil
+ field: attributes.ts
+ - type: add
+ field: attributes.source
+ value: postgres
+
+processors:
+ batch:
+ send_batch_size: 10000
+ send_batch_max_size: 11000
+ timeout: 10s
+
+exporters:
+ # export to SigNoz cloud
+ otlp/postgres-logs:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/postgres-logs:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
+
+service:
+ pipelines:
+ logs/postgresql:
+ receivers: [filelog/postgresql]
+ processors: [batch]
+ exporters: [otlp/postgres-logs]
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# path of Postgres server log file. must be accessible by the otel collector
+export POSTGRESQL_LOG_FILE=/usr/local/var/log/postgres.log
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
+```
+
+#### Use collector config file
+
+Make the collector config file available to your otel collector and use it by adding the following flag to the command for running your collector
+```bash
+--config postgres-logs-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
+
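
The `regex_parser` operator above targets the default `log_line_prefix` of `'%m [%p] '`. A quick way to sanity-check the expression outside the collector; the sample line is made up and the group names match the operator config:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same expression as the regex_parser operator above.
	re := regexp.MustCompile(`^(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.?[0-9]*? [A-Z]*) \[(?P<pid>[0-9]+)\] (?P<log_level>[A-Z]*). (?P<message>.*)$`)

	line := "2024-01-02 03:04:05.678 UTC [1234] LOG:  database system is ready to accept connections"
	m := re.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	for i, name := range re.SubexpNames() {
		if i > 0 && name != "" {
			fmt.Printf("%s=%q\n", name, m[i])
		}
	}
}
```
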
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/configure-otel-collector.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-metrics.md
similarity index 56%
rename from pkg/query-service/app/integrations/builtin_integrations/postgres/config/configure-otel-collector.md
rename to pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-metrics.md
index d0dcf896c1..94a6fc7609 100644
--- a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/configure-otel-collector.md
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-metrics.md
@@ -1,24 +1,24 @@
-### Configure otel collector
+### Collect Postgres Metrics
-#### Save collector config file
+You can configure Postgres metrics collection by providing the required collector config to your collector.
-Save the following collector config in a file named `postgres-collector-config.yaml`
+#### Create collector config file
-```bash
+Save the following config for collecting postgres metrics in a file named `postgres-metrics-collection-config.yaml`
+
+```yaml
receivers:
postgresql:
# The endpoint of the postgresql server. Whether using TCP or Unix sockets, this value should be host:port. If transport is set to unix, the endpoint will internally be translated from host:port to /host.s.PGSQL.port
- endpoint: "localhost:5432"
+ endpoint: ${env:POSTGRESQL_ENDPOINT}
# The frequency at which to collect metrics from the Postgres instance.
collection_interval: 60s
# The username used to access the postgres instance
- username: monitoring
+ username: ${env:POSTGRESQL_USERNAME}
# The password used to access the postgres instance
password: ${env:POSTGRESQL_PASSWORD}
# The list of databases for which the receiver will attempt to collect statistics. If an empty list is provided, the receiver will attempt to collect statistics for all non-template databases
databases: []
- # List of databases which will be excluded when collecting statistics.
- exclude_databases: []
# # Defines the network to use for connecting to the server. Valid Values are `tcp` or `unix`
# transport: tcp
tls:
@@ -45,18 +45,19 @@ processors:
hostname_sources: ["os"]
exporters:
- # export to local collector
- otlp/local:
- endpoint: "localhost:4317"
- tls:
- insecure: true
# export to SigNoz cloud
- otlp/signoz:
- endpoint: "ingest.{region}.signoz.cloud:443"
+ otlp/postgres:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
tls:
insecure: false
headers:
- "signoz-access-token": ""
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/postgres:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
service:
pipelines:
@@ -64,9 +65,37 @@ service:
receivers: [postgresql]
# note: remove this processor if the collector host is not running on the same host as the postgres instance
processors: [resourcedetection/system]
- exporters: [otlp/local]
+ exporters: [otlp/postgres]
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# username for Postgres monitoring user
+export POSTGRESQL_USERNAME="monitoring"
+
+# password for Postgres monitoring user
+export POSTGRESQL_PASSWORD=""
+
+# Postgres endpoint reachable from the otel collector
+export POSTGRESQL_ENDPOINT="host:port"
+
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
```
#### Use collector config file
-Run your collector with the added flag `--config postgres-collector-config.yaml`
+Make the collector config file available to your otel collector and use it by adding the following flag to the command for running your collector
+```bash
+--config postgres-metrics-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md
index 519509e4e2..e50282d2a8 100644
--- a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md
@@ -1,26 +1,40 @@
-### Prepare postgres for monitoring
+## Before You Begin
-- Have a running postgresql instance
-- Have the monitoring user created
-- Have the monitoring user granted the necessary permissions
+To configure metrics and logs collection for a Postgres server, you need the following.
-This receiver supports PostgreSQL versions 9.6+
+### Ensure Postgres server is prepared for monitoring
-For PostgreSQL versions 10+, run:
+- **Ensure that the Postgres server is running a supported version**
+ Postgres versions 9.6+ are supported.
+ You can use the following SQL statement to determine server version
+ ```SQL
+ SELECT version();
+ ```
-```bash
-create user monitoring with password '';
-grant pg_monitor to monitoring;
-grant SELECT ON pg_stat_database to monitoring;
-```
+- **If collecting metrics, ensure that there is a Postgres user with required permissions**
+ To create a monitoring user for Postgres versions 10+, run:
+ ```SQL
+ create user monitoring with password '';
+ grant pg_monitor to monitoring;
+ grant SELECT ON pg_stat_database to monitoring;
+ ```
+
+ To create a monitoring user for Postgres versions >= 9.6 and <10, run:
+ ```SQL
+ create user monitoring with password '';
+ grant SELECT ON pg_stat_database to monitoring;
+ ```
+
-For PostgreSQL versions >= 9.6 and <10, run:
+### Ensure OTEL Collector is running with access to the Postgres server
-```bash
-create user monitoring with password '';
-grant SELECT ON pg_stat_database to monitoring;
-```
+- **Ensure that an OTEL collector is running in your deployment environment**
+ If needed, please [install an OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/)
+ If already installed, ensure that the collector version is v0.88.0 or newer.
-Set the following environment variables:
+ Also ensure that you can provide config files to the collector and that you can set environment variables and command line flags used for running it.
-- POSTGRESQL_PASSWORD
+- **Ensure that the OTEL collector can access the Postgres server**
+ In order to collect metrics, the collector must be able to access the Postgres server as a client using the monitoring user.
+
+ In order to collect logs, the collector must be able to read the Postgres server log file.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json b/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json
index 9b3da798bc..823ba61223 100644
--- a/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json
@@ -1,7 +1,7 @@
{
"id": "postgres",
"title": "PostgreSQL",
- "description": "Monitor postgres using logs and metrics.",
+ "description": "Monitor Postgres with metrics and logs",
"author": {
"name": "SigNoz",
"email": "integrations@signoz.io",
@@ -18,18 +18,20 @@
"instructions": "file://config/prerequisites.md"
},
{
- "title": "Configure Otel Collector",
- "instructions": "file://config/configure-otel-collector.md"
+ "title": "Collect Metrics",
+ "instructions": "file://config/collect-metrics.md"
+ },
+ {
+ "title": "Collect Logs",
+ "instructions": "file://config/collect-logs.md"
}
],
"assets": {
"logs": {
- "pipelines": [
- "file://assets/pipelines/log-parser.json"
- ]
+ "pipelines": []
},
"dashboards": [
- "file://assets/dashboards/overview.json"
+ "file://assets/dashboards/overview.json"
],
"alerts": []
},
@@ -52,37 +54,189 @@
"data_collected": {
"logs": [
{
- "name": "Request Method",
- "path": "attributes[\"http.request.method\"]",
- "type": "string",
- "description": "HTTP method"
+ "name": "Process ID",
+ "path": "attributes.pid",
+ "type": "string"
},
{
- "name": "Request Path",
- "path": "attributes[\"url.path\"]",
- "type": "string",
- "description": "path requested"
+ "name": "Timestamp",
+ "path": "timestamp",
+ "type": "timestamp"
},
{
- "name": "Response Status Code",
- "path": "attributes[\"http.response.status_code\"]",
- "type": "int",
- "description": "HTTP response code"
+ "name": "Severity Text",
+ "path": "severity_text",
+ "type": "string"
+ },
+ {
+ "name": "Severity Number",
+ "path": "severity_number",
+ "type": "number"
}
],
"metrics": [
{
- "name": "http.server.request.duration",
- "type": "Histogram",
- "unit": "s",
- "description": "Duration of HTTP server requests"
+ "name": "postgresql_backends",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of backends."
},
{
- "name": "http.server.active_requests",
- "type": "UpDownCounter",
- "unit": "{ request }",
- "description": "Number of active HTTP server requests"
+ "name": "postgresql_bgwriter_buffers_allocated",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of buffers allocated."
+ },
+ {
+ "name": "postgresql_bgwriter_buffers_writes",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of buffers written."
+ },
+ {
+ "name": "postgresql_bgwriter_checkpoint_count",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of checkpoints performed."
+ },
+ {
+ "name": "postgresql_bgwriter_duration",
+ "type": "sum",
+ "unit": "ms",
+ "description": "Total time spent writing and syncing files to disk by checkpoints."
+ },
+ {
+ "name": "postgresql_bgwriter_maxwritten",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of times the background writer stopped a cleaning scan because it had written too many buffers."
+ },
+ {
+ "name": "postgresql_blocks_read",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of blocks read."
+ },
+ {
+ "name": "postgresql_commits",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of commits."
+ },
+ {
+ "name": "postgresql_connection_max",
+ "type": "gauge",
+ "unit": "number",
+ "description": "Configured maximum number of client connections allowed"
+ },
+ {
+ "name": "postgresql_database_count",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of user databases."
+ },
+ {
+ "name": "postgresql_database_locks",
+ "type": "gauge",
+ "unit": "number",
+ "description": "The number of database locks."
+ },
+ {
+ "name": "postgresql_db_size",
+ "type": "sum",
+ "unit": "Bytes",
+ "description": "The database disk usage."
+ },
+ {
+ "name": "postgresql_deadlocks",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of deadlocks."
+ },
+ {
+ "name": "postgresql_index_scans",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of index scans on a table."
+ },
+ {
+ "name": "postgresql_index_size",
+ "type": "gauge",
+ "unit": "Bytes",
+ "description": "The size of the index on disk."
+ },
+ {
+ "name": "postgresql_operations",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of db row operations."
+ },
+ {
+ "name": "postgresql_replication_data_delay",
+ "type": "gauge",
+ "unit": "Bytes",
+ "description": "The amount of data delayed in replication."
+ },
+ {
+ "name": "postgresql_rollbacks",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of rollbacks."
+ },
+ {
+ "name": "postgresql_rows",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of rows in the database."
+ },
+ {
+ "name": "postgresql_sequential_scans",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of sequential scans."
+ },
+ {
+ "name": "postgresql_table_count",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of user tables in a database."
+ },
+ {
+ "name": "postgresql_table_size",
+ "type": "sum",
+ "unit": "Bytes",
+ "description": "Disk space used by a table."
+ },
+ {
+ "name": "postgresql_table_vacuum_count",
+ "type": "sum",
+ "unit": "number",
+ "description": "Number of times a table has manually been vacuumed."
+ },
+ {
+ "name": "postgresql_temp_files",
+ "type": "sum",
+ "unit": "number",
+ "description": "The number of temp files."
+ },
+ {
+ "name": "postgresql_wal_age",
+ "type": "gauge",
+ "unit": "seconds",
+ "description": "Age of the oldest WAL file."
+ },
+ {
+ "name": "postgresql_wal_delay",
+ "type": "gauge",
+ "unit": "seconds",
+ "description": "Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it."
+ },
+ {
+ "name": "postgresql_wal_lag",
+ "type": "gauge",
+ "unit": "seconds",
+ "description": "Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it."
}
]
}
-}
+}
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md
index 4af57e6b20..ac6e061eca 100644
--- a/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md
+++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md
@@ -1,3 +1,5 @@
### Monitor Postgres with SigNoz
-Parse your Postgres logs and collect key metrics.
+Collect key Postgres metrics and view them with an out-of-the-box dashboard.
+
+Collect and parse Postgres logs to populate timestamp, severity, and other log attributes for better querying and aggregation.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-logs.md b/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-logs.md
new file mode 100644
index 0000000000..7be122de4c
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-logs.md
@@ -0,0 +1,102 @@
+### Collect Redis Logs
+
+You can configure Redis logs collection by providing the required collector config to your collector.
+
+#### Create collector config file
+
+Save the following config for collecting redis logs in a file named `redis-logs-collection-config.yaml`
+
+```yaml
+receivers:
+ filelog/redis:
+ include: ["${env:REDIS_LOG_FILE}"]
+ operators:
+ # Parse default redis log format
+ # pid:role timestamp log_level message
+ - type: regex_parser
+ if: body matches '^(?P<pid>\\d+):(?P<role>\\w+) (?P<ts>\\d{2} \\w+ \\d{4} \\d{2}:\\d{2}:\\d{2}\\.\\d+) (?P<log_level>[.\\-*#]) (?P<message>.*)$'
+ parse_from: body
+ regex: '^(?P<pid>\d+):(?P<role>\w+) (?P<ts>\d{2} \w+ \d{4} \d{2}:\d{2}:\d{2}\.\d+) (?P<log_level>[.\-*#]) (?P<message>.*)$'
+ timestamp:
+ parse_from: attributes.ts
+ layout: '02 Jan 2006 15:04:05.000'
+ layout_type: gotime
+ severity:
+ parse_from: attributes.log_level
+ overwrite_text: true
+ mapping:
+ debug: '.'
+ info:
+ - '-'
+ - '*'
+ warning: '#'
+ on_error: send
+ - type: move
+ if: attributes.message != nil
+ from: attributes.message
+ to: body
+ - type: remove
+ if: attributes.log_level != nil
+ field: attributes.log_level
+ - type: remove
+ if: attributes.ts != nil
+ field: attributes.ts
+ - type: add
+ field: attributes.source
+ value: redis
+
+processors:
+ batch:
+ send_batch_size: 10000
+ send_batch_max_size: 11000
+ timeout: 10s
+
+exporters:
+ # export to SigNoz cloud
+ otlp/redis-logs:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/redis-logs:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
+
+
+service:
+ pipelines:
+ logs/redis:
+ receivers: [filelog/redis]
+ processors: [batch]
+ exporters: [otlp/redis-logs]
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# path of Redis server log file. must be accessible by the otel collector
+export REDIS_LOG_FILE=/var/log/redis.log
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
+```
+
+#### Use collector config file
+
+Make the collector config file available to your otel collector and use it by adding the following flag to the command for running your collector
+```bash
+--config redis-logs-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
+
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-metrics.md b/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-metrics.md
new file mode 100644
index 0000000000..1b6e4259b7
--- /dev/null
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/config/collect-metrics.md
@@ -0,0 +1,93 @@
+### Collect Redis Metrics
+
+You can configure Redis metrics collection by providing the required collector config to your collector.
+
+#### Create collector config file
+
+Save the following config for collecting Redis metrics in a file named `redis-metrics-collection-config.yaml`
+
+
+```yaml
+receivers:
+ redis:
+ # The hostname and port of the Redis instance, separated by a colon.
+ endpoint: ${env:REDIS_ENDPOINT}
+ # The frequency at which to collect metrics from the Redis instance.
+ collection_interval: 60s
+ # The password used to access the Redis instance; must match the password specified in the requirepass server configuration option.
+ password: ${env:REDIS_PASSWORD}
+ # # Defines the network to use for connecting to the server. Valid Values are `tcp` or `Unix`
+ # transport: tcp
+ # tls:
+ # insecure: false
+ # ca_file: /etc/ssl/certs/ca-certificates.crt
+ # cert_file: /etc/ssl/certs/redis.crt
+ # key_file: /etc/ssl/certs/redis.key
+ metrics:
+ redis.maxmemory:
+ enabled: true
+ redis.cmd.latency:
+ enabled: true
+
+processors:
+ # enriches the data with additional host information
+ # see https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor
+ resourcedetection/system:
+ # add additional detectors if needed
+ detectors: ["system"]
+ system:
+ hostname_sources: ["os"]
+
+exporters:
+ # export to SigNoz cloud
+ otlp/redis:
+ endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
+ tls:
+ insecure: false
+ headers:
+ "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
+
+ # export to local collector
+ # otlp/redis:
+ # endpoint: "localhost:4317"
+ # tls:
+ # insecure: true
+
+service:
+ pipelines:
+ metrics/redis:
+ receivers: [redis]
+ # note: remove this processor if the collector host is not running on the same host as the redis instance
+ processors: [resourcedetection/system]
+ exporters: [otlp/redis]
+```
+
+#### Set Environment Variables
+
+Set the following environment variables in your otel-collector environment:
+
+```bash
+
+# redis endpoint reachable from the otel collector
+export REDIS_ENDPOINT="localhost:6379"
+
+# password used to access the Redis instance.
+# must match the password specified in the requirepass server configuration option.
+# can be left empty if the redis server is not configured to require a password.
+export REDIS_PASSWORD=""
+
+# region specific SigNoz cloud ingestion endpoint
+export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
+
+# your SigNoz ingestion key
+export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
+
+```
+
+#### Use collector config file
+
+Make the collector config file available to your otel collector and use it by adding the following flag to the command for running your collector
+```bash
+--config redis-metrics-collection-config.yaml
+```
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/config/configure-otel-collector.md b/pkg/query-service/app/integrations/builtin_integrations/redis/config/configure-otel-collector.md
deleted file mode 100644
index 8dd52dd07e..0000000000
--- a/pkg/query-service/app/integrations/builtin_integrations/redis/config/configure-otel-collector.md
+++ /dev/null
@@ -1,63 +0,0 @@
-### Configure otel collector
-
-#### Save collector config file
-
-Save the following collector config in a file named `redis-collector-config.yaml`
-
-```bash
-receivers:
- redis:
- # The hostname and port of the Redis instance, separated by a colon.
- endpoint: "localhost:6379"
- # The frequency at which to collect metrics from the Redis instance.
- collection_interval: 60s
- # # The password used to access the Redis instance; must match the password specified in the requirepass server configuration option.
- # password: ${env:REDIS_PASSWORD}
- # # Defines the network to use for connecting to the server. Valid Values are `tcp` or `Unix`
- # transport: tcp
- # tls:
- # insecure: false
- # ca_file: /etc/ssl/certs/ca-certificates.crt
- # cert_file: /etc/ssl/certs/redis.crt
- # key_file: /etc/ssl/certs/redis.key
- metrics:
- redis.maxmemory:
- enabled: true
- redis.cmd.latency:
- enabled: true
-
-processors:
- # enriches the data with additional host information
- # see https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor
- resourcedetection/system:
- # add additional detectors if needed
- detectors: ["system"]
- system:
- hostname_sources: ["os"]
-
-exporters:
- # export to local collector
- otlp/local:
- endpoint: "localhost:4317"
- tls:
- insecure: true
- # export to SigNoz cloud
- otlp/signoz:
- endpoint: "ingest.{region}.signoz.cloud:443"
- tls:
- insecure: false
- headers:
- "signoz-access-token": ""
-
-service:
- pipelines:
- metrics/redis:
- receivers: [redis]
- # note: remove this processor if the collector host is not running on the same host as the redis instance
- processors: [resourcedetection/system]
- exporters: [otlp/local]
-```
-
-#### Use collector config file
-
-Run your collector with the added flag `--config redis-collector-config.yaml`
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/config/prerequisites.md b/pkg/query-service/app/integrations/builtin_integrations/redis/config/prerequisites.md
index 4e98933b69..ea0b553abc 100644
--- a/pkg/query-service/app/integrations/builtin_integrations/redis/config/prerequisites.md
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/config/prerequisites.md
@@ -1,5 +1,20 @@
-### Prepare redis for monitoring
+## Before You Begin
-- Have a running redis instance
-- Have the monitoring user created
-- Have the monitoring user granted the necessary permissions
+To configure metrics and logs collection for a Redis server, you need the following.
+
+### Ensure Redis server is running a supported version
+
+Redis server versions newer than 3.0 are supported.
+
+### Ensure OTEL Collector is running with access to the Redis server
+
+#### Ensure that an OTEL collector is running in your deployment environment
+If needed, please [install an OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
+If already installed, ensure that the collector version is v0.88.0 or newer.
+
+Also ensure that you can provide config files to the collector, and that you can set the environment variables and command-line flags used for running it.
+
+#### Ensure that the OTEL collector can access the Redis server
+In order to collect metrics, the collector must be able to access the Redis server as a client.
+
+In order to collect logs, the collector must be able to read the Redis server log file.
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json b/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json
index 862a98b306..a1f27ead72 100644
--- a/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/integration.json
@@ -1,7 +1,7 @@
{
"id": "redis",
"title": "Redis",
- "description": "Monitor redis using logs and metrics.",
+  "description": "Monitor Redis with metrics and logs",
"author": {
"name": "SigNoz",
"email": "integrations@signoz.io",
@@ -18,8 +18,12 @@
"instructions": "file://config/prerequisites.md"
},
{
- "title": "Configure Otel Collector",
- "instructions": "file://config/configure-otel-collector.md"
+ "title": "Collect Metrics",
+ "instructions": "file://config/collect-metrics.md"
+ },
+ {
+ "title": "Collect Logs",
+ "instructions": "file://config/collect-logs.md"
}
],
"assets": {
@@ -29,7 +33,7 @@
]
},
"dashboards": [
- "file://assets/dashboards/overview.json"
+ "file://assets/dashboards/overview.json"
],
"alerts": []
},
@@ -52,37 +56,218 @@
"data_collected": {
"logs": [
{
- "name": "Request Method",
- "path": "attributes[\"http.request.method\"]",
- "type": "string",
- "description": "HTTP method"
+ "name": "Process ID",
+ "path": "attributes.pid",
+ "type": "string"
},
{
- "name": "Request Path",
- "path": "attributes[\"url.path\"]",
- "type": "string",
- "description": "path requested"
+ "name": "Process Role",
+ "path": "attributes.role",
+ "type": "string"
},
{
- "name": "Response Status Code",
- "path": "attributes[\"http.response.status_code\"]",
- "type": "int",
- "description": "HTTP response code"
+ "name": "Timestamp",
+ "path": "timestamp",
+ "type": "timestamp"
+ },
+ {
+ "name": "Severity Text",
+ "path": "severity_text",
+ "type": "string"
+ },
+ {
+ "name": "Severity Number",
+ "path": "severity_number",
+ "type": "number"
}
],
"metrics": [
{
- "name": "http.server.request.duration",
- "type": "Histogram",
- "unit": "s",
- "description": "Duration of HTTP server requests"
+ "name": "redis_commands_processed",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Total number of commands processed by the server"
},
{
- "name": "http.server.active_requests",
- "type": "UpDownCounter",
- "unit": "{ request }",
- "description": "Number of active HTTP server requests"
+ "name": "redis_cpu_time",
+ "type": "Sum",
+ "unit": "s",
+ "description": "System CPU consumed by the Redis server in seconds since server start"
+ },
+ {
+ "name": "redis_keys_expired",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Total number of key expiration events"
+ },
+ {
+ "name": "redis_db_expires",
+ "type": "Gauge",
+ "unit": "number",
+ "description": "Number of keyspace keys with an expiration"
+ },
+ {
+ "name": "redis_commands",
+ "type": "Gauge",
+ "unit": "ops/s",
+ "description": "Number of commands processed per second"
+ },
+ {
+ "name": "redis_replication_offset",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "The server's current replication offset"
+ },
+ {
+ "name": "redis_net_input",
+ "type": "Sum",
+ "unit": "Bytes",
+ "description": "The total number of bytes read from the network"
+ },
+ {
+ "name": "redis_clients_connected",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of client connections (excluding connections from replicas)"
+ },
+ {
+ "name": "redis_keys_evicted",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of evicted keys due to maxmemory limit"
+ },
+ {
+ "name": "redis_maxmemory",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "The value of the maxmemory configuration directive"
+ },
+ {
+ "name": "redis_clients_max_input_buffer",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Biggest input buffer among current client connections"
+ },
+ {
+ "name": "redis_cmd_latency",
+ "type": "Gauge",
+ "unit": "s",
+ "description": "Command execution latency"
+ },
+ {
+ "name": "redis_memory_lua",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Number of bytes used by the Lua engine"
+ },
+ {
+ "name": "redis_replication_backlog_first_byte_offset",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "The master offset of the replication backlog buffer"
+ },
+ {
+ "name": "redis_keyspace_hits",
+ "type": "Sum",
+ "unit": "number",
+      "description": "Number of successful lookups of keys in the main dictionary"
+ },
+ {
+ "name": "redis_clients_blocked",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of clients pending on a blocking call"
+ },
+ {
+ "name": "redis_connections_rejected",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of connections rejected because of maxclients limit"
+ },
+ {
+ "name": "redis_latest_fork",
+ "type": "Gauge",
+ "unit": "us",
+ "description": "Duration of the latest fork operation in microseconds"
+ },
+ {
+ "name": "redis_clients_max_output_buffer",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Longest output list among current client connections"
+ },
+ {
+ "name": "redis_slaves_connected",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of connected replicas"
+ },
+ {
+ "name": "redis_db_keys",
+ "type": "Gauge",
+ "unit": "number",
+ "description": "Number of keyspace keys"
+ },
+ {
+ "name": "redis_keyspace_misses",
+ "type": "Sum",
+ "unit": "number",
+      "description": "Number of failed lookups of keys in the main dictionary"
+ },
+ {
+ "name": "redis_uptime",
+ "type": "Sum",
+ "unit": "s",
+ "description": "Number of seconds since Redis server start"
+ },
+ {
+ "name": "redis_memory_used",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Total number of bytes allocated by Redis using its allocator"
+ },
+ {
+ "name": "redis_net_output",
+ "type": "Sum",
+ "unit": "Bytes",
+ "description": "The total number of bytes written to the network"
+ },
+ {
+ "name": "redis_connections_received",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Total number of connections accepted by the server"
+ },
+ {
+ "name": "redis_rdb_changes_since_last_save",
+ "type": "Sum",
+ "unit": "number",
+ "description": "Number of changes since the last dump"
+ },
+ {
+ "name": "redis_memory_rss",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Number of bytes that Redis allocated as seen by the operating system"
+ },
+ {
+ "name": "redis_db_avg_ttl",
+ "type": "Gauge",
+ "unit": "ms",
+ "description": "Average keyspace keys TTL"
+ },
+ {
+ "name": "redis_memory_peak",
+ "type": "Gauge",
+ "unit": "Bytes",
+ "description": "Peak memory consumed by Redis (in bytes)"
+ },
+ {
+ "name": "redis_memory_fragmentation_ratio",
+ "type": "Gauge",
+ "unit": "number",
+ "description": "Ratio between used_memory_rss and used_memory"
}
]
}
-}
+}
\ No newline at end of file
diff --git a/pkg/query-service/app/integrations/builtin_integrations/redis/overview.md b/pkg/query-service/app/integrations/builtin_integrations/redis/overview.md
index 60ce2337b6..8e5d517da2 100644
--- a/pkg/query-service/app/integrations/builtin_integrations/redis/overview.md
+++ b/pkg/query-service/app/integrations/builtin_integrations/redis/overview.md
@@ -1,3 +1,5 @@
### Monitor Redis with SigNoz
-Parse your Redis logs and collect key metrics.
+Collect key Redis metrics and view them with an out-of-the-box dashboard.
+
+Collect and parse Redis logs to populate timestamp, severity, and other log attributes for better querying and aggregation.
diff --git a/pkg/query-service/app/integrations/controller.go b/pkg/query-service/app/integrations/controller.go
index a45ab3fb04..8695c4b1cd 100644
--- a/pkg/query-service/app/integrations/controller.go
+++ b/pkg/query-service/app/integrations/controller.go
@@ -63,6 +63,18 @@ func (c *Controller) GetIntegration(
return c.mgr.GetIntegration(ctx, integrationId)
}
+func (c *Controller) IsIntegrationInstalled(
+ ctx context.Context,
+ integrationId string,
+) (bool, *model.ApiError) {
+ installation, apiErr := c.mgr.getInstalledIntegration(ctx, integrationId)
+ if apiErr != nil {
+ return false, apiErr
+ }
+ isInstalled := installation != nil
+ return isInstalled, nil
+}
+
func (c *Controller) GetIntegrationConnectionTests(
ctx context.Context, integrationId string,
) (*IntegrationConnectionTests, *model.ApiError) {
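For context, a minimal sketch of how a caller might gate behaviour on the new `IsIntegrationInstalled` helper. Only `Controller.IsIntegrationInstalled` and `model.NotFoundError` are from this PR; the wrapper function is illustrative:

```go
// requireInstalled is a hypothetical caller in the same integrations package:
// it turns "not installed" into a typed not-found error before proceeding.
func requireInstalled(ctx context.Context, c *Controller, integrationId string) *model.ApiError {
	installed, apiErr := c.IsIntegrationInstalled(ctx, integrationId)
	if apiErr != nil {
		return apiErr
	}
	if !installed {
		return model.NotFoundError(fmt.Errorf("integration %s is not installed", integrationId))
	}
	return nil
}
```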
diff --git a/pkg/query-service/app/integrations/manager.go b/pkg/query-service/app/integrations/manager.go
index 110d370c1b..c3ebd21cc2 100644
--- a/pkg/query-service/app/integrations/manager.go
+++ b/pkg/query-service/app/integrations/manager.go
@@ -76,9 +76,11 @@ type IntegrationConnectionStatus struct {
}
type IntegrationConnectionTests struct {
+ // Filter to use for finding logs for the integration.
Logs *v3.FilterSet `json:"logs"`
- // TODO(Raj): Add connection tests for other signals.
+ // Metric names expected to have been received for the integration.
+ Metrics []string `json:"metrics"`
}
type IntegrationDetails struct {
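The new `Metrics` field lists metric names whose presence indicates a working integration. A rough sketch of how a connection test could consume it; the received-names lookup is a hypothetical stand-in, not part of this PR:

```go
// metricsConnectionSeen reports whether any of the metric names expected for an
// integration have been received. receivedMetricNames is an illustrative set of
// already-ingested metric names; only the Metrics field comes from this PR.
func metricsConnectionSeen(tests *IntegrationConnectionTests, receivedMetricNames map[string]bool) bool {
	for _, name := range tests.Metrics {
		if receivedMetricNames[name] {
			return true
		}
	}
	return false
}
```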
diff --git a/pkg/query-service/app/logparsingpipeline/collector_config.go b/pkg/query-service/app/logparsingpipeline/collector_config.go
index c370441210..17b8d96c1e 100644
--- a/pkg/query-service/app/logparsingpipeline/collector_config.go
+++ b/pkg/query-service/app/logparsingpipeline/collector_config.go
@@ -138,7 +138,7 @@ func buildLogsProcessors(current []string, logsParserPipeline []string) ([]strin
func checkDuplicateString(pipeline []string) bool {
exists := make(map[string]bool, len(pipeline))
- zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline)
+	zap.L().Debug("checking duplicate processors in the pipeline", zap.Any("pipeline", pipeline))
for _, processor := range pipeline {
name := processor
if _, ok := exists[name]; ok {
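The `zap.S()` to `zap.L()` change here repeats across the rest of this PR, so it is worth spelling out once. `zap.S()` is the sugared logger with printf-style methods; removed calls like `Debugf("...:", pipeline)` passed arguments without format verbs, so `fmt.Sprintf` rendered them as garbled `%!(EXTRA ...)` output. `zap.L()` takes typed fields instead. A minimal standalone illustration, assuming the global logger is set up via `zap.ReplaceGlobals`:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	pipeline := []string{"batch", "resourcedetection/system"}

	// Sugared logger: printf-style. Without a %v verb the argument is
	// appended as "%!(EXTRA ...)", which is what the old call sites did.
	zap.S().Debugf("checking duplicate processors in the pipeline: %v", pipeline)

	// Structured logger: typed, machine-parseable fields.
	zap.L().Debug("checking duplicate processors in the pipeline",
		zap.Any("pipeline", pipeline))
}
```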
diff --git a/pkg/query-service/app/logparsingpipeline/controller.go b/pkg/query-service/app/logparsingpipeline/controller.go
index 9527fe9e8d..2e6b0ba4d3 100644
--- a/pkg/query-service/app/logparsingpipeline/controller.go
+++ b/pkg/query-service/app/logparsingpipeline/controller.go
@@ -104,7 +104,7 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
if version >= 0 {
savedPipelines, errors := ic.getPipelinesByVersion(ctx, version)
if errors != nil {
- zap.S().Errorf("failed to get pipelines for version %d, %w", version, errors)
+ zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Errors("errors", errors))
return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version"))
}
result = savedPipelines
@@ -158,7 +158,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion(
) (*PipelinesResponse, *model.ApiError) {
pipelines, errors := ic.getEffectivePipelinesByVersion(ctx, version)
if errors != nil {
- zap.S().Errorf("failed to get pipelines for version %d, %w", version, errors)
+ zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Error(errors))
return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version"))
}
@@ -166,7 +166,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion(
if version >= 0 {
cv, err := agentConf.GetConfigVersion(ctx, agentConf.ElementTypeLogPipelines, version)
if err != nil {
- zap.S().Errorf("failed to get config for version %d, %s", version, err.Error())
+ zap.L().Error("failed to get config for version", zap.Int("version", version), zap.Error(err))
return nil, model.WrapApiError(err, "failed to get config for given version")
}
configVersion = cv
diff --git a/pkg/query-service/app/logparsingpipeline/db.go b/pkg/query-service/app/logparsingpipeline/db.go
index df187f0de3..618060d105 100644
--- a/pkg/query-service/app/logparsingpipeline/db.go
+++ b/pkg/query-service/app/logparsingpipeline/db.go
@@ -99,7 +99,7 @@ func (r *Repo) insertPipeline(
insertRow.RawConfig)
if err != nil {
- zap.S().Errorf("error in inserting pipeline data: ", zap.Error(err))
+ zap.L().Error("error in inserting pipeline data", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to insert pipeline"))
}
@@ -171,19 +171,19 @@ func (r *Repo) GetPipeline(
err := r.db.SelectContext(ctx, &pipelines, pipelineQuery, id)
if err != nil {
- zap.S().Errorf("failed to get ingestion pipeline from db", err)
+ zap.L().Error("failed to get ingestion pipeline from db", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to get ingestion pipeline from db"))
}
if len(pipelines) == 0 {
- zap.S().Warnf("No row found for ingestion pipeline id", id)
+ zap.L().Warn("No row found for ingestion pipeline id", zap.String("id", id))
return nil, model.NotFoundError(fmt.Errorf("No row found for ingestion pipeline id %v", id))
}
if len(pipelines) == 1 {
err := pipelines[0].ParseRawConfig()
if err != nil {
- zap.S().Errorf("invalid pipeline config found", id, err)
+ zap.L().Error("invalid pipeline config found", zap.String("id", id), zap.Error(err))
return nil, model.InternalError(
errors.Wrap(err, "found an invalid pipeline config"),
)
diff --git a/pkg/query-service/app/opamp/configure_ingestionRules.go b/pkg/query-service/app/opamp/configure_ingestionRules.go
index bd71aa38b0..ec9c9e5b88 100644
--- a/pkg/query-service/app/opamp/configure_ingestionRules.go
+++ b/pkg/query-service/app/opamp/configure_ingestionRules.go
@@ -27,10 +27,10 @@ func UpsertControlProcessors(
// AddToTracePipeline() or RemoveFromTracesPipeline() prior to calling
// this method
- zap.S().Debug("initiating ingestion rules deployment config", signal, processors)
+ zap.L().Debug("initiating ingestion rules deployment config", zap.String("signal", signal), zap.Any("processors", processors))
if signal != string(Metrics) && signal != string(Traces) {
- zap.S().Error("received invalid signal int UpsertControlProcessors", signal)
+		zap.L().Error("received invalid signal in UpsertControlProcessors", zap.String("signal", signal))
fnerr = coreModel.BadRequest(fmt.Errorf(
"signal not supported in ingestion rules: %s", signal,
))
@@ -51,7 +51,7 @@ func UpsertControlProcessors(
}
if len(agents) > 1 && signal == string(Traces) {
- zap.S().Debug("found multiple agents. this feature is not supported for traces pipeline (sampling rules)")
+ zap.L().Debug("found multiple agents. this feature is not supported for traces pipeline (sampling rules)")
fnerr = coreModel.BadRequest(fmt.Errorf("multiple agents not supported in sampling rules"))
return
}
@@ -60,7 +60,7 @@ func UpsertControlProcessors(
agenthash, err := addIngestionControlToAgent(agent, signal, processors, false)
if err != nil {
- zap.S().Error("failed to push ingestion rules config to agent", agent.ID, err)
+ zap.L().Error("failed to push ingestion rules config to agent", zap.String("agentID", agent.ID), zap.Error(err))
continue
}
@@ -89,7 +89,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma
// add ingestion control spec
err = makeIngestionControlSpec(agentConf, Signal(signal), processors)
if err != nil {
- zap.S().Error("failed to prepare ingestion control processors for agent ", agent.ID, err)
+ zap.L().Error("failed to prepare ingestion control processors for agent", zap.String("agentID", agent.ID), zap.Error(err))
return confHash, err
}
@@ -99,7 +99,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma
return confHash, err
}
- zap.S().Debugf("sending new config", string(configR))
+ zap.L().Debug("sending new config", zap.String("config", string(configR)))
hash := sha256.New()
_, err = hash.Write(configR)
if err != nil {
@@ -140,7 +140,7 @@ func makeIngestionControlSpec(agentConf *confmap.Conf, signal Signal, processors
// merge tracesPipelinePlan with current pipeline
mergedPipeline, err := buildPipeline(signal, currentPipeline)
if err != nil {
- zap.S().Error("failed to build pipeline", signal, err)
+ zap.L().Error("failed to build pipeline", zap.String("signal", string(signal)), zap.Error(err))
return err
}
diff --git a/pkg/query-service/app/opamp/model/agent.go b/pkg/query-service/app/opamp/model/agent.go
index 1eef7bb4cf..5751bd255b 100644
--- a/pkg/query-service/app/opamp/model/agent.go
+++ b/pkg/query-service/app/opamp/model/agent.go
@@ -276,7 +276,7 @@ func (agent *Agent) processStatusUpdate(
func (agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool {
recommendedConfig, confId, err := configProvider.RecommendAgentConfig([]byte(agent.EffectiveConfig))
if err != nil {
- zap.S().Error("could not generate config recommendation for agent:", agent.ID, err)
+ zap.L().Error("could not generate config recommendation for agent", zap.String("agentID", agent.ID), zap.Error(err))
return false
}
@@ -293,7 +293,7 @@ func (agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool
if len(confId) < 1 {
// Should never happen. Handle gracefully if it does by some chance.
- zap.S().Errorf("config provider recommended a config with empty confId. Using content hash for configId")
+ zap.L().Error("config provider recommended a config with empty confId. Using content hash for configId")
hash := sha256.New()
for k, v := range cfg.Config.ConfigMap {
diff --git a/pkg/query-service/app/opamp/model/agents.go b/pkg/query-service/app/opamp/model/agents.go
index 2e2118e216..e984cafce2 100644
--- a/pkg/query-service/app/opamp/model/agents.go
+++ b/pkg/query-service/app/opamp/model/agents.go
@@ -131,8 +131,8 @@ func (agents *Agents) RecommendLatestConfigToAll(
// Recommendation is same as current config
if string(newConfig) == agent.EffectiveConfig {
- zap.S().Infof(
- "Recommended config same as current effective config for agent %s", agent.ID,
+ zap.L().Info(
+ "Recommended config same as current effective config for agent", zap.String("agentID", agent.ID),
)
return nil
}
diff --git a/pkg/query-service/app/opamp/opamp_server.go b/pkg/query-service/app/opamp/opamp_server.go
index 2a7ba4c6fa..75d8d877be 100644
--- a/pkg/query-service/app/opamp/opamp_server.go
+++ b/pkg/query-service/app/opamp/opamp_server.go
@@ -40,7 +40,7 @@ func InitializeServer(
agents: agents,
agentConfigProvider: agentConfigProvider,
}
- opAmpServer.server = server.New(zap.S())
+ opAmpServer.server = server.New(zap.L().Sugar())
return opAmpServer
}
@@ -58,8 +58,8 @@ func (srv *Server) Start(listener string) error {
unsubscribe := srv.agentConfigProvider.SubscribeToConfigUpdates(func() {
err := srv.agents.RecommendLatestConfigToAll(srv.agentConfigProvider)
if err != nil {
- zap.S().Errorf(
- "could not roll out latest config recommendation to connected agents: %w", err,
+ zap.L().Error(
+ "could not roll out latest config recommendation to connected agents", zap.Error(err),
)
}
})
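For context, `SubscribeToConfigUpdates` follows the common subscribe-and-return-an-unsubscribe-closure shape used above. A minimal sketch of that shape; the concrete `AgentConfigProvider` implementation may differ, and the bookkeeping below is illustrative:

```go
// configSubscribers is an illustrative provider-side implementation of the
// subscribe/unsubscribe contract used in Server.Start above.
type configSubscribers struct {
	mu        sync.Mutex
	nextID    int
	callbacks map[int]func()
}

// SubscribeToConfigUpdates registers cb and returns a closure that removes it.
func (s *configSubscribers) SubscribeToConfigUpdates(cb func()) (unsubscribe func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.callbacks == nil {
		s.callbacks = map[int]func(){}
	}
	id := s.nextID
	s.nextID++
	s.callbacks[id] = cb
	return func() {
		s.mu.Lock()
		defer s.mu.Unlock()
		delete(s.callbacks, id)
	}
}
```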
@@ -85,15 +85,14 @@ func (srv *Server) OnMessage(conn types.Connection, msg *protobufs.AgentToServer
agent, created, err := srv.agents.FindOrCreateAgent(agentID, conn)
if err != nil {
- zap.S().Error("Failed to find or create agent %q: %v", agentID, err)
+ zap.L().Error("Failed to find or create agent", zap.String("agentID", agentID), zap.Error(err))
// TODO: handle error
}
if created {
agent.CanLB = model.ExtractLbFlag(msg.AgentDescription)
- zap.S().Debugf(
- "New agent added:",
- zap.Bool("canLb", agent.CanLB),
+ zap.L().Debug(
+ "New agent added", zap.Bool("canLb", agent.CanLB),
zap.String("ID", agent.ID),
zap.Any("status", agent.CurrentStatus),
)
@@ -117,7 +116,7 @@ func Ready() bool {
return false
}
if opAmpServer.agents.Count() == 0 {
- zap.S().Warnf("no agents available, all agent config requests will be rejected")
+ zap.L().Warn("no agents available, all agent config requests will be rejected")
return false
}
return true
diff --git a/pkg/query-service/app/opamp/pipeline_builder.go b/pkg/query-service/app/opamp/pipeline_builder.go
index 841a9ce5c6..7654fe8c4f 100644
--- a/pkg/query-service/app/opamp/pipeline_builder.go
+++ b/pkg/query-service/app/opamp/pipeline_builder.go
@@ -89,7 +89,7 @@ func RemoveFromMetricsPipelineSpec(name string) {
func checkDuplicates(pipeline []interface{}) bool {
exists := make(map[string]bool, len(pipeline))
- zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline)
+ zap.L().Debug("checking duplicate processors in the pipeline", zap.Any("pipeline", pipeline))
for _, processor := range pipeline {
name := processor.(string)
if _, ok := exists[name]; ok {
@@ -149,7 +149,7 @@ func buildPipeline(signal Signal, current []interface{}) ([]interface{}, error)
currentPos := loc + inserts
// if disabled then remove from the pipeline
if !m.Enabled {
- zap.S().Debugf("build_pipeline: found a disabled item, removing from pipeline at position", currentPos-1, " ", m.Name)
+ zap.L().Debug("build_pipeline: found a disabled item, removing from pipeline at position", zap.Int("position", currentPos-1), zap.String("processor", m.Name))
if currentPos-1 <= 0 {
pipeline = pipeline[currentPos+1:]
} else {
@@ -170,10 +170,10 @@ func buildPipeline(signal Signal, current []interface{}) ([]interface{}, error)
// right after last matched processsor (e.g. insert filters after tail_sampling for existing list of [batch, tail_sampling])
if lastMatched <= 0 {
- zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position 0:", m.Name)
+ zap.L().Debug("build_pipeline: found a new item to be inserted, inserting at position 0", zap.String("processor", m.Name))
pipeline = append([]interface{}{m.Name}, pipeline[lastMatched+1:]...)
} else {
- zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position :", lastMatched, " ", m.Name)
+ zap.L().Debug("build_pipeline: found a new item to be inserted, inserting at position", zap.Int("position", lastMatched), zap.String("processor", m.Name))
prior := make([]interface{}, len(pipeline[:lastMatched]))
next := make([]interface{}, len(pipeline[lastMatched:]))
copy(prior, pipeline[:lastMatched])
diff --git a/pkg/query-service/app/querier/helper.go b/pkg/query-service/app/querier/helper.go
index 47f65fe007..71ee5da72d 100644
--- a/pkg/query-service/app/querier/helper.go
+++ b/pkg/query-service/app/querier/helper.go
@@ -116,7 +116,7 @@ func (q *querier) runBuilderQuery(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -143,7 +143,7 @@ func (q *querier) runBuilderQuery(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -154,7 +154,7 @@ func (q *querier) runBuilderQuery(
// caching the data
mergedSeriesData, marshallingErr = json.Marshal(mergedSeries)
if marshallingErr != nil {
- zap.S().Error("error marshalling merged series", zap.Error(marshallingErr))
+ zap.L().Error("error marshalling merged series", zap.Error(marshallingErr))
}
}
@@ -172,7 +172,7 @@ func (q *querier) runBuilderQuery(
// caching the data
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
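The cache handling in this and the following hunks repeats one pattern: retrieve cached series, merge them with freshly queried ones, and store the merge back with a TTL. A condensed sketch of that flow, using only calls visible in this PR (`cache.Retrieve`, `mergeSerieses`, `cache.Store`); the helper itself is illustrative:

```go
// mergeWithCache folds freshly queried series into whatever is cached under
// cacheKey and writes the merge back with a one-hour TTL. Errors are logged
// and otherwise ignored, matching the hunks above.
func (q *querier) mergeWithCache(cacheKey string, missedSeries []*v3.Series) []*v3.Series {
	var cachedSeries []*v3.Series
	data, _, err := q.cache.Retrieve(cacheKey, true)
	if err == nil && data != nil {
		if err := json.Unmarshal(data, &cachedSeries); err != nil {
			zap.L().Error("error unmarshalling cached data", zap.Error(err))
		}
	}
	mergedSeries := mergeSerieses(cachedSeries, missedSeries)
	if mergedSeriesData, err := json.Marshal(mergedSeries); err != nil {
		zap.L().Error("error marshalling merged series", zap.Error(err))
	} else if err := q.cache.Store(cacheKey, mergedSeriesData, time.Hour); err != nil {
		zap.L().Error("error storing merged series", zap.Error(err))
	}
	return mergedSeries
}
```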
@@ -251,7 +251,7 @@ func (q *querier) runBuilderQuery(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -290,7 +290,7 @@ func (q *querier) runBuilderQuery(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
var mergedSeriesData []byte
@@ -300,7 +300,7 @@ func (q *querier) runBuilderQuery(
// caching the data
mergedSeriesData, marshallingErr = json.Marshal(mergedSeries)
if marshallingErr != nil {
- zap.S().Error("error marshalling merged series", zap.Error(marshallingErr))
+ zap.L().Error("error marshalling merged series", zap.Error(marshallingErr))
}
}
@@ -316,7 +316,7 @@ func (q *querier) runBuilderQuery(
if missedSeriesLen > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil {
err := q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
@@ -353,7 +353,7 @@ func (q *querier) runBuilderExpression(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -379,7 +379,7 @@ func (q *querier) runBuilderExpression(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -390,7 +390,7 @@ func (q *querier) runBuilderExpression(
// caching the data
mergedSeriesData, marshallingErr = json.Marshal(mergedSeries)
if marshallingErr != nil {
- zap.S().Error("error marshalling merged series", zap.Error(marshallingErr))
+ zap.L().Error("error marshalling merged series", zap.Error(marshallingErr))
}
}
@@ -406,7 +406,7 @@ func (q *querier) runBuilderExpression(
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil {
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go
index 103660f8bc..d735e00a1f 100644
--- a/pkg/query-service/app/querier/querier.go
+++ b/pkg/query-service/app/querier/querier.go
@@ -108,7 +108,7 @@ func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3.
series.Points = points
}
if pointsWithNegativeTimestamps > 0 {
- zap.S().Errorf("found points with negative timestamps for query %s", query)
+ zap.L().Error("found points with negative timestamps for query", zap.String("query", query))
}
return result, err
}
@@ -346,7 +346,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
// Ensure NoCache is not set and cache is not nil
if !params.NoCache && q.cache != nil {
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -365,7 +365,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
// ideally we should not be getting an error here
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -375,12 +375,12 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
- zap.S().Error("error marshalling merged series", zap.Error(err))
+ zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
diff --git a/pkg/query-service/app/querier/v2/helper.go b/pkg/query-service/app/querier/v2/helper.go
index 24738806d3..e564956f19 100644
--- a/pkg/query-service/app/querier/v2/helper.go
+++ b/pkg/query-service/app/querier/v2/helper.go
@@ -169,7 +169,7 @@ func (q *querier) runBuilderQuery(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -208,7 +208,7 @@ func (q *querier) runBuilderQuery(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -221,12 +221,12 @@ func (q *querier) runBuilderQuery(
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
- zap.S().Error("error marshalling merged series", zap.Error(err))
+ zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
@@ -263,7 +263,7 @@ func (q *querier) runBuilderExpression(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -289,7 +289,7 @@ func (q *querier) runBuilderExpression(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -302,12 +302,12 @@ func (q *querier) runBuilderExpression(
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
- zap.S().Error("error marshalling merged series", zap.Error(err))
+ zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go
index 50f19b89b1..e45153da7d 100644
--- a/pkg/query-service/app/querier/v2/querier.go
+++ b/pkg/query-service/app/querier/v2/querier.go
@@ -108,7 +108,7 @@ func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3.
series.Points = points
}
if pointsWithNegativeTimestamps > 0 {
- zap.S().Errorf("found points with negative timestamps for query %s", query)
+ zap.L().Error("found points with negative timestamps for query", zap.String("query", query))
}
return result, err
}
@@ -326,7 +326,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
// Ensure NoCache is not set and cache is not nil
if !params.NoCache && q.cache != nil {
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
- zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
+ zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@@ -345,7 +345,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
// ideally we should not be getting an error here
- zap.S().Error("error unmarshalling cached data", zap.Error(err))
+ zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@@ -355,12 +355,12 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
- zap.S().Error("error marshalling merged series", zap.Error(err))
+ zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
- zap.S().Error("error storing merged series", zap.Error(err))
+ zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
diff --git a/pkg/query-service/app/queryBuilder/query_builder.go b/pkg/query-service/app/queryBuilder/query_builder.go
index 647edd191b..693bc88f44 100644
--- a/pkg/query-service/app/queryBuilder/query_builder.go
+++ b/pkg/query-service/app/queryBuilder/query_builder.go
@@ -246,7 +246,7 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
}
queries[queryName] = queryString
default:
- zap.S().Errorf("Unknown data source %s", query.DataSource)
+ zap.L().Error("Unknown data source", zap.String("dataSource", string(query.DataSource)))
}
}
}
diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go
index e9c80c2507..549e74e976 100644
--- a/pkg/query-service/app/server.go
+++ b/pkg/query-service/app/server.go
@@ -115,7 +115,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
var reader interfaces.Reader
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
- zap.S().Info("Using ClickHouse as datastore ...")
+ zap.L().Info("Using ClickHouse as datastore ...")
clickhouseReader := clickhouseReader.NewReader(
localDB,
serverOptions.PromConfigPath,
@@ -304,7 +304,7 @@ func loggingMiddleware(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
- zap.S().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
+ zap.L().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
})
}
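Aside: the middleware log line above still concatenates the duration into the message while also attaching it as a field, and it computes `time.Now().Sub(startTime)` twice. A tidier rendering of the same middleware, assuming gorilla/mux; `time.Since` is the idiomatic spelling of `time.Now().Sub(...)`:

```go
// loggingMiddleware logs the matched route template and time taken per request.
func loggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		path := r.URL.Path
		if route := mux.CurrentRoute(r); route != nil {
			if tmpl, err := route.GetPathTemplate(); err == nil {
				path = tmpl
			}
		}
		startTime := time.Now()
		next.ServeHTTP(w, r)
		zap.L().Info("request served",
			zap.String("path", path),
			zap.Duration("timeTaken", time.Since(startTime)))
	})
}
```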
@@ -375,7 +375,7 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
- zap.S().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
+ zap.L().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
})
}
@@ -426,30 +426,33 @@ func extractQueryRangeV3Data(path string, r *http.Request) (map[string]interface
signozMetricsUsed := false
signozLogsUsed := false
- dataSources := []string{}
+ signozTracesUsed := false
if postData != nil {
if postData.CompositeQuery != nil {
data["queryType"] = postData.CompositeQuery.QueryType
data["panelType"] = postData.CompositeQuery.PanelType
- signozLogsUsed, signozMetricsUsed, _ = telemetry.GetInstance().CheckSigNozSignals(postData)
+ signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
}
}
- if signozMetricsUsed || signozLogsUsed {
+ if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
if signozMetricsUsed {
- dataSources = append(dataSources, "metrics")
telemetry.GetInstance().AddActiveMetricsUser()
}
if signozLogsUsed {
- dataSources = append(dataSources, "logs")
telemetry.GetInstance().AddActiveLogsUser()
}
- data["dataSources"] = dataSources
+ if signozTracesUsed {
+ telemetry.GetInstance().AddActiveTracesUser()
+ }
+ data["metricsUsed"] = signozMetricsUsed
+ data["logsUsed"] = signozLogsUsed
+ data["tracesUsed"] = signozTracesUsed
userEmail, err := auth.GetEmailFromJwt(r.Context())
if err == nil {
- telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_V3, data, userEmail, true)
+ telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail)
}
}
return data, true
@@ -547,7 +550,7 @@ func (s *Server) initListeners() error {
return err
}
- zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
+ zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
// listen on private port to support internal services
privateHostPort := s.serverOptions.PrivateHostPort
@@ -560,7 +563,7 @@ func (s *Server) initListeners() error {
if err != nil {
return err
}
- zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
+ zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
return nil
}
@@ -572,7 +575,7 @@ func (s *Server) Start() error {
if !s.serverOptions.DisableRules {
s.ruleManager.Start()
} else {
- zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
+		zap.L().Info("Rules disabled as rules.disable is set to TRUE")
}
err := s.initListeners()
@@ -586,23 +589,23 @@ func (s *Server) Start() error {
}
go func() {
- zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
+ zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
switch err := s.httpServer.Serve(s.httpConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
default:
- zap.S().Error("Could not start HTTP server", zap.Error(err))
+ zap.L().Error("Could not start HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
}()
go func() {
- zap.S().Info("Starting pprof server", zap.String("addr", constants.DebugHttpPort))
+ zap.L().Info("Starting pprof server", zap.String("addr", constants.DebugHttpPort))
err = http.ListenAndServe(constants.DebugHttpPort, nil)
if err != nil {
- zap.S().Error("Could not start pprof server", zap.Error(err))
+ zap.L().Error("Could not start pprof server", zap.Error(err))
}
}()
@@ -612,14 +615,14 @@ func (s *Server) Start() error {
}
fmt.Println("starting private http")
go func() {
- zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
+ zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
switch err := s.privateHTTP.Serve(s.privateConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
- zap.S().Info("private http server closed")
+ zap.L().Info("private http server closed")
default:
- zap.S().Error("Could not start private HTTP server", zap.Error(err))
+ zap.L().Error("Could not start private HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
@@ -627,10 +630,10 @@ func (s *Server) Start() error {
}()
go func() {
- zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", constants.OpAmpWsEndpoint))
+ zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", constants.OpAmpWsEndpoint))
err := s.opampServer.Start(constants.OpAmpWsEndpoint)
if err != nil {
- zap.S().Info("opamp ws server failed to start", err)
+			zap.L().Error("opamp ws server failed to start", zap.Error(err))
s.unavailableChannel <- healthcheck.Unavailable
}
}()
@@ -703,7 +706,7 @@ func makeRulesManager(
return nil, fmt.Errorf("rule manager error: %v", err)
}
- zap.S().Info("rules manager is ready")
+ zap.L().Info("rules manager is ready")
return manager, nil
}
diff --git a/pkg/query-service/auth/auth.go b/pkg/query-service/auth/auth.go
index 6b96a6da85..0a90c8c730 100644
--- a/pkg/query-service/auth/auth.go
+++ b/pkg/query-service/auth/auth.go
@@ -40,7 +40,7 @@ type InviteEmailData struct {
// The root user should be able to invite people to create account on SigNoz cluster.
func Invite(ctx context.Context, req *model.InviteRequest) (*model.InviteResponse, error) {
- zap.S().Debugf("Got an invite request for email: %s\n", req.Email)
+ zap.L().Debug("Got an invite request for email", zap.String("email", req.Email))
token, err := utils.RandomHex(opaqueTokenSize)
if err != nil {
@@ -110,13 +110,13 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string)
tmpl, err := template.ParseFiles(constants.InviteEmailTemplate)
if err != nil {
- zap.S().Errorf("failed to send email", err)
+ zap.L().Error("failed to send email", zap.Error(err))
return
}
var body bytes.Buffer
if err := tmpl.Execute(&body, data); err != nil {
- zap.S().Errorf("failed to send email", err)
+ zap.L().Error("failed to send email", zap.Error(err))
return
}
@@ -126,7 +126,7 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string)
body.String(),
)
if err != nil {
- zap.S().Errorf("failed to send email", err)
+ zap.L().Error("failed to send email", zap.Error(err))
return
}
return
@@ -134,7 +134,7 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string)
// RevokeInvite is used to revoke the invitation for the given email.
func RevokeInvite(ctx context.Context, email string) error {
- zap.S().Debugf("RevokeInvite method invoked for email: %s\n", email)
+ zap.L().Debug("RevokeInvite method invoked for email", zap.String("email", email))
if !isValidEmail(email) {
return ErrorInvalidInviteToken
@@ -148,7 +148,7 @@ func RevokeInvite(ctx context.Context, email string) error {
// GetInvite returns an invitation object for the given token.
func GetInvite(ctx context.Context, token string) (*model.InvitationResponseObject, error) {
- zap.S().Debugf("GetInvite method invoked for token: %s\n", token)
+ zap.L().Debug("GetInvite method invoked for token", zap.String("token", token))
inv, apiErr := dao.DB().GetInviteFromToken(ctx, token)
if apiErr != nil {
@@ -234,24 +234,23 @@ func ResetPassword(ctx context.Context, req *model.ResetPasswordRequest) error {
return nil
}
-func ChangePassword(ctx context.Context, req *model.ChangePasswordRequest) error {
-
+func ChangePassword(ctx context.Context, req *model.ChangePasswordRequest) *model.ApiError {
user, apiErr := dao.DB().GetUser(ctx, req.UserId)
if apiErr != nil {
- return errors.Wrap(apiErr.Err, "failed to query user from the DB")
+ return apiErr
}
if user == nil || !passwordMatch(user.Password, req.OldPassword) {
- return ErrorInvalidCreds
+ return model.ForbiddenError(ErrorInvalidCreds)
}
hash, err := PasswordHash(req.NewPassword)
if err != nil {
- return errors.Wrap(err, "Failed to generate password hash")
+ return model.InternalError(errors.New("Failed to generate password hash"))
}
if apiErr := dao.DB().UpdateUserPassword(ctx, hash, user.Id); apiErr != nil {
- return apiErr.Err
+ return apiErr
}
return nil
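The signature change from `error` to `*model.ApiError` lets callers map the error type to an HTTP status (forbidden for bad credentials, internal for hashing or DB failures) instead of string-matching a wrapped error. An illustrative handler-side sketch; `parseChangePasswordRequest` and `RespondError` are hypothetical stand-ins for the actual HTTP plumbing:

```go
// changePasswordHandler shows how a caller can branch on the typed error.
// Everything except auth.ChangePassword and model.ApiError is illustrative.
func changePasswordHandler(w http.ResponseWriter, r *http.Request) {
	req, err := parseChangePasswordRequest(r) // hypothetical request parser
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if apiErr := auth.ChangePassword(r.Context(), req); apiErr != nil {
		// apiErr.Typ distinguishes e.g. forbidden (wrong old password)
		// from an internal failure, so the right status code can be chosen.
		RespondError(w, apiErr, nil) // hypothetical JSON error responder
		return
	}
	w.WriteHeader(http.StatusOK)
}
```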
@@ -283,13 +282,13 @@ func RegisterFirstUser(ctx context.Context, req *RegisterRequest) (*model.User,
org, apierr := dao.DB().CreateOrg(ctx,
&model.Organization{Name: req.OrgName})
if apierr != nil {
- zap.S().Debugf("CreateOrg failed, err: %v\n", zap.Error(apierr.ToError()))
+ zap.L().Error("CreateOrg failed", zap.Error(apierr.ToError()))
return nil, apierr
}
group, apiErr := dao.DB().GetGroupByName(ctx, groupName)
if apiErr != nil {
- zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
+ zap.L().Error("GetGroupByName failed", zap.Error(apiErr.Err))
return nil, apiErr
}
@@ -298,7 +297,7 @@ func RegisterFirstUser(ctx context.Context, req *RegisterRequest) (*model.User,
hash, err = PasswordHash(req.Password)
if err != nil {
- zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err))
+ zap.L().Error("failed to generate password hash when registering a user", zap.Error(err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
@@ -329,7 +328,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
invite, err := ValidateInvite(ctx, req)
if err != nil {
- zap.S().Errorf("failed to validate invite token", err)
+ zap.L().Error("failed to validate invite token", zap.Error(err))
return nil, model.BadRequest(model.ErrSignupFailed{})
}
@@ -338,7 +337,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
// in the same transaction at the end of this function
userPayload, apierr := dao.DB().GetUserByEmail(ctx, invite.Email)
if apierr != nil {
- zap.S().Debugf("failed to get user by email", apierr.Err)
+ zap.L().Error("failed to get user by email", zap.Error(apierr.Err))
return nil, apierr
}
@@ -348,7 +347,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
}
if invite.OrgId == "" {
- zap.S().Errorf("failed to find org in the invite")
+ zap.L().Error("failed to find org in the invite")
return nil, model.InternalError(fmt.Errorf("invalid invite, org not found"))
}
@@ -359,7 +358,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
group, apiErr := dao.DB().GetGroupByName(ctx, invite.Role)
if apiErr != nil {
- zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
+ zap.L().Error("GetGroupByName failed", zap.Error(apiErr.Err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
@@ -369,13 +368,13 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
if req.Password != "" {
hash, err = PasswordHash(req.Password)
if err != nil {
- zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err))
+ zap.L().Error("failed to generate password hash when registering a user", zap.Error(err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
} else {
hash, err = PasswordHash(utils.GeneratePassowrd())
if err != nil {
- zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err))
+ zap.L().Error("failed to generate password hash when registering a user", zap.Error(err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
}
@@ -394,13 +393,13 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
// TODO(Ahsan): Ideally create user and delete invitation should happen in a txn.
user, apiErr = dao.DB().CreateUser(ctx, user, false)
if apiErr != nil {
- zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err)
+ zap.L().Error("CreateUser failed", zap.Error(apiErr.Err))
return nil, apiErr
}
apiErr = dao.DB().DeleteInvitation(ctx, user.Email)
if apiErr != nil {
- zap.S().Debugf("delete invitation failed, err: %v\n", apiErr.Err)
+ zap.L().Error("delete invitation failed", zap.Error(apiErr.Err))
return nil, apiErr
}
@@ -429,17 +428,17 @@ func Register(ctx context.Context, req *RegisterRequest) (*model.User, *model.Ap
// Login method returns access and refresh tokens on successful login, else it errors out.
func Login(ctx context.Context, request *model.LoginRequest) (*model.LoginResponse, error) {
- zap.S().Debugf("Login method called for user: %s\n", request.Email)
+ zap.L().Debug("Login method called for user", zap.String("email", request.Email))
user, err := authenticateLogin(ctx, request)
if err != nil {
- zap.S().Debugf("Failed to authenticate login request, %v", err)
+ zap.L().Error("Failed to authenticate login request", zap.Error(err))
return nil, err
}
userjwt, err := GenerateJWTForUser(&user.User)
if err != nil {
- zap.S().Debugf("Failed to generate JWT against login creds, %v", err)
+ zap.L().Error("Failed to generate JWT against login creds", zap.Error(err))
return nil, err
}
diff --git a/pkg/query-service/auth/jwt.go b/pkg/query-service/auth/jwt.go
index 90e2f7008d..b27d43fb9d 100644
--- a/pkg/query-service/auth/jwt.go
+++ b/pkg/query-service/auth/jwt.go
@@ -60,7 +60,7 @@ func validateUser(tok string) (*model.UserPayload, error) {
func AttachJwtToContext(ctx context.Context, r *http.Request) context.Context {
token, err := ExtractJwtFromRequest(r)
if err != nil {
- zap.S().Debugf("Error while getting token from header, %v", err)
+ zap.L().Error("Error while getting token from header", zap.Error(err))
return ctx
}
diff --git a/pkg/query-service/cache/redis/redis.go b/pkg/query-service/cache/redis/redis.go
index 22278c52ed..6338eca6f3 100644
--- a/pkg/query-service/cache/redis/redis.go
+++ b/pkg/query-service/cache/redis/redis.go
@@ -59,7 +59,7 @@ func (c *cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.Ret
func (c *cache) SetTTL(cacheKey string, ttl time.Duration) {
err := c.client.Expire(context.Background(), cacheKey, ttl).Err()
if err != nil {
- zap.S().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err))
+ zap.L().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err))
}
}
@@ -67,7 +67,7 @@ func (c *cache) SetTTL(cacheKey string, ttl time.Duration) {
func (c *cache) Remove(cacheKey string) {
err := c.client.Del(context.Background(), cacheKey).Err()
if err != nil {
- zap.S().Error("error deleting cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
+ zap.L().Error("error deleting cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
}
}
@@ -102,7 +102,7 @@ func (c *cache) GetOptions() *Options {
func (c *cache) GetTTL(cacheKey string) time.Duration {
ttl, err := c.client.TTL(context.Background(), cacheKey).Result()
if err != nil {
- zap.S().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
+ zap.L().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
}
return ttl
}
diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go
index 6181a66ea8..54fc819478 100644
--- a/pkg/query-service/constants/constants.go
+++ b/pkg/query-service/constants/constants.go
@@ -58,8 +58,8 @@ var InviteEmailTemplate = GetOrDefaultEnv("INVITE_EMAIL_TEMPLATE", "/root/templa
// Alert manager channel subpath
var AmChannelApiPath = GetOrDefaultEnv("ALERTMANAGER_API_CHANNEL_PATH", "v1/routes")
-var OTLPTarget = GetOrDefaultEnv("OTLP_TARGET", "")
-var LogExportBatchSize = GetOrDefaultEnv("LOG_EXPORT_BATCH_SIZE", "1000")
+var OTLPTarget = GetOrDefaultEnv("OTEL_EXPORTER_OTLP_ENDPOINT", "")
+var LogExportBatchSize = GetOrDefaultEnv("OTEL_BLRP_MAX_EXPORT_BATCH_SIZE", "512")
var RELATIONAL_DATASOURCE_PATH = GetOrDefaultEnv("SIGNOZ_LOCAL_DB_PATH", "/var/lib/signoz/signoz.db")
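The renamed variables adopt the standard OpenTelemetry SDK environment variable names (`OTEL_EXPORTER_OTLP_ENDPOINT`, `OTEL_BLRP_MAX_EXPORT_BATCH_SIZE`), and 512 matches the spec's default batch size. `GetOrDefaultEnv` is presumably the usual lookup-with-fallback; a minimal sketch consistent with these call sites:

```go
// GetOrDefaultEnv returns the value of the environment variable key,
// falling back to fallback when it is unset or empty.
func GetOrDefaultEnv(key string, fallback string) string {
	v := os.Getenv(key)
	if len(v) == 0 {
		return fallback
	}
	return v
}
```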
diff --git a/pkg/query-service/dao/sqlite/connection.go b/pkg/query-service/dao/sqlite/connection.go
index a7335d6426..a2545e9531 100644
--- a/pkg/query-service/dao/sqlite/connection.go
+++ b/pkg/query-service/dao/sqlite/connection.go
@@ -180,7 +180,7 @@ func (mds *ModelDaoSqlite) createGroupIfNotPresent(ctx context.Context,
return group, nil
}
- zap.S().Debugf("%s is not found, creating it", name)
+ zap.L().Debug("group is not found, creating it", zap.String("group_name", name))
group, cErr := mds.CreateGroup(ctx, &model.Group{Name: name})
if cErr != nil {
return nil, cErr.Err
diff --git a/pkg/query-service/featureManager/manager.go b/pkg/query-service/featureManager/manager.go
index 15175b1882..439b8b7bd2 100644
--- a/pkg/query-service/featureManager/manager.go
+++ b/pkg/query-service/featureManager/manager.go
@@ -43,12 +43,12 @@ func (fm *FeatureManager) GetFeatureFlags() (model.FeatureSet, error) {
}
func (fm *FeatureManager) InitFeatures(req model.FeatureSet) error {
- zap.S().Error("InitFeatures not implemented in OSS")
+ zap.L().Error("InitFeatures not implemented in OSS")
return nil
}
func (fm *FeatureManager) UpdateFeatureFlag(req model.Feature) error {
- zap.S().Error("UpdateFeatureFlag not implemented in OSS")
+ zap.L().Error("UpdateFeatureFlag not implemented in OSS")
return nil
}
@@ -63,4 +63,4 @@ func (fm *FeatureManager) GetFeatureFlag(key string) (model.Feature, error) {
}
}
return model.Feature{}, model.ErrFeatureUnavailable{Key: key}
-}
\ No newline at end of file
+}
diff --git a/pkg/query-service/integrations/alertManager/manager.go b/pkg/query-service/integrations/alertManager/manager.go
index 3b7df3ce56..d80893010e 100644
--- a/pkg/query-service/integrations/alertManager/manager.go
+++ b/pkg/query-service/integrations/alertManager/manager.go
@@ -83,13 +83,12 @@ func (m *manager) AddRoute(receiver *Receiver) *model.ApiError {
response, err := http.Post(amURL, contentType, bytes.NewBuffer(receiverString))
if err != nil {
- zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(POST %s)\n", amURL), err)
+ zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
- err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in API call to alertmanager(POST %s)\n", amURL), response.Status)
- zap.S().Error(err)
+ zap.L().Error("Error in getting 2xx response in API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return nil
@@ -102,7 +101,7 @@ func (m *manager) EditRoute(receiver *Receiver) *model.ApiError {
req, err := http.NewRequest(http.MethodPut, amURL, bytes.NewBuffer(receiverString))
if err != nil {
- zap.S().Errorf(fmt.Sprintf("Error creating new update request for API call to alertmanager(PUT %s)\n", amURL), err)
+ zap.L().Error("Error creating new update request for API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -112,13 +111,12 @@ func (m *manager) EditRoute(receiver *Receiver) *model.ApiError {
response, err := client.Do(req)
if err != nil {
- zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(PUT %s)\n", amURL), err)
+ zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
- err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in PUT API call to alertmanager(PUT %s)\n", amURL), response.Status)
- zap.S().Error(err)
+ zap.L().Error("Error in getting 2xx response in PUT API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return nil
@@ -132,7 +130,7 @@ func (m *manager) DeleteRoute(name string) *model.ApiError {
req, err := http.NewRequest(http.MethodDelete, amURL, bytes.NewBuffer(requestData))
if err != nil {
- zap.S().Errorf("Error in creating new delete request to alertmanager/v1/receivers\n", err)
+ zap.L().Error("Error in creating new delete request to alertmanager/v1/receivers", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@@ -142,13 +140,13 @@ func (m *manager) DeleteRoute(name string) *model.ApiError {
response, err := client.Do(req)
if err != nil {
- zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(DELETE %s)\n", amURL), err)
+ zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in PUT API call to alertmanager(DELETE %s)\n", amURL), response.Status)
- zap.S().Error(err)
+ zap.L().Error("Error in getting 2xx response in PUT API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return nil
@@ -162,19 +160,19 @@ func (m *manager) TestReceiver(receiver *Receiver) *model.ApiError {
response, err := http.Post(amTestURL, contentType, bytes.NewBuffer(receiverBytes))
if err != nil {
- zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(POST %s)\n", amTestURL), err)
+ zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amTestURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 201 && response.StatusCode < 400 {
err := fmt.Errorf(fmt.Sprintf("Invalid parameters in test alert api for alertmanager(POST %s)\n", amTestURL), response.Status)
- zap.S().Error(err)
+ zap.L().Error("Invalid parameters in test alert api for alertmanager", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 400 {
err := fmt.Errorf(fmt.Sprintf("Received Server Error response for API call to alertmanager(POST %s)\n", amTestURL), response.Status)
- zap.S().Error(err)
+ zap.L().Error("Received Server Error response for API call to alertmanager", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
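
A recurring hazard in this file: the old code built an err only to feed zap.S().Error(err), but that same err is also what gets wrapped into the returned ApiError. If the refactor drops the err := line while keeping the return, the ApiError silently wraps a nil (or stale) error. A hedged sketch of the shape each status-code branch should preserve:

if response.StatusCode > 299 {
	err := fmt.Errorf("alertmanager returned status %s for request to %s", response.Status, amURL)
	zap.L().Error("Error in getting 2xx response in API call to alertmanager",
		zap.String("url", amURL), zap.String("status", response.Status))
	return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
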
diff --git a/pkg/query-service/integrations/alertManager/notifier.go b/pkg/query-service/integrations/alertManager/notifier.go
index 148d489ed0..e86cf28c5e 100644
--- a/pkg/query-service/integrations/alertManager/notifier.go
+++ b/pkg/query-service/integrations/alertManager/notifier.go
@@ -87,11 +87,11 @@ func NewNotifier(o *NotifierOptions, logger log.Logger) (*Notifier, error) {
amset, err := newAlertmanagerSet(o.AlertManagerURLs, timeout, logger)
if err != nil {
- zap.S().Errorf("failed to parse alert manager urls")
+ zap.L().Error("failed to parse alert manager urls")
return n, err
}
n.alertmanagers = amset
- zap.S().Info("Starting notifier with alert manager:", o.AlertManagerURLs)
+ zap.L().Info("Starting notifier with alert manager", zap.Strings("urls", o.AlertManagerURLs))
return n, nil
}
@@ -123,7 +123,7 @@ func (n *Notifier) nextBatch() []*Alert {
// Run dispatches notifications continuously.
func (n *Notifier) Run() {
- zap.S().Info("msg: Initiating alert notifier...")
+ zap.L().Info("msg: Initiating alert notifier...")
for {
select {
case <-n.ctx.Done():
@@ -133,7 +133,7 @@ func (n *Notifier) Run() {
alerts := n.nextBatch()
if !n.sendAll(alerts...) {
- zap.S().Warn("msg: dropped alerts", "\t count:", len(alerts))
+ zap.L().Warn("msg: dropped alerts", zap.Int("count", len(alerts)))
// n.metrics.dropped.Add(float64(len(alerts)))
}
// If the queue still has items left, kick off the next iteration.
@@ -205,7 +205,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool {
b, err := json.Marshal(alerts)
if err != nil {
- zap.S().Errorf("msg", "Encoding alerts failed", "err", err)
+ zap.L().Error("Encoding alerts failed", zap.Error(err))
return false
}
@@ -229,7 +229,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool {
go func(ams *alertmanagerSet, am Manager) {
u := am.URLPath(alertPushEndpoint).String()
if err := n.sendOne(ctx, ams.client, u, b); err != nil {
- zap.S().Errorf("alertmanager", u, "count", len(alerts), "msg", "Error calling alert API", "err", err)
+ zap.L().Error("Error calling alert API", zap.String("alertmanager", u), zap.Int("count", len(alerts)), zap.Error(err))
} else {
atomic.AddUint64(&numSuccess, 1)
}
diff --git a/pkg/query-service/interfaces/interface.go b/pkg/query-service/interfaces/interface.go
index 1ca1fd9958..dfe24c9064 100644
--- a/pkg/query-service/interfaces/interface.go
+++ b/pkg/query-service/interfaces/interface.go
@@ -67,6 +67,9 @@ type Reader interface {
GetMetricAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
GetMetricAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
+ // Returns `MetricStatus` for the latest received metric among `metricNames`. Useful for status calculations.
+ GetLatestReceivedMetric(ctx context.Context, metricNames []string) (*model.MetricStatus, *model.ApiError)
+
// QB V3 metrics/traces/logs
GetTimeSeriesResultV3(ctx context.Context, query string) ([]*v3.Series, error)
GetListResultV3(ctx context.Context, query string) ([]*v3.Row, error)
@@ -74,6 +77,7 @@ type Reader interface {
GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error)
GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error)
+ GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error)
GetTotalSpans(ctx context.Context) (uint64, error)
GetTotalLogs(ctx context.Context) (uint64, error)
GetTotalSamples(ctx context.Context) (uint64, error)
diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go
index f0602c4dcd..ec68c61939 100644
--- a/pkg/query-service/main.go
+++ b/pkg/query-service/main.go
@@ -18,7 +18,7 @@ import (
)
func initZapLog() *zap.Logger {
- config := zap.NewDevelopmentConfig()
+ config := zap.NewProductionConfig()
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
config.EncoderConfig.TimeKey = "timestamp"
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
@@ -85,9 +85,9 @@ func main() {
auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
if len(auth.JwtSecret) == 0 {
- zap.S().Warn("No JWT secret key is specified.")
+ zap.L().Warn("No JWT secret key is specified.")
} else {
- zap.S().Info("No JWT secret key set successfully.")
+ zap.L().Info("No JWT secret key set successfully.")
}
server, err := app.NewServer(serverOptions)
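
For context, zap.NewProductionConfig defaults to a sampled JSON encoder, whereas the old development config was console-oriented; note that the retained CapitalColorLevelEncoder will embed ANSI color escapes in the JSON level field, which may be worth revisiting. A minimal sketch of wiring this logger in as the process-global one, so the zap.L() calls above resolve to it:

func initZapLog() *zap.Logger {
	config := zap.NewProductionConfig()
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	logger, _ := config.Build()
	return logger
}

func main() {
	logger := initZapLog()
	zap.ReplaceGlobals(logger) // zap.L() and zap.S() now use this logger
	defer logger.Sync()
	// ...
}
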
diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go
index 05da7f5ab7..1f3970e0d4 100644
--- a/pkg/query-service/model/response.go
+++ b/pkg/query-service/model/response.go
@@ -112,6 +112,13 @@ func UnavailableError(err error) *ApiError {
}
}
+func ForbiddenError(err error) *ApiError {
+ return &ApiError{
+ Typ: ErrorForbidden,
+ Err: err,
+ }
+}
+
func WrapApiError(err *ApiError, msg string) *ApiError {
return &ApiError{
Typ: err.Type(),
@@ -511,6 +518,12 @@ type MetricPoint struct {
Value float64
}
+type MetricStatus struct {
+ MetricName string
+ LastReceivedTsMillis int64
+ LastReceivedLabels map[string]string
+}
+
// MarshalJSON implements json.Marshaler.
func (p *MetricPoint) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(p.Value, 'f', -1, 64)
@@ -628,6 +641,12 @@ type AlertsInfo struct {
TracesBasedAlerts int `json:"tracesBasedAlerts"`
}
+type SavedViewsInfo struct {
+ TotalSavedViews int `json:"totalSavedViews"`
+ TracesSavedViews int `json:"tracesSavedViews"`
+ LogsSavedViews int `json:"logsSavedViews"`
+}
+
type DashboardsInfo struct {
TotalDashboards int `json:"totalDashboards"`
TotalDashboardsWithPanelAndName int `json:"totalDashboardsWithPanelAndName"` // dashboards with panel and name without sample title
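
ForbiddenError rounds out the existing ApiError constructor family, so handlers can return it directly and let the response layer map ErrorForbidden to the corresponding HTTP status. A usage sketch (the permission check is illustrative):

// hypothetical guard inside an API handler
if user.Role != "ADMIN" {
	return model.ForbiddenError(fmt.Errorf("only admins may perform this action"))
}
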
diff --git a/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go b/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go
index 0139792dfa..e853a37685 100644
--- a/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go
+++ b/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go
@@ -143,11 +143,11 @@ func exprFormattedValue(v interface{}) string {
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool:
return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
default:
- zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
+ zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
return ""
}
default:
- zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
+ zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
return ""
}
}
diff --git a/pkg/query-service/rules/alerting.go b/pkg/query-service/rules/alerting.go
index b2ee0b53d0..b2f511c6c0 100644
--- a/pkg/query-service/rules/alerting.go
+++ b/pkg/query-service/rules/alerting.go
@@ -15,15 +15,9 @@ import (
// this file contains common structs and methods used by
// rule engine
-// how long before re-sending the alert
-const resolvedRetention = 15 * time.Minute
-
const (
- // AlertMetricName is the metric name for synthetic alert timeseries.
- alertMetricName = "ALERTS"
-
- // AlertForStateMetricName is the metric name for 'for' state of alert.
- alertForStateMetricName = "ALERTS_FOR_STATE"
+ // how long before re-sending the alert
+ resolvedRetention = 15 * time.Minute
TestAlertPostFix = "_TEST_ALERT"
)
@@ -143,7 +137,7 @@ type RuleCondition struct {
CompareOp CompareOp `yaml:"op,omitempty" json:"op,omitempty"`
Target *float64 `yaml:"target,omitempty" json:"target,omitempty"`
AlertOnAbsent bool `yaml:"alertOnAbsent,omitempty" json:"alertOnAbsent,omitempty"`
- AbsentFor time.Duration `yaml:"absentFor,omitempty" json:"absentFor,omitempty"`
+ AbsentFor uint64 `yaml:"absentFor,omitempty" json:"absentFor,omitempty"`
MatchType MatchType `json:"matchType,omitempty"`
TargetUnit string `json:"targetUnit,omitempty"`
SelectedQuery string `json:"selectedQueryName,omitempty"`
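
AbsentFor switches from time.Duration to a bare uint64; per the call site in thresholdRule.go further down, the value is interpreted as minutes and converted at the point of use. A sketch of that conversion, assuming minutes as the unit:

// AbsentFor is stored as a plain number of minutes, so convert
// explicitly before doing time arithmetic.
absentWindow := time.Duration(cond.AbsentFor) * time.Minute
if cond.AlertOnAbsent && lastSeen.Add(absentWindow).Before(time.Now()) {
	// no datapoints for the configured window: raise a "no data" alert
}
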
diff --git a/pkg/query-service/rules/apiParams.go b/pkg/query-service/rules/apiParams.go
index 0ccf885b3d..af7e9378f6 100644
--- a/pkg/query-service/rules/apiParams.go
+++ b/pkg/query-service/rules/apiParams.go
@@ -10,7 +10,6 @@ import (
"github.com/pkg/errors"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
- "go.uber.org/zap"
"go.signoz.io/signoz/pkg/query-service/utils/times"
"go.signoz.io/signoz/pkg/query-service/utils/timestamp"
@@ -32,7 +31,7 @@ func newApiErrorBadData(err error) *model.ApiError {
// PostableRule is used to create alerting rule from HTTP api
type PostableRule struct {
- Alert string `yaml:"alert,omitempty" json:"alert,omitempty"`
+ AlertName string `yaml:"alert,omitempty" json:"alert,omitempty"`
AlertType string `yaml:"alertType,omitempty" json:"alertType,omitempty"`
Description string `yaml:"description,omitempty" json:"description,omitempty"`
RuleType RuleType `yaml:"ruleType,omitempty" json:"ruleType,omitempty"`
@@ -74,18 +73,15 @@ func parseIntoRule(initRule PostableRule, content []byte, kind string) (*Postabl
var err error
if kind == "json" {
if err = json.Unmarshal(content, rule); err != nil {
- zap.S().Debugf("postable rule content", string(content), "\t kind:", kind)
return nil, []error{fmt.Errorf("failed to load json")}
}
} else if kind == "yaml" {
if err = yaml.Unmarshal(content, rule); err != nil {
- zap.S().Debugf("postable rule content", string(content), "\t kind:", kind)
return nil, []error{fmt.Errorf("failed to load yaml")}
}
} else {
return nil, []error{fmt.Errorf("invalid data type")}
}
- zap.S().Debugf("postable rule(parsed):", rule)
if rule.RuleCondition == nil && rule.Expr != "" {
// account for legacy rules
@@ -126,8 +122,6 @@ func parseIntoRule(initRule PostableRule, content []byte, kind string) (*Postabl
}
}
- zap.S().Debugf("postable rule:", rule, "\t condition", rule.RuleCondition.String())
-
if errs := rule.Validate(); len(errs) > 0 {
return nil, errs
}
@@ -194,7 +188,7 @@ func (r *PostableRule) Validate() (errs []error) {
}
func testTemplateParsing(rl *PostableRule) (errs []error) {
- if rl.Alert == "" {
+ if rl.AlertName == "" {
// Not an alerting rule.
return errs
}
@@ -206,7 +200,7 @@ func testTemplateParsing(rl *PostableRule) (errs []error) {
tmpl := NewTemplateExpander(
context.TODO(),
defs+text,
- "__alert_"+rl.Alert,
+ "__alert_"+rl.AlertName,
tmplData,
times.Time(timestamp.FromTime(time.Now())),
nil,
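
The debug logs removed from parseIntoRule would have dumped full rule payloads into the logs; dropping them rather than porting them to zap.L() is the safer call. The JSON-then-YAML fallback that initiate() in manager.go builds on top of this function reduces to the following shape — a simplified sketch, not the exact code:

func parseRuleData(content []byte) (*PostableRule, []error) {
	rule := &PostableRule{}
	if err := json.Unmarshal(content, rule); err == nil {
		return rule, nil
	}
	// legacy rules may still be stored as YAML; retry before failing
	if err := yaml.Unmarshal(content, rule); err != nil {
		return nil, []error{fmt.Errorf("failed to load rule as json or yaml")}
	}
	return rule, nil
}
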
diff --git a/pkg/query-service/rules/db.go b/pkg/query-service/rules/db.go
index f0b1bb3281..cf903884fd 100644
--- a/pkg/query-service/rules/db.go
+++ b/pkg/query-service/rules/db.go
@@ -73,7 +73,7 @@ func (r *ruleDB) CreateRuleTx(ctx context.Context, rule string) (int64, Tx, erro
stmt, err := tx.Prepare(`INSERT into rules (created_at, created_by, updated_at, updated_by, data) VALUES($1,$2,$3,$4,$5);`)
if err != nil {
- zap.S().Errorf("Error in preparing statement for INSERT to rules\n", err)
+ zap.L().Error("Error in preparing statement for INSERT to rules", zap.Error(err))
tx.Rollback()
return lastInsertId, nil, err
}
@@ -82,14 +82,14 @@ func (r *ruleDB) CreateRuleTx(ctx context.Context, rule string) (int64, Tx, erro
result, err := stmt.Exec(createdAt, userEmail, updatedAt, userEmail, rule)
if err != nil {
- zap.S().Errorf("Error in Executing prepared statement for INSERT to rules\n", err)
+ zap.L().Error("Error in Executing prepared statement for INSERT to rules", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return lastInsertId, nil, err
}
lastInsertId, err = result.LastInsertId()
if err != nil {
- zap.S().Errorf("Error in getting last insert id for INSERT to rules\n", err)
+ zap.L().Error("Error in getting last insert id for INSERT to rules\n", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return lastInsertId, nil, err
}
@@ -122,14 +122,14 @@ func (r *ruleDB) EditRuleTx(ctx context.Context, rule string, id string) (string
//}
stmt, err := r.Prepare(`UPDATE rules SET updated_by=$1, updated_at=$2, data=$3 WHERE id=$4;`)
if err != nil {
- zap.S().Errorf("Error in preparing statement for UPDATE to rules\n", err)
+ zap.L().Error("Error in preparing statement for UPDATE to rules", zap.Error(err))
// tx.Rollback()
return groupName, nil, err
}
defer stmt.Close()
if _, err := stmt.Exec(userEmail, updatedAt, rule, idInt); err != nil {
- zap.S().Errorf("Error in Executing prepared statement for UPDATE to rules\n", err)
+ zap.L().Error("Error in Executing prepared statement for UPDATE to rules", zap.Error(err))
// tx.Rollback() // return an error too, we may want to wrap them
return groupName, nil, err
}
@@ -158,7 +158,7 @@ func (r *ruleDB) DeleteRuleTx(ctx context.Context, id string) (string, Tx, error
defer stmt.Close()
if _, err := stmt.Exec(idInt); err != nil {
- zap.S().Errorf("Error in Executing prepared statement for DELETE to rules\n", err)
+ zap.L().Error("Error in Executing prepared statement for DELETE to rules", zap.Error(err))
// tx.Rollback()
return groupName, nil, err
}
@@ -175,7 +175,7 @@ func (r *ruleDB) GetStoredRules(ctx context.Context) ([]StoredRule, error) {
err := r.Select(&rules, query)
if err != nil {
- zap.S().Debug("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, err
}
@@ -193,10 +193,10 @@ func (r *ruleDB) GetStoredRule(ctx context.Context, id string) (*StoredRule, err
query := fmt.Sprintf("SELECT id, created_at, created_by, updated_at, updated_by, data FROM rules WHERE id=%d", intId)
err = r.Get(rule, query)
- // zap.S().Info(query)
+ // zap.L().Info(query)
if err != nil {
- zap.S().Error("Error in processing sql query: ", err)
+ zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, err
}
diff --git a/pkg/query-service/rules/manager.go b/pkg/query-service/rules/manager.go
index 530bb30d14..cad02523d7 100644
--- a/pkg/query-service/rules/manager.go
+++ b/pkg/query-service/rules/manager.go
@@ -125,7 +125,7 @@ func NewManager(o *ManagerOptions) (*Manager, error) {
func (m *Manager) Start() {
if err := m.initiate(); err != nil {
- zap.S().Errorf("failed to initialize alerting rules manager: %v", err)
+ zap.L().Error("failed to initialize alerting rules manager", zap.Error(err))
}
m.run()
}
@@ -154,40 +154,40 @@ func (m *Manager) initiate() error {
if len(errs) > 0 {
if errs[0].Error() == "failed to load json" {
- zap.S().Info("failed to load rule in json format, trying yaml now:", rec.Data)
+ zap.L().Info("failed to load rule in json format, trying yaml now:", zap.String("name", taskName))
// see if rule is stored in yaml format
parsedRule, errs = parsePostableRule([]byte(rec.Data), "yaml")
if parsedRule == nil {
- zap.S().Errorf("failed to parse and initialize yaml rule:", errs)
+ zap.L().Error("failed to parse and initialize yaml rule", zap.String("name", taskName), zap.Error(err))
// just one rule is being parsed so expect just one error
loadErrors = append(loadErrors, errs[0])
continue
} else {
// rule stored in yaml, so migrate it to json
- zap.S().Info("msg:", "migrating rule from JSON to yaml", "\t rule:", rec.Data, "\t parsed rule:", parsedRule)
+ zap.L().Info("migrating rule from JSON to yaml", zap.String("name", taskName))
ruleJSON, err := json.Marshal(parsedRule)
if err == nil {
taskName, _, err := m.ruleDB.EditRuleTx(context.Background(), string(ruleJSON), fmt.Sprintf("%d", rec.Id))
if err != nil {
- zap.S().Errorf("msg: failed to migrate rule ", "/t error:", err)
+ zap.L().Error("failed to migrate rule", zap.String("name", taskName), zap.Error(err))
} else {
- zap.S().Info("msg:", "migrated rule from yaml to json", "/t rule:", taskName)
+ zap.L().Info("migrated rule from yaml to json", zap.String("name", taskName))
}
}
}
} else {
- zap.S().Errorf("failed to parse and initialize rule:", errs)
+ zap.L().Error("failed to parse and initialize rule", zap.String("name", taskName), zap.Error(err))
// just one rule is being parsed so expect just one error
- loadErrors = append(loadErrors, errs[0])
+ loadErrors = append(loadErrors, err)
continue
}
}
if !parsedRule.Disabled {
err := m.addTask(parsedRule, taskName)
if err != nil {
- zap.S().Errorf("failed to load the rule definition (%s): %v", taskName, err)
+ zap.L().Error("failed to load the rule definition", zap.String("name", taskName), zap.Error(err))
}
}
}
@@ -213,13 +213,13 @@ func (m *Manager) Stop() {
m.mtx.Lock()
defer m.mtx.Unlock()
- zap.S().Info("msg: ", "Stopping rule manager...")
+ zap.L().Info("Stopping rule manager...")
for _, t := range m.tasks {
t.Stop()
}
- zap.S().Info("msg: ", "Rule manager stopped")
+ zap.L().Info("Rule manager stopped")
}
// EditRuleDefinition writes the rule definition to the
@@ -230,7 +230,7 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error
currentRule, err := m.GetRule(ctx, id)
if err != nil {
- zap.S().Errorf("msg: ", "failed to get the rule from rule db", "\t ruleid: ", id)
+ zap.L().Error("failed to get the rule from rule db", zap.String("id", id), zap.Error(err))
return err
}
@@ -243,7 +243,7 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error
}
if len(errs) > 0 {
- zap.S().Errorf("failed to parse rules:", errs)
+ zap.L().Error("failed to parse rules", zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
return errs[0]
}
@@ -264,13 +264,13 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error
if !checkIfTraceOrLogQB(&currentRule.PostableRule) {
err = m.updateFeatureUsage(parsedRule, 1)
if err != nil {
- zap.S().Errorf("error updating feature usage: %v", err)
+ zap.L().Error("error updating feature usage", zap.Error(err))
}
// update feature usage if the new rule is not a trace or log query builder and the current rule is
} else if !checkIfTraceOrLogQB(parsedRule) {
err = m.updateFeatureUsage(&currentRule.PostableRule, -1)
if err != nil {
- zap.S().Errorf("error updating feature usage: %v", err)
+ zap.L().Error("error updating feature usage", zap.Error(err))
}
}
@@ -281,12 +281,12 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error {
m.mtx.Lock()
defer m.mtx.Unlock()
- zap.S().Debugf("msg:", "editing a rule task", "\t task name:", taskName)
+ zap.L().Debug("editing a rule task", zap.String("name", taskName))
newTask, err := m.prepareTask(false, rule, taskName)
if err != nil {
- zap.S().Errorf("msg:", "loading tasks failed", "\t err:", err)
+ zap.L().Error("loading tasks failed", zap.Error(err))
return errors.New("error preparing rule with given parameters, previous rule set restored")
}
@@ -294,7 +294,7 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error {
// it to finish the current iteration. Then copy it into the new group.
oldTask, ok := m.tasks[taskName]
if !ok {
- zap.S().Warnf("msg:", "rule task not found, a new task will be created ", "\t task name:", taskName)
+ zap.L().Warn("rule task not found, a new task will be created", zap.String("name", taskName))
}
delete(m.tasks, taskName)
@@ -319,14 +319,14 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error {
idInt, err := strconv.Atoi(id)
if err != nil {
- zap.S().Errorf("msg: ", "delete rule received an rule id in invalid format, must be a number", "\t ruleid:", id)
+ zap.L().Error("delete rule received an rule id in invalid format, must be a number", zap.String("id", id), zap.Error(err))
return fmt.Errorf("delete rule received an rule id in invalid format, must be a number")
}
// update feature usage
rule, err := m.GetRule(ctx, id)
if err != nil {
- zap.S().Errorf("msg: ", "failed to get the rule from rule db", "\t ruleid: ", id)
+ zap.L().Error("failed to get the rule from rule db", zap.String("id", id), zap.Error(err))
return err
}
@@ -336,13 +336,13 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error {
}
if _, _, err := m.ruleDB.DeleteRuleTx(ctx, id); err != nil {
- zap.S().Errorf("msg: ", "failed to delete the rule from rule db", "\t ruleid: ", id)
+ zap.L().Error("failed to delete the rule from rule db", zap.String("id", id), zap.Error(err))
return err
}
err = m.updateFeatureUsage(&rule.PostableRule, -1)
if err != nil {
- zap.S().Errorf("error updating feature usage: %v", err)
+ zap.L().Error("error updating feature usage", zap.Error(err))
}
return nil
@@ -351,16 +351,16 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error {
func (m *Manager) deleteTask(taskName string) {
m.mtx.Lock()
defer m.mtx.Unlock()
- zap.S().Debugf("msg:", "deleting a rule task", "\t task name:", taskName)
+ zap.L().Debug("deleting a rule task", zap.String("name", taskName))
oldg, ok := m.tasks[taskName]
if ok {
oldg.Stop()
delete(m.tasks, taskName)
delete(m.rules, ruleIdFromTaskName(taskName))
- zap.S().Debugf("msg:", "rule task deleted", "\t task name:", taskName)
+ zap.L().Debug("rule task deleted", zap.String("name", taskName))
} else {
- zap.S().Info("msg: ", "rule not found for deletion", "\t name:", taskName)
+ zap.L().Info("rule not found for deletion", zap.String("name", taskName))
}
}
@@ -376,7 +376,7 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*GettableRule
}
if len(errs) > 0 {
- zap.S().Errorf("failed to parse rules:", errs)
+ zap.L().Error("failed to parse rules", zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
return nil, errs[0]
}
@@ -400,7 +400,7 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*GettableRule
// update feature usage
err = m.updateFeatureUsage(parsedRule, 1)
if err != nil {
- zap.S().Errorf("error updating feature usage: %v", err)
+ zap.L().Error("error updating feature usage", zap.Error(err))
}
gettableRule := &GettableRule{
Id: fmt.Sprintf("%d", lastInsertId),
@@ -438,10 +438,10 @@ func (m *Manager) checkFeatureUsage(parsedRule *PostableRule) error {
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
- zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
+ zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
return model.BadRequest(err)
default:
- zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
+ zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
return model.BadRequest(err)
}
}
@@ -466,11 +466,11 @@ func (m *Manager) addTask(rule *PostableRule, taskName string) error {
m.mtx.Lock()
defer m.mtx.Unlock()
- zap.S().Debugf("msg:", "adding a new rule task", "\t task name:", taskName)
+ zap.L().Debug("adding a new rule task", zap.String("name", taskName))
newTask, err := m.prepareTask(false, rule, taskName)
if err != nil {
- zap.S().Errorf("msg:", "creating rule task failed", "\t name:", taskName, "\t err", err)
+ zap.L().Error("creating rule task failed", zap.String("name", taskName), zap.Error(err))
return errors.New("error loading rules, previous rule set restored")
}
@@ -503,8 +503,8 @@ func (m *Manager) prepareTask(acquireLock bool, r *PostableRule, taskName string
rules := make([]Rule, 0)
var task Task
- if r.Alert == "" {
- zap.S().Errorf("msg:", "task load failed, at least one rule must be set", "\t task name:", taskName)
+ if r.AlertName == "" {
+ zap.L().Error("task load failed, at least one rule must be set", zap.String("name", taskName))
return task, fmt.Errorf("task load failed, at least one rule must be set")
}
@@ -536,7 +536,7 @@ func (m *Manager) prepareTask(acquireLock bool, r *PostableRule, taskName string
pr, err := NewPromRule(
ruleId,
r,
- log.With(m.logger, "alert", r.Alert),
+ log.With(m.logger, "alert", r.AlertName),
PromRuleOpts{},
)
@@ -686,7 +686,7 @@ func (m *Manager) ListRuleStates(ctx context.Context) (*GettableRules, error) {
ruleResponse := &GettableRule{}
if err := json.Unmarshal([]byte(s.Data), ruleResponse); err != nil { // Parse []byte to go struct pointer
- zap.S().Errorf("msg:", "invalid rule data", "\t err:", err)
+ zap.L().Error("failed to unmarshal rule from db", zap.Int("id", s.Id), zap.Error(err))
continue
}
@@ -779,28 +779,28 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleId string)
// retrieve rule from DB
storedJSON, err := m.ruleDB.GetStoredRule(ctx, ruleId)
if err != nil {
- zap.S().Errorf("msg:", "failed to get stored rule with given id", "\t error:", err)
+ zap.L().Error("failed to get stored rule with given id", zap.String("id", ruleId), zap.Error(err))
return nil, err
}
// storedRule holds the current stored rule from DB
storedRule := PostableRule{}
if err := json.Unmarshal([]byte(storedJSON.Data), &storedRule); err != nil {
- zap.S().Errorf("msg:", "failed to get unmarshal stored rule with given id", "\t error:", err)
+ zap.L().Error("failed to unmarshal stored rule with given id", zap.String("id", ruleId), zap.Error(err))
return nil, err
}
// patchedRule is combo of stored rule and patch received in the request
patchedRule, errs := parseIntoRule(storedRule, []byte(ruleStr), "json")
if len(errs) > 0 {
- zap.S().Errorf("failed to parse rules:", errs)
+ zap.L().Error("failed to parse rules", zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
return nil, errs[0]
}
// deploy or un-deploy task according to patched (new) rule state
if err := m.syncRuleStateWithTask(taskName, patchedRule); err != nil {
- zap.S().Errorf("failed to sync stored rule state with the task")
+ zap.L().Error("failed to sync stored rule state with the task", zap.String("taskName", taskName), zap.Error(err))
return nil, err
}
@@ -816,7 +816,7 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleId string)
// restore task state from the stored rule
if err := m.syncRuleStateWithTask(taskName, &storedRule); err != nil {
- zap.S().Errorf("msg: ", "failed to restore rule after patch failure", "\t error:", err)
+ zap.L().Error("failed to restore rule after patch failure", zap.String("taskName", taskName), zap.Error(err))
}
return nil, err
@@ -846,11 +846,11 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
parsedRule, errs := ParsePostableRule([]byte(ruleStr))
if len(errs) > 0 {
- zap.S().Errorf("msg: failed to parse rule from request:", "\t error: ", errs)
+ zap.L().Error("failed to parse rule from request", zap.Errors("errors", errs))
return 0, newApiErrorBadData(errs[0])
}
- var alertname = parsedRule.Alert
+ var alertname = parsedRule.AlertName
if alertname == "" {
// alertname is not mandatory for testing, so picking
// a random string here
@@ -858,7 +858,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
}
// append name to indicate this is test alert
- parsedRule.Alert = fmt.Sprintf("%s%s", alertname, TestAlertPostFix)
+ parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, TestAlertPostFix)
var rule Rule
var err error
@@ -882,7 +882,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
)
if err != nil {
- zap.S().Errorf("msg: failed to prepare a new threshold rule for test:", "\t error: ", err)
+ zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, newApiErrorBadData(err)
}
@@ -899,7 +899,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
)
if err != nil {
- zap.S().Errorf("msg: failed to prepare a new promql rule for test:", "\t error: ", err)
+ zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, newApiErrorBadData(err)
}
} else {
@@ -911,10 +911,13 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
count, err := rule.Eval(ctx, ts, m.opts.Queriers)
if err != nil {
- zap.S().Warn("msg:", "Evaluating rule failed", "\t rule:", rule, "\t err: ", err)
+ zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err))
return 0, newApiErrorInternal(fmt.Errorf("rule evaluation failed"))
}
- alertsFound := count.(int)
+ alertsFound, ok := count.(int)
+ if !ok {
+ return 0, newApiErrorInternal(fmt.Errorf("something went wrong"))
+ }
rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), m.prepareNotifyFunc())
return alertsFound, nil
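
The comma-ok assertion added to TestNotification is the idiomatic guard against a panic when an interface value does not hold the expected dynamic type; a compact sketch of the two forms:

var count interface{} = 3

// n := count.(string)   // would panic: the interface holds an int
n, ok := count.(int)     // comma-ok form never panics
if !ok {
	// handle the mismatch, e.g. return an internal error
}
_ = n
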
diff --git a/pkg/query-service/rules/promRule.go b/pkg/query-service/rules/promRule.go
index 1a4a89e3d2..a998de243e 100644
--- a/pkg/query-service/rules/promRule.go
+++ b/pkg/query-service/rules/promRule.go
@@ -71,7 +71,7 @@ func NewPromRule(
p := PromRule{
id: id,
- name: postableRule.Alert,
+ name: postableRule.AlertName,
source: postableRule.Source,
ruleCondition: postableRule.RuleCondition,
evalWindow: time.Duration(postableRule.EvalWindow),
@@ -94,7 +94,7 @@ func NewPromRule(
return nil, err
}
- zap.S().Info("msg:", "creating new alerting rule", "\t name:", p.name, "\t condition:", p.ruleCondition.String(), "\t query:", query)
+ zap.L().Info("creating new alerting rule", zap.String("name", p.name), zap.String("condition", p.ruleCondition.String()), zap.String("query", query))
return &p, nil
}
@@ -339,7 +339,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
if err != nil {
return nil, err
}
- zap.S().Info("rule:", r.Name(), "\t evaluating promql query: ", q)
+ zap.L().Info("evaluating promql query", zap.String("name", r.Name()), zap.String("query", q))
res, err := queriers.PqlEngine.RunAlertQuery(ctx, q, start, end, interval)
if err != nil {
r.SetHealth(HealthBad)
@@ -368,7 +368,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
if !shouldAlert {
continue
}
- zap.S().Debugf("rule: %s, alerting for series: %v", r.Name(), series)
+ zap.L().Debug("alerting for series", zap.String("name", r.Name()), zap.Any("series", series))
thresholdFormatter := formatter.FromUnit(r.ruleCondition.TargetUnit)
threshold := thresholdFormatter.Format(r.targetVal(), r.ruleCondition.TargetUnit)
@@ -435,7 +435,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
}
}
- zap.S().Debugf("For rule: %s, found %d alerts", r.Name(), len(alerts))
+ zap.L().Debug("found alerts for rule", zap.Int("count", len(alerts)), zap.String("name", r.Name()))
// alerts[h] is ready, add or update active list now
for h, a := range alerts {
// Check whether we already have alerting state for the identifying label set.
@@ -612,7 +612,7 @@ func (r *PromRule) shouldAlert(series pql.Series) (pql.Sample, bool) {
func (r *PromRule) String() string {
ar := PostableRule{
- Alert: r.name,
+ AlertName: r.name,
RuleCondition: r.ruleCondition,
EvalWindow: Duration(r.evalWindow),
Labels: r.labels.Map(),
diff --git a/pkg/query-service/rules/promRuleTask.go b/pkg/query-service/rules/promRuleTask.go
index d4a853d844..af38488f7c 100644
--- a/pkg/query-service/rules/promRuleTask.go
+++ b/pkg/query-service/rules/promRuleTask.go
@@ -40,7 +40,7 @@ type PromRuleTask struct {
// newPromRuleTask holds rules that have promql condition
// and evalutes the rule at a given frequency
func newPromRuleTask(name, file string, frequency time.Duration, rules []Rule, opts *ManagerOptions, notify NotifyFunc) *PromRuleTask {
- zap.S().Info("Initiating a new rule group:", name, "\t frequency:", frequency)
+ zap.L().Info("Initiating a new rule group", zap.String("name", name), zap.Duration("frequency", frequency))
if time.Now() == time.Now().Add(frequency) {
frequency = DefaultFrequency
@@ -312,7 +312,7 @@ func (g *PromRuleTask) CopyState(fromTask Task) error {
// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) {
- zap.S().Info("promql rule task:", g.name, "\t eval started at:", ts)
+ zap.L().Info("promql rule task", zap.String("name", g.name), zap.Time("eval started at", ts))
for i, rule := range g.rules {
if rule == nil {
continue
@@ -340,7 +340,7 @@ func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
- zap.S().Warn("msg", "Evaluating rule failed", "rule", rule, "err", err)
+ zap.L().Warn("Evaluating rule failed", zap.String("ruleid", rule.ID()), zap.Error(err))
// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.
diff --git a/pkg/query-service/rules/promrule_test.go b/pkg/query-service/rules/promrule_test.go
index ee843b9b64..0707933b89 100644
--- a/pkg/query-service/rules/promrule_test.go
+++ b/pkg/query-service/rules/promrule_test.go
@@ -20,7 +20,7 @@ func (l testLogger) Log(args ...interface{}) error {
func TestPromRuleShouldAlert(t *testing.T) {
postableRule := PostableRule{
- Alert: "Test Rule",
+ AlertName: "Test Rule",
AlertType: "METRIC_BASED_ALERT",
RuleType: RuleTypeProm,
EvalWindow: Duration(5 * time.Minute),
diff --git a/pkg/query-service/rules/ruleTask.go b/pkg/query-service/rules/ruleTask.go
index b2f6f09921..edf3957a6f 100644
--- a/pkg/query-service/rules/ruleTask.go
+++ b/pkg/query-service/rules/ruleTask.go
@@ -25,10 +25,8 @@ type RuleTask struct {
evaluationTime time.Duration
lastEvaluation time.Time
- markStale bool
- done chan struct{}
- terminated chan struct{}
- managerDone chan struct{}
+ done chan struct{}
+ terminated chan struct{}
pause bool
notify NotifyFunc
@@ -42,7 +40,7 @@ func newRuleTask(name, file string, frequency time.Duration, rules []Rule, opts
if time.Now() == time.Now().Add(frequency) {
frequency = DefaultFrequency
}
- zap.S().Info("msg:", "initiating a new rule task", "\t name:", name, "\t frequency:", frequency)
+ zap.L().Info("initiating a new rule task", zap.String("name", name), zap.Duration("frequency", frequency))
return &RuleTask{
name: name,
@@ -91,7 +89,7 @@ func (g *RuleTask) Run(ctx context.Context) {
// Wait an initial amount to have consistently slotted intervals.
evalTimestamp := g.EvalTimestamp(time.Now().UnixNano()).Add(g.frequency)
- zap.S().Debugf("group:", g.name, "\t group run to begin at: ", evalTimestamp)
+ zap.L().Debug("group run to begin at", zap.Time("evalTimestamp", evalTimestamp))
select {
case <-time.After(time.Until(evalTimestamp)):
case <-g.done:
@@ -294,7 +292,7 @@ func (g *RuleTask) CopyState(fromTask Task) error {
// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
func (g *RuleTask) Eval(ctx context.Context, ts time.Time) {
- zap.S().Debugf("msg:", "rule task eval started", "\t name:", g.name, "\t start time:", ts)
+ zap.L().Debug("rule task eval started", zap.String("name", g.name), zap.Time("start time", ts))
for i, rule := range g.rules {
if rule == nil {
@@ -330,7 +328,7 @@ func (g *RuleTask) Eval(ctx context.Context, ts time.Time) {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
- zap.S().Warn("msg:", "Evaluating rule failed", "\t rule:", rule, "\t err: ", err)
+ zap.L().Warn("Evaluating rule failed", zap.String("ruleid", rule.ID()), zap.Error(err))
// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.
diff --git a/pkg/query-service/rules/thresholdRule.go b/pkg/query-service/rules/thresholdRule.go
index 0fdb3745ca..05fd526b79 100644
--- a/pkg/query-service/rules/thresholdRule.go
+++ b/pkg/query-service/rules/thresholdRule.go
@@ -102,7 +102,7 @@ func NewThresholdRule(
t := ThresholdRule{
id: id,
- name: p.Alert,
+ name: p.AlertName,
source: p.Source,
ruleCondition: p.RuleCondition,
evalWindow: time.Duration(p.EvalWindow),
@@ -135,7 +135,7 @@ func NewThresholdRule(
}
t.queryBuilderV4 = queryBuilder.NewQueryBuilder(builderOptsV4, featureFlags)
- zap.S().Info("msg:", "creating new alerting rule", "\t name:", t.name, "\t condition:", t.ruleCondition.String(), "\t generatorURL:", t.GeneratorURL())
+ zap.L().Info("creating new ThresholdRule", zap.String("name", t.name), zap.String("id", t.id))
return &t, nil
}
@@ -386,7 +386,7 @@ func (r *ThresholdRule) ForEachActiveAlert(f func(*Alert)) {
}
func (r *ThresholdRule) SendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) {
- zap.S().Info("msg:", "sending alerts", "\t rule:", r.Name())
+ zap.L().Info("sending alerts", zap.String("rule", r.Name()))
alerts := []*Alert{}
r.ForEachActiveAlert(func(alert *Alert) {
if r.opts.SendAlways || alert.needsSending(ts, resendDelay) {
@@ -400,7 +400,7 @@ func (r *ThresholdRule) SendAlerts(ctx context.Context, ts time.Time, resendDela
anew := *alert
alerts = append(alerts, &anew)
} else {
- zap.S().Debugf("msg: skipping send alert due to resend delay", "\t rule: ", r.Name(), "\t alert:", alert.Labels)
+ zap.L().Debug("skipping send alert due to resend delay", zap.String("rule", r.Name()), zap.Any("alert", alert.Labels))
}
})
notifyFunc(ctx, "", alerts...)
@@ -416,12 +416,12 @@ func (r *ThresholdRule) Unit() string {
func (r *ThresholdRule) CheckCondition(v float64) bool {
if math.IsNaN(v) {
- zap.S().Debugf("msg:", "found NaN in rule condition", "\t rule name:", r.Name())
+ zap.L().Debug("found NaN in rule condition", zap.String("rule", r.Name()))
return false
}
if r.ruleCondition.Target == nil {
- zap.S().Debugf("msg:", "found null target in rule condition", "\t rulename:", r.Name())
+ zap.L().Debug("found null target in rule condition", zap.String("rule", r.Name()))
return false
}
@@ -429,7 +429,7 @@ func (r *ThresholdRule) CheckCondition(v float64) bool {
value := unitConverter.Convert(converter.Value{F: *r.ruleCondition.Target, U: converter.Unit(r.ruleCondition.TargetUnit)}, converter.Unit(r.Unit()))
- zap.S().Debugf("Checking condition for rule: %s, Converter=%s, Value=%f, Target=%f, CompareOp=%s", r.Name(), unitConverter.Name(), v, value.F, r.ruleCondition.CompareOp)
+ zap.L().Info("Checking condition for rule", zap.String("rule", r.Name()), zap.String("converter", unitConverter.Name()), zap.Float64("value", v), zap.Float64("target", value.F), zap.String("compareOp", string(r.ruleCondition.CompareOp)))
switch r.ruleCondition.CompareOp {
case ValueIsEq:
return v == value.F
@@ -496,7 +496,7 @@ func (r *ThresholdRule) shouldSkipFirstRecord() bool {
func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, query string) (Vector, error) {
rows, err := db.Query(ctx, query)
if err != nil {
- zap.S().Errorf("rule:", r.Name(), "\t failed to get alert query result")
+ zap.L().Error("failed to get alert query result", zap.String("rule", r.Name()), zap.Error(err))
return nil, err
}
@@ -604,7 +604,7 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
lblsOrig.Set(columnNames[i], fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
}
default:
- zap.S().Errorf("ruleId:", r.ID(), "\t error: invalid var found in query result", v, columnNames[i])
+ zap.L().Error("invalid var found in query result", zap.String("ruleId", r.ID()), zap.Any("value", v), zap.Any("column", columnNames[i]))
}
}
@@ -710,11 +710,11 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
}
}
- zap.S().Debugf("ruleid:", r.ID(), "\t resultmap(potential alerts):", len(resultMap))
+ zap.L().Debug("resultmap(potential alerts)", zap.String("ruleid", r.ID()), zap.Int("count", len(resultMap)))
// if the data is missing for `For` duration then we should send alert
- if r.ruleCondition.AlertOnAbsent && r.lastTimestampWithDatapoints.Add(r.Condition().AbsentFor).Before(time.Now()) {
- zap.S().Debugf("ruleid:", r.ID(), "\t msg: no data found for rule condition")
+ if r.ruleCondition.AlertOnAbsent && r.lastTimestampWithDatapoints.Add(time.Duration(r.Condition().AbsentFor)*time.Minute).Before(time.Now()) {
+ zap.L().Info("no data found for rule condition", zap.String("ruleid", r.ID()))
lbls := labels.NewBuilder(labels.Labels{})
if !r.lastTimestampWithDatapoints.IsZero() {
lbls.Set("lastSeen", r.lastTimestampWithDatapoints.Format(constants.AlertTimeFormat))
@@ -734,7 +734,7 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
}
}
if len(result) != 0 {
- zap.S().Infof("For rule %s, with ClickHouseQuery %s, found %d alerts", r.ID(), query, len(result))
+ zap.L().Info("found alerts", zap.String("ruleid", r.ID()), zap.String("query", query), zap.Int("count", len(result)))
}
return result, nil
}
@@ -979,7 +979,7 @@ func (r *ThresholdRule) prepareClickhouseQueries(ts time.Time) (map[string]strin
}
if r.ruleCondition.QueryType() != v3.QueryTypeClickHouseSQL {
- zap.S().Debugf("ruleid:", r.ID(), "\t msg: unsupported query type in prepareClickhouseQueries()")
+ zap.L().Error("unsupported query type in prepareClickhouseQueries", zap.String("ruleid", r.ID()))
return nil, fmt.Errorf("failed to prepare clickhouse queries")
}
@@ -995,18 +995,17 @@ func (r *ThresholdRule) prepareClickhouseQueries(ts time.Time) (map[string]strin
tmpl := template.New("clickhouse-query")
tmpl, err := tmpl.Parse(chQuery.Query)
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to parse clickhouse query to populate vars", err)
+ zap.L().Error("failed to parse clickhouse query to populate vars", zap.String("ruleid", r.ID()), zap.Error(err))
r.SetHealth(HealthBad)
return nil, err
}
var query bytes.Buffer
err = tmpl.Execute(&query, params.Variables)
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to populate clickhouse query", err)
+ zap.L().Error("failed to populate clickhouse query", zap.String("ruleid", r.ID()), zap.Error(err))
r.SetHealth(HealthBad)
return nil, err
}
- zap.S().Debugf("ruleid:", r.ID(), "\t query:", query.String())
queries[name] = query.String()
}
return queries, nil
@@ -1023,13 +1022,13 @@ func (r *ThresholdRule) GetSelectedQuery() string {
if r.ruleCondition.QueryType() == v3.QueryTypeBuilder {
queries, err = r.prepareBuilderQueries(time.Now(), nil)
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare metric queries", zap.Error(err))
+ zap.L().Error("failed to prepare metric queries", zap.String("ruleid", r.ID()), zap.Error(err))
return ""
}
} else if r.ruleCondition.QueryType() == v3.QueryTypeClickHouseSQL {
queries, err = r.prepareClickhouseQueries(time.Now())
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare clickhouse queries", zap.Error(err))
+ zap.L().Error("failed to prepare clickhouse queries", zap.String("ruleid", r.ID()), zap.Error(err))
return ""
}
}
@@ -1078,7 +1077,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c
queries, err = r.prepareBuilderQueries(ts, ch)
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare metric queries", zap.Error(err))
+ zap.L().Error("failed to prepare metric queries", zap.String("ruleid", r.ID()), zap.Error(err))
return nil, fmt.Errorf("failed to prepare metric queries")
}
@@ -1087,7 +1086,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c
queries, err = r.prepareClickhouseQueries(ts)
if err != nil {
- zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare clickhouse queries", zap.Error(err))
+ zap.L().Error("failed to prepare clickhouse queries", zap.String("ruleid", r.ID()), zap.Error(err))
return nil, fmt.Errorf("failed to prepare clickhouse queries")
}
@@ -1099,16 +1098,16 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c
return nil, fmt.Errorf("no queries could be built with the rule config")
}
- zap.S().Debugf("ruleid:", r.ID(), "\t runQueries:", queries)
+ zap.L().Info("prepared queries", zap.String("ruleid", r.ID()), zap.Any("queries", queries))
queryLabel := r.GetSelectedQuery()
- zap.S().Debugf("ruleId: ", r.ID(), "\t result query label:", queryLabel)
+ zap.L().Debug("Selected query lable for rule", zap.String("ruleid", r.ID()), zap.String("label", queryLabel))
if queryString, ok := queries[queryLabel]; ok {
return r.runChQuery(ctx, ch, queryString)
}
- zap.S().Errorf("ruleId: ", r.ID(), "\t invalid query label:", queryLabel, "\t queries:", queries)
+ zap.L().Error("invalid query label", zap.String("ruleid", r.ID()), zap.String("label", queryLabel), zap.Any("queries", queries))
return nil, fmt.Errorf("this is unexpected, invalid query label")
}
@@ -1137,7 +1136,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
if err != nil {
r.SetHealth(HealthBad)
r.SetLastError(err)
- zap.S().Debugf("ruleid:", r.ID(), "\t failure in buildAndRunQuery:", err)
+ zap.L().Error("failure in buildAndRunQuery", zap.String("ruleid", r.ID()), zap.Error(err))
return nil, err
}
@@ -1156,7 +1155,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
value := valueFormatter.Format(smpl.V, r.Unit())
thresholdFormatter := formatter.FromUnit(r.ruleCondition.TargetUnit)
threshold := thresholdFormatter.Format(r.targetVal(), r.ruleCondition.TargetUnit)
- zap.S().Debugf("Alert template data for rule %s: Formatter=%s, Value=%s, Threshold=%s", r.Name(), valueFormatter.Name(), value, threshold)
+ zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))
tmplData := AlertTemplateData(l, value, threshold)
// Inject some convenience variables that are easier to remember for users
@@ -1177,7 +1176,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("", err)
- zap.S().Errorf("msg:", "Expanding alert template failed", "\t err", err, "\t data", tmplData)
+ zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
}
return result
}
@@ -1222,7 +1221,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
resultFPs[h] = struct{}{}
if _, ok := alerts[h]; ok {
- zap.S().Errorf("ruleId: ", r.ID(), "\t msg:", "the alert query returns duplicate records:", alerts[h])
+ zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
// We have already acquired the lock above hence using SetHealth and
// SetLastError will deadlock.
@@ -1242,7 +1241,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
}
}
- zap.S().Info("rule:", r.Name(), "\t alerts found: ", len(alerts))
+ zap.L().Info("alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))
// alerts[h] is ready, add or update active list now
for h, a := range alerts {
@@ -1290,7 +1289,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
func (r *ThresholdRule) String() string {
ar := PostableRule{
- Alert: r.name,
+ AlertName: r.name,
RuleCondition: r.ruleCondition,
EvalWindow: Duration(r.evalWindow),
Labels: r.labels.Map(),
diff --git a/pkg/query-service/rules/thresholdRule_test.go b/pkg/query-service/rules/thresholdRule_test.go
index fde35364bc..b7d3cc5fee 100644
--- a/pkg/query-service/rules/thresholdRule_test.go
+++ b/pkg/query-service/rules/thresholdRule_test.go
@@ -14,7 +14,7 @@ import (
func TestThresholdRuleCombinations(t *testing.T) {
postableRule := PostableRule{
- Alert: "Tricky Condition Tests",
+ AlertName: "Tricky Condition Tests",
AlertType: "METRIC_BASED_ALERT",
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
@@ -339,7 +339,7 @@ func TestNormalizeLabelName(t *testing.T) {
func TestPrepareLinksToLogs(t *testing.T) {
postableRule := PostableRule{
- Alert: "Tricky Condition Tests",
+ AlertName: "Tricky Condition Tests",
AlertType: "LOGS_BASED_ALERT",
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
@@ -381,7 +381,7 @@ func TestPrepareLinksToLogs(t *testing.T) {
func TestPrepareLinksToTraces(t *testing.T) {
postableRule := PostableRule{
- Alert: "Links to traces test",
+ AlertName: "Links to traces test",
AlertType: "TRACES_BASED_ALERT",
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
diff --git a/pkg/query-service/telemetry/telemetry.go b/pkg/query-service/telemetry/telemetry.go
index ff2ed9aa1a..4c23cbd092 100644
--- a/pkg/query-service/telemetry/telemetry.go
+++ b/pkg/query-service/telemetry/telemetry.go
@@ -38,7 +38,7 @@ const (
TELEMETRY_EVENT_SERVICE = "ServiceName"
TELEMETRY_EVENT_LOGS_FILTERS = "Logs Filters"
TELEMETRY_EVENT_DISTRIBUTED = "Distributed"
- TELEMETRY_EVENT_QUERY_RANGE_V3 = "Query Range V3 Metadata"
+ TELEMETRY_EVENT_QUERY_RANGE_API = "Query Range API"
TELEMETRY_EVENT_DASHBOARDS_ALERTS = "Dashboards/Alerts Info"
TELEMETRY_EVENT_ACTIVE_USER = "Active User"
TELEMETRY_EVENT_ACTIVE_USER_PH = "Active User V2"
@@ -61,6 +61,7 @@ var SAAS_EVENTS_LIST = map[string]struct{}{
TELEMETRY_EVENT_DASHBOARDS_ALERTS: {},
TELEMETRY_EVENT_SUCCESSFUL_DASHBOARD_PANEL_QUERY: {},
TELEMETRY_EVENT_SUCCESSFUL_ALERT_QUERY: {},
+ // TELEMETRY_EVENT_QUERY_RANGE_API: {}, // this event is not part of SAAS_EVENTS_LIST as it may cause too many events to be sent
}
const api_key = "4Gmoa4ixJAUHx2BpJxsjwA1bEfnwEeRz"
@@ -282,30 +283,39 @@ func createTelemetry() {
telemetry.SendEvent(TELEMETRY_EVENT_HEART_BEAT, data, "")
alertsInfo, err := telemetry.reader.GetAlertsInfo(context.Background())
- if err != nil {
- telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, map[string]interface{}{"error": err.Error()}, "")
- } else {
+ if err == nil {
dashboardsInfo, err := telemetry.reader.GetDashboardsInfo(context.Background())
if err == nil {
- dashboardsAlertsData := map[string]interface{}{
- "totalDashboards": dashboardsInfo.TotalDashboards,
- "totalDashboardsWithPanelAndName": dashboardsInfo.TotalDashboardsWithPanelAndName,
- "logsBasedPanels": dashboardsInfo.LogsBasedPanels,
- "metricBasedPanels": dashboardsInfo.MetricBasedPanels,
- "tracesBasedPanels": dashboardsInfo.TracesBasedPanels,
- "totalAlerts": alertsInfo.TotalAlerts,
- "logsBasedAlerts": alertsInfo.LogsBasedAlerts,
- "metricBasedAlerts": alertsInfo.MetricBasedAlerts,
- "tracesBasedAlerts": alertsInfo.TracesBasedAlerts,
+ channels, err := telemetry.reader.GetChannels()
+ if err == nil {
+ savedViewsInfo, err := telemetry.reader.GetSavedViewsInfo(context.Background())
+ if err == nil {
+ dashboardsAlertsData := map[string]interface{}{
+ "totalDashboards": dashboardsInfo.TotalDashboards,
+ "totalDashboardsWithPanelAndName": dashboardsInfo.TotalDashboardsWithPanelAndName,
+ "logsBasedPanels": dashboardsInfo.LogsBasedPanels,
+ "metricBasedPanels": dashboardsInfo.MetricBasedPanels,
+ "tracesBasedPanels": dashboardsInfo.TracesBasedPanels,
+ "totalAlerts": alertsInfo.TotalAlerts,
+ "logsBasedAlerts": alertsInfo.LogsBasedAlerts,
+ "metricBasedAlerts": alertsInfo.MetricBasedAlerts,
+ "tracesBasedAlerts": alertsInfo.TracesBasedAlerts,
+ "totalChannels": len(*channels),
+ "totalSavedViews": savedViewsInfo.TotalSavedViews,
+ "logsSavedViews": savedViewsInfo.LogsSavedViews,
+ "tracesSavedViews": savedViewsInfo.TracesSavedViews,
+ }
+ // send event only if there are dashboards or alerts or channels
+ if dashboardsInfo.TotalDashboards > 0 || alertsInfo.TotalAlerts > 0 || len(*channels) > 0 || savedViewsInfo.TotalSavedViews > 0 {
+ telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, dashboardsAlertsData, "")
+ }
+ }
}
- // send event only if there are dashboards or alerts
- if dashboardsInfo.TotalDashboards > 0 || alertsInfo.TotalAlerts > 0 {
- telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, dashboardsAlertsData, "")
- }
- } else {
- telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, map[string]interface{}{"error": err.Error()}, "")
}
}
+ if err != nil {
+ telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, map[string]interface{}{"error": err.Error()}, "")
+ }
getDistributedInfoInLastHeartBeatInterval, _ := telemetry.reader.GetDistributedInfoInLastHeartBeatInterval(context.Background())
telemetry.SendEvent(TELEMETRY_EVENT_DISTRIBUTED, getDistributedInfoInLastHeartBeatInterval, "")
@@ -419,7 +429,7 @@ func (a *Telemetry) checkEvents(event string) bool {
func (a *Telemetry) SendEvent(event string, data map[string]interface{}, userEmail string, opts ...bool) {
// ignore telemetry for default user
- if userEmail == DEFAULT_CLOUD_EMAIL {
+ if userEmail == DEFAULT_CLOUD_EMAIL || a.GetUserEmail() == DEFAULT_CLOUD_EMAIL {
return
}
@@ -457,7 +467,7 @@ func (a *Telemetry) SendEvent(event string, data map[string]interface{}, userEma
}
}
- // zap.S().Info(data)
+ // zap.L().Info(data)
properties := analytics.NewProperties()
properties.Set("version", version.GetVersion())
properties.Set("deploymentType", getDeploymentType())
diff --git a/pkg/query-service/tests/docker.go b/pkg/query-service/tests/docker.go
index a710161a0e..c65a627512 100644
--- a/pkg/query-service/tests/docker.go
+++ b/pkg/query-service/tests/docker.go
@@ -13,7 +13,6 @@ import (
"log"
minio "github.com/minio/minio-go/v6"
- "go.uber.org/zap"
)
const (
@@ -36,7 +35,7 @@ func init() {
} else if goArch == "amd64" {
composeFile = "./test-deploy/docker-compose.yaml"
} else {
- zap.S().Fatalf("Unsupported architecture: %s", goArch)
+ log.Fatalf("Unsupported architecture: %s", goArch)
}
}
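
Dropping zap suits an `init` func: at package-init time the global zap logger is typically still the no-op default, so `zap.S().Fatalf` would exit without printing anything, while the stdlib `log.Fatalf` always writes to stderr first. A minimal sketch of the arch switch (the arm compose path is assumed, not shown in this hunk):

```go
package main

import (
	"fmt"
	"log"
	"runtime"
)

var composeFile string

func init() {
	switch runtime.GOARCH {
	case "amd64":
		composeFile = "./test-deploy/docker-compose.yaml"
	case "arm64":
		composeFile = "./test-deploy/docker-compose.arm.yaml" // assumed path, not shown in the hunk
	default:
		// stdlib log prints to stderr before exiting, even when no zap
		// logger has been configured yet (zap's global default is a no-op).
		log.Fatalf("Unsupported architecture: %s", runtime.GOARCH)
	}
}

func main() { fmt.Println("using", composeFile) }
```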
diff --git a/pkg/query-service/tests/integration/signoz_integrations_test.go b/pkg/query-service/tests/integration/signoz_integrations_test.go
index 5294d06081..292e353401 100644
--- a/pkg/query-service/tests/integration/signoz_integrations_test.go
+++ b/pkg/query-service/tests/integration/signoz_integrations_test.go
@@ -9,6 +9,7 @@ import (
"runtime/debug"
"slices"
"testing"
+ "time"
"github.com/jmoiron/sqlx"
mockhouse "github.com/srikanthccv/ClickHouse-go-mock"
@@ -65,18 +66,30 @@ func TestSignozIntegrationLifeCycle(t *testing.T) {
// Integration connection status should get updated after signal data has been received.
testbed.mockLogQueryResponse([]model.SignozLog{})
+ testbed.mockMetricStatusQueryResponse(nil)
connectionStatus := testbed.GetIntegrationConnectionStatus(ii.Id)
require.NotNil(connectionStatus)
require.Nil(connectionStatus.Logs)
+ require.Nil(connectionStatus.Metrics)
testLog := makeTestSignozLog("test log body", map[string]interface{}{
"source": "nginx",
})
testbed.mockLogQueryResponse([]model.SignozLog{testLog})
+
+ testMetricName := ii.ConnectionTests.Metrics[0]
+ testMetricLastReceivedTs := time.Now().UnixMilli()
+ testbed.mockMetricStatusQueryResponse(&model.MetricStatus{
+ MetricName: testMetricName,
+ LastReceivedTsMillis: testMetricLastReceivedTs,
+ })
+
connectionStatus = testbed.GetIntegrationConnectionStatus(ii.Id)
require.NotNil(connectionStatus)
require.NotNil(connectionStatus.Logs)
require.Equal(connectionStatus.Logs.LastReceivedTsMillis, int64(testLog.Timestamp/1000000))
+ require.NotNil(connectionStatus.Metrics)
+ require.Equal(connectionStatus.Metrics.LastReceivedTsMillis, testMetricLastReceivedTs)
// Should be able to uninstall integration
require.True(availableIntegrations[0].IsInstalled)
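
The logs assertion divides `testLog.Timestamp` by 1,000,000, which implies the log timestamp is carried in nanoseconds while the connection status reports milliseconds. A quick standalone check of that conversion, with an invented value:

```go
package main

import "fmt"

func main() {
	var tsNano uint64 = 1700000000123456789 // invented nanosecond timestamp
	tsMillis := int64(tsNano / 1000000)     // integer division drops sub-ms precision
	fmt.Println(tsMillis)                   // 1700000000123
}
```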
@@ -516,6 +529,32 @@ func (tb *IntegrationsTestBed) mockLogQueryResponse(logsInResponse []model.Signo
addLogsQueryExpectation(tb.mockClickhouse, logsInResponse)
}
+func (tb *IntegrationsTestBed) mockMetricStatusQueryResponse(expectation *model.MetricStatus) {
+ cols := []mockhouse.ColumnType{}
+ cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "metric_name"})
+ cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "labels"})
+ cols = append(cols, mockhouse.ColumnType{Type: "Int64", Name: "unix_milli"})
+
+ values := [][]any{}
+ if expectation != nil {
+ rowValues := []any{}
+
+ rowValues = append(rowValues, expectation.MetricName)
+
+ labelsJson, err := json.Marshal(expectation.LastReceivedLabels)
+ require.Nil(tb.t, err)
+ rowValues = append(rowValues, labelsJson)
+
+ rowValues = append(rowValues, expectation.LastReceivedTsMillis)
+
+ values = append(values, rowValues)
+ }
+
+ tb.mockClickhouse.ExpectQuery(
+ `SELECT.*metric_name, labels, unix_milli.*from.*signoz_metrics.*where metric_name in.*limit 1.*`,
+ ).WillReturnRows(mockhouse.NewRows(cols, values))
+}
+
// testDB can be injected for sharing a DB across multiple integration testbeds.
func NewIntegrationsTestBed(t *testing.T, testDB *sqlx.DB) *IntegrationsTestBed {
if testDB == nil {
@@ -529,6 +568,7 @@ func NewIntegrationsTestBed(t *testing.T, testDB *sqlx.DB) *IntegrationsTestBed
fm := featureManager.StartManager()
reader, mockClickhouse := NewMockClickhouseReader(t, testDB, fm)
+ mockClickhouse.MatchExpectationsInOrder(false)
apiHandler, err := app.NewAPIHandler(app.APIHandlerOpts{
Reader: reader,
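
`MatchExpectationsInOrder(false)` is needed because computing connection status now issues both a logs query and a metrics query, and the testbed should not depend on which one reaches the mock first. The metric-status mock also marshals labels to JSON before appending the row, matching the `String` column it declares. A standalone illustration of that row shape (the struct here is a local copy for illustration; field names follow the diff, the metric name is invented):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local copy of the shape the mock returns for the metric-status query.
type MetricStatus struct {
	MetricName           string
	LastReceivedLabels   map[string]string
	LastReceivedTsMillis int64
}

func main() {
	ms := MetricStatus{
		MetricName:           "nginx_requests_total", // invented for illustration
		LastReceivedLabels:   map[string]string{"source": "nginx"},
		LastReceivedTsMillis: time.Now().UnixMilli(),
	}
	// Labels travel through ClickHouse as a JSON-encoded String column,
	// which is why the mock marshals them before appending the row.
	labelsJSON, err := json.Marshal(ms.LastReceivedLabels)
	if err != nil {
		panic(err)
	}
	fmt.Println(ms.MetricName, string(labelsJSON), ms.LastReceivedTsMillis)
}
```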
diff --git a/pkg/query-service/utils/format.go b/pkg/query-service/utils/format.go
index bc15a8a1e9..0a614e2987 100644
--- a/pkg/query-service/utils/format.go
+++ b/pkg/query-service/utils/format.go
@@ -183,11 +183,11 @@ func ClickHouseFormattedValue(v interface{}) string {
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool:
return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
default:
- zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
+ zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
return ""
}
default:
- zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
+ zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
return ""
}
}
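
`zap.S()` is the sugared printf-style logger, while `zap.L()` is the structured one; these call sites were already passing `zap.Any` fields, which only the structured logger treats as proper key/value pairs. A minimal standalone example of the corrected call:

```go
package main

import (
	"reflect"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger) // zap.L() is a no-op until globals are replaced

	v := struct{ X int }{42}
	// With zap.L(), zap.Any lands as a typed key/value field instead of
	// being flattened into loosely-typed printf context by the sugar.
	zap.L().Error("invalid type for formatted value",
		zap.Any("type", reflect.TypeOf(v)))
}
```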
diff --git a/pkg/query-service/utils/time.go b/pkg/query-service/utils/time.go
index 69b49e42ac..274b032cdb 100644
--- a/pkg/query-service/utils/time.go
+++ b/pkg/query-service/utils/time.go
@@ -15,6 +15,6 @@ func Elapsed(funcName string, args ...interface{}) func() {
}
argsStr = argsStr[:len(argsStr)-2]
return func() {
- zap.S().Infof("func %s took %v with args %v", funcName, time.Since(start), string(argsStr))
+ zap.L().Info("Elapsed time", zap.String("func_name", funcName), zap.Duration("duration", time.Since(start)), zap.String("args", argsStr))
}
}
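
The `Elapsed` rewrite swaps an interpolated `Infof` for typed fields, so `func_name` and `duration` become filterable attributes rather than substrings. A runnable sketch of the same defer-based timing pattern (simplified, args omitted):

```go
package main

import (
	"time"

	"go.uber.org/zap"
)

// elapsed captures the start time now and logs structured fields when
// the returned func runs, mirroring the pattern in utils/time.go.
func elapsed(funcName string) func() {
	start := time.Now()
	return func() {
		zap.L().Info("Elapsed time",
			zap.String("func_name", funcName),
			zap.Duration("duration", time.Since(start)),
		)
	}
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	defer elapsed("main")() // note the trailing (): start now, log on return
	time.Sleep(10 * time.Millisecond)
}
```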
diff --git a/pkg/query-service/version/version.go b/pkg/query-service/version/version.go
index 577fe6789c..68c37a4e0e 100644
--- a/pkg/query-service/version/version.go
+++ b/pkg/query-service/version/version.go
@@ -3,8 +3,6 @@ package version
import (
"fmt"
"runtime"
-
- "go.uber.org/zap"
)
// These fields are set during an official build
@@ -40,7 +38,7 @@ Copyright 2022 SigNoz
// PrintVersion prints version and other helpful information.
func PrintVersion() {
- zap.S().Infof("\n%s\n", BuildDetails())
+ fmt.Println(BuildDetails())
}
func GetVersion() string {