Merge pull request #1996 from SigNoz/release/v0.13.1

Release/v0.13.1
Ankit Nayan 2023-01-07 20:56:08 +05:30 committed by GitHub
commit 435d74c37e
29 changed files with 381 additions and 102 deletions

View File

@ -11,6 +11,6 @@ jobs:
- name: Remove label
uses: buildsville/add-remove-label@v1
with:
label: ok-to-test
label: ok-to-test,testing-deploy
type: remove
token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -0,0 +1,38 @@
name: staging-deployment
# Trigger deployment only on push to develop branch
on:
push:
branches:
- develop
jobs:
deploy:
name: Deploy latest develop branch to staging
runs-on: ubuntu-latest
environment: staging
steps:
- name: Executing remote ssh commands using ssh key
uses: appleboy/ssh-action@v0.1.6
env:
GITHUB_BRANCH: develop
GITHUB_SHA: ${{ github.sha }}
with:
host: ${{ secrets.HOST_DNS }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.EC2_SSH_KEY }}
envs: GITHUB_BRANCH,GITHUB_SHA
command_timeout: 60m
script: |
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
docker system prune --force
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make run-signoz
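
For reference, `${GITHUB_SHA:0:7}` is plain Bash substring expansion: it yields the 7-character short SHA that the `make build-*` targets consume as `DOCKER_TAG`. The same truncation in Go, using this commit's own hash prefix as the example value (the tail is hypothetical):

```go
package main

import "fmt"

func main() {
	// Short-SHA derivation equivalent to ${GITHUB_SHA:0:7} in the
	// workflow script (only the 435d74c37e prefix is real; the rest
	// is filler for illustration).
	sha := "435d74c37e4d8f6b9a0c1e2d3f4a5b6c7d8e9f00"
	fmt.Println("DOCKER_TAG=" + sha[:7]) // DOCKER_TAG=435d74c
}
```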

View File

@ -0,0 +1,39 @@
name: testing-deployment
# Trigger deployment only on testing-deploy label on pull request
on:
pull_request:
types: [labeled]
jobs:
deploy:
name: Deploy PR branch to testing
runs-on: ubuntu-latest
environment: testing
if: ${{ github.event.label.name == 'testing-deploy' }}
steps:
- name: Executing remote ssh commands using ssh key
uses: appleboy/ssh-action@v0.1.6
env:
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
GITHUB_SHA: ${{ github.sha }}
with:
host: ${{ secrets.HOST_DNS }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.EC2_SSH_KEY }}
envs: GITHUB_BRANCH,GITHUB_SHA
command_timeout: 60m
script: |
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export DEV_BUILD="1"
docker system prune --force
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make run-signoz

View File

@ -215,9 +215,26 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
# 4. Contribute to Backend (Query-Service) 🌑
[**https://github.com/SigNoz/signoz/tree/develop/pkg/query-service**](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)
**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**
## 4.1 To run ClickHouse setup (recommended for local development)
## 4.1 Prerequisites
### 4.1.1 Install SQLite3
- Run the `sqlite3` command to check whether SQLite3 is already installed on your machine.
- If it is not installed, install it using the command below for your platform:
- on Linux
- on Debian / Ubuntu
```
sudo apt install sqlite3
```
- on CentOS / Fedora / RedHat
```
sudo yum install sqlite3
```
## 4.2 To run ClickHouse setup (recommended for local development)
- Clone the SigNoz repository and cd into signoz directory,
```

View File

@ -45,7 +45,7 @@ build-frontend-amd64:
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build --file Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of frontend(used in push pipeline)
@ -54,7 +54,7 @@ build-push-frontend:
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 \
docker buildx build --file Dockerfile --progress plane --push --platform linux/amd64 \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build and push docker image of query service
@ -65,7 +65,7 @@ build-query-service-amd64:
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
@ -73,7 +73,7 @@ build-push-query-service:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane --no-cache \
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plane \
--push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS="$(LD_FLAGS)" \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
@ -84,11 +84,11 @@ build-ee-query-service-amd64:
@echo "------------------"
@if [ $(DEV_BUILD) != "" ]; then \
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="${LD_FLAGS} ${DEV_LD_FLAGS}" .; \
else \
docker build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
-t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS="$(LD_FLAGS)" .; \
fi
@ -98,7 +98,7 @@ build-push-ee-query-service:
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@docker buildx build --file $(EE_QUERY_SERVICE_DIRECTORY)/Dockerfile \
--progress plane --no-cache --push --platform linux/arm64,linux/amd64 \
--progress plane --push --platform linux/arm64,linux/amd64 \
--build-arg LD_FLAGS="$(LD_FLAGS)" --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
dev-setup:
@ -119,16 +119,19 @@ down-local:
$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
down -v
run-x86:
pull-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
run-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d
down-x86:
down-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
clear-standalone-data:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhous*/* signoz/* zookeeper-*/*"
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-swarm-data:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhous*/* signoz/* zookeeper-*/*"
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"

View File

@ -137,7 +137,7 @@ services:
condition: on-failure
query-service:
image: signoz/query-service:0.13.0
image: signoz/query-service:0.13.1
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
@ -166,7 +166,7 @@ services:
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.13.0
image: signoz/frontend:0.13.1
deploy:
restart_policy:
condition: on-failure

View File

@ -65,6 +65,8 @@ receivers:
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
processors:
batch:
@ -104,11 +106,12 @@ exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
prometheus:
endpoint: 0.0.0.0:8889
# logging: {}
@ -147,9 +150,13 @@ service:
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
receivers: [hostmetrics, prometheus]
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]
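
The substantive change in this config is the pipeline split: the default `clickhousemetricswrite` exporter now enables `resource_to_telemetry_conversion`, which copies resource attributes onto each exported data point as ordinary metric labels, while the new `clickhousemetricswrite/prometheus` exporter handles the self-scraped Prometheus metrics without that conversion. Conceptually (a sketch of the semantics, not the collector's implementation):

```go
package main

import "fmt"

func main() {
	// With resource_to_telemetry_conversion enabled, resource attributes
	// are merged into each data point's label set before export.
	resource := map[string]string{"host.name": "web-1", "os.type": "linux"}
	labels := map[string]string{"cpu": "0", "state": "idle"}
	for k, v := range resource {
		labels[k] = v
	}
	fmt.Println(labels) // map[cpu:0 host.name:web-1 os.type:linux state:idle]
}
```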

View File

@ -8,6 +8,8 @@ receivers:
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector-metrics
# SigNoz span metrics
- job_name: signozspanmetrics-collector
scrape_interval: 60s

View File

@ -132,7 +132,7 @@ services:
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
image: signoz/alertmanager:0.23.0-0.2
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.0-0.2}
volumes:
- ./data/alertmanager:/data
depends_on:
@ -146,7 +146,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:0.13.0
image: signoz/query-service:${DOCKER_TAG:-0.13.1}
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
@ -174,7 +174,7 @@ services:
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.13.0
image: signoz/frontend:${DOCKER_TAG:-0.13.1}
container_name: frontend
restart: on-failure
depends_on:
@ -186,7 +186,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.66.1
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.1}
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
@ -211,7 +211,7 @@ services:
<<: *clickhouse-depend
otel-collector-metrics:
image: signoz/signoz-otel-collector:0.66.1
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.66.1}
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
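
Parameterizing the images as `${ALERTMANAGER_TAG:-0.23.0-0.2}`, `${DOCKER_TAG:-0.13.1}`, and `${OTELCOL_TAG:-0.66.1}` lets the staging and testing workflows above override tags from the environment while everyone else gets the pinned release defaults. Compose's `${VAR:-default}` falls back when the variable is unset or empty; the equivalent lookup in Go (illustrative):

```go
package main

import (
	"fmt"
	"os"
)

// envOr mirrors ${KEY:-def}: the default applies when the variable is
// unset or set to the empty string.
func envOr(key, def string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return def
}

func main() {
	fmt.Println("signoz/query-service:" + envOr("DOCKER_TAG", "0.13.1"))
}
```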

View File

@ -65,6 +65,9 @@ receivers:
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
processors:
batch:
@ -112,11 +115,12 @@ exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/?database=signoz_traces
docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
prometheus:
endpoint: 0.0.0.0:8889
# logging: {}
@ -151,9 +155,13 @@ service:
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
receivers: [hostmetrics, prometheus]
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]

View File

@ -12,6 +12,8 @@ receivers:
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector-metrics
# SigNoz span metrics
- job_name: signozspanmetrics-collector
scrape_interval: 60s

View File

@ -1,8 +1,11 @@
package app
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // http profiler
@ -266,15 +269,82 @@ func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}
func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFrom := "/api/v2/metrics/query_range"
var requestBody map[string]interface{}
data := map[string]interface{}{}
if path == pathToExtractBodyFrom && (r.Method == "POST") {
bodyBytes, _ := ioutil.ReadAll(r.Body)
r.Body.Close() // must close
r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
json.Unmarshal(bodyBytes, &requestBody)
} else {
return nil, false
}
compositeMetricQuery, compositeMetricQueryExists := requestBody["compositeMetricQuery"]
compositeMetricQueryMap := compositeMetricQuery.(map[string]interface{})
signozMetricFound := false
if compositeMetricQueryExists {
signozMetricFound = telemetry.GetInstance().CheckSigNozMetrics(compositeMetricQueryMap)
queryType, queryTypeExists := compositeMetricQueryMap["queryType"]
if queryTypeExists {
data["queryType"] = queryType
}
panelType, panelTypeExists := compositeMetricQueryMap["panelType"]
if panelTypeExists {
data["panelType"] = panelType
}
}
datasource, datasourceExists := requestBody["dataSource"]
if datasourceExists {
data["datasource"] = datasource
}
if !signozMetricFound {
telemetry.GetInstance().AddActiveMetricsUser()
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
}
return data, true
}
func getActiveLogs(path string, r *http.Request) {
// if path == "/api/v1/dashboards/{uuid}" {
// telemetry.GetInstance().AddActiveMetricsUser()
// }
if path == "/api/v1/logs" {
hasFilters := len(r.URL.Query().Get("q"))
if hasFilters > 0 {
telemetry.GetInstance().AddActiveLogsUser()
}
}
}
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
getActiveLogs(path, r)
lrw := NewLoggingResponseWriter(w)
next.ServeHTTP(lrw, r)
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
if metadataExists {
for key, value := range dashboardMetadata {
data[key] = value
}
}
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
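
One caveat in the new `extractDashboardMetaData`: the type assertion on `compositeMetricQuery` executes before the `compositeMetricQueryExists` check, so a POST body without that key (or with a non-map value) would panic the handler. A minimal sketch of the safer comma-ok form (illustrative, not part of this commit):

```go
package main

import "fmt"

func main() {
	// e.g. a decoded request body that lacks compositeMetricQuery
	requestBody := map[string]interface{}{"dataSource": "metrics"}

	// Comma-ok assertions avoid the panic the unchecked form raises
	// when the key is absent or holds an unexpected type.
	if raw, ok := requestBody["compositeMetricQuery"]; ok {
		if m, ok := raw.(map[string]interface{}); ok {
			fmt.Println(m["queryType"], m["panelType"])
		}
	}
	fmt.Println("handled without panic")
}
```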

View File

@ -108,12 +108,21 @@ function AllErrors(): JSX.Element {
enabled: !loading,
},
{
queryKey: ['getErrorCounts', maxTime, minTime],
queryKey: [
'getErrorCounts',
maxTime,
minTime,
getUpdatedExceptionType,
getUpdatedServiceName,
],
queryFn: (): Promise<ErrorResponse | SuccessResponse<number>> =>
getErrorCounts({
end: maxTime,
start: minTime,
exceptionType: getUpdatedExceptionType,
serviceName: getUpdatedServiceName,
}),
enabled: !loading,
},
]);

View File

@ -0,0 +1 @@
export const ITEMS_PER_PAGE_OPTIONS = [25, 50, 100, 200];

View File

@ -4,7 +4,7 @@ import {
RightOutlined,
} from '@ant-design/icons';
import { Button, Divider, Select } from 'antd';
import React, { memo } from 'react';
import React, { memo, useMemo } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import {
@ -15,16 +15,19 @@ import {
} from 'types/actions/logs';
import { ILogsReducer } from 'types/reducer/logs';
import { ITEMS_PER_PAGE_OPTIONS } from './config';
import { Container } from './styles';
const { Option } = Select;
const ITEMS_PER_PAGE_OPTIONS = [25, 50, 100, 200];
function LogControls(): JSX.Element | null {
const { logLinesPerPage, liveTail } = useSelector<AppState, ILogsReducer>(
(state) => state.logs,
);
const {
logLinesPerPage,
liveTail,
isLoading: isLogsLoading,
isLoadingAggregate,
logs,
} = useSelector<AppState, ILogsReducer>((state) => state.logs);
const dispatch = useDispatch();
const handleLogLinesPerPageChange = (e: number): void => {
@ -51,29 +54,58 @@ function LogControls(): JSX.Element | null {
});
};
const isLoading = isLogsLoading || isLoadingAggregate;
const isNextAndPreviousDisabled = useMemo(
() =>
isLoading ||
logLinesPerPage === 0 ||
logs.length === 0 ||
logs.length < logLinesPerPage,
[isLoading, logLinesPerPage, logs.length],
);
if (liveTail !== 'STOPPED') {
return null;
}
return (
<Container>
<Button size="small" type="link" onClick={handleGoToLatest}>
<Button
loading={isLoading}
size="small"
type="link"
onClick={handleGoToLatest}
>
<FastBackwardOutlined /> Go to latest
</Button>
<Divider type="vertical" />
<Button size="small" type="link" onClick={handleNavigatePrevious}>
<Button
loading={isLoading}
size="small"
type="link"
disabled={isNextAndPreviousDisabled}
onClick={handleNavigatePrevious}
>
<LeftOutlined /> Previous
</Button>
<Button size="small" type="link" onClick={handleNavigateNext}>
<Button
loading={isLoading}
size="small"
type="link"
disabled={isNextAndPreviousDisabled}
onClick={handleNavigateNext}
>
Next <RightOutlined />
</Button>
<Select
style={{ width: 120 }}
loading={isLoading}
value={logLinesPerPage}
onChange={handleLogLinesPerPageChange}
>
{ITEMS_PER_PAGE_OPTIONS.map((count) => {
return <Option key={count} value={count}>{`${count} / page`}</Option>;
})}
{ITEMS_PER_PAGE_OPTIONS.map((count) => (
<Option key={count} value={count}>{`${count} / page`}</Option>
))}
</Select>
</Container>
);

View File

@ -3,7 +3,7 @@ import Graph from 'components/Graph';
import Spinner from 'components/Spinner';
import dayjs from 'dayjs';
import getStep from 'lib/getStep';
import React, { memo, useEffect, useRef } from 'react';
import React, { memo, useEffect, useMemo, useRef } from 'react';
import { connect, useSelector } from 'react-redux';
import { bindActionCreators, Dispatch } from 'redux';
import { ThunkDispatch } from 'redux-thunk';
@ -77,14 +77,8 @@ function LogsAggregate({ getLogsAggregate }: LogsAggregateProps): JSX.Element {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [getLogsAggregate, maxTime, minTime, liveTail]);
return (
<Container>
{isLoadingAggregate ? (
<Spinner size="default" height="100%" />
) : (
<Graph
name="usage"
data={{
const graphData = useMemo(() => {
return {
labels: logsAggregate.map((s) => new Date(s.timestamp / 1000000)),
datasets: [
{
@ -92,7 +86,17 @@ function LogsAggregate({ getLogsAggregate }: LogsAggregateProps): JSX.Element {
backgroundColor: blue[4],
},
],
}}
};
}, [logsAggregate]);
return (
<Container>
{isLoadingAggregate ? (
<Spinner size="default" height="100%" />
) : (
<Graph
name="usage"
data={graphData}
type="bar"
containerHeight="100%"
animate

View File

@ -16,7 +16,12 @@ import { getLogs } from 'store/actions/logs/getLogs';
import { getLogsAggregate } from 'store/actions/logs/getLogsAggregate';
import { AppState } from 'store/reducers';
import AppActions from 'types/actions';
import { FLUSH_LOGS, TOGGLE_LIVE_TAIL } from 'types/actions/logs';
import {
FLUSH_LOGS,
SET_LOADING,
SET_LOADING_AGGREGATE,
TOGGLE_LIVE_TAIL,
} from 'types/actions/logs';
import { GlobalReducer } from 'types/reducer/globalTime';
import { ILogsReducer } from 'types/reducer/logs';
@ -120,14 +125,33 @@ function SearchFilter({
const urlQuery = useUrlQuery();
const urlQueryString = urlQuery.get('q');
const debouncedHandleSearch = useMemo(() => debounce(handleSearch, 600), [
handleSearch,
]);
useEffect(() => {
dispatch({
type: SET_LOADING,
payload: true,
});
dispatch({
type: SET_LOADING_AGGREGATE,
payload: true,
});
const debouncedHandleSearch = debounce(handleSearch, 600);
debouncedHandleSearch(urlQueryString || '');
return (): void => {
debouncedHandleSearch.cancel();
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [urlQueryString, maxTime, minTime, idEnd, idStart, logLinesPerPage]);
}, [
urlQueryString,
maxTime,
minTime,
idEnd,
idStart,
logLinesPerPage,
dispatch,
]);
return (
<Container>
@ -161,7 +185,6 @@ function SearchFilter({
debouncedupdateQueryString(value);
}}
allowClear
onSearch={handleSearch}
/>
</Popover>
</Container>
@ -169,12 +192,8 @@ function SearchFilter({
}
interface DispatchProps {
getLogs: (
props: Parameters<typeof getLogs>[0],
) => (dispatch: Dispatch<AppActions>) => void;
getLogsAggregate: (
props: Parameters<typeof getLogsAggregate>[0],
) => (dispatch: Dispatch<AppActions>) => void;
getLogs: typeof getLogs;
getLogsAggregate: typeof getLogsAggregate;
}
type SearchFilterProps = DispatchProps;

View File

@ -79,7 +79,7 @@ function DisplayName({
/>
<Button
onClick={onClickHandler}
disabled={isLoading}
disabled={isLoading || orgName === name}
loading={isLoading}
type="primary"
>

View File

@ -89,12 +89,14 @@ function SideNav(): JSX.Element {
},
];
const currentMenu = menus.find((menu) => pathname.startsWith(menu.to));
return (
<Sider collapsible collapsed={collapsed} onCollapse={onCollapse} width={200}>
<Menu
theme="dark"
defaultSelectedKeys={[ROUTES.APPLICATION]}
selectedKeys={[pathname]}
selectedKeys={currentMenu ? [currentMenu?.to] : []}
mode="inline"
>
{menus.map(({ to, Icon, name, tags }) => (

View File

@ -27,7 +27,7 @@ const menus: SidebarMenu[] = [
Icon: AlignLeftOutlined,
to: ROUTES.LOGS,
name: 'Logs',
tags: ['Beta'],
// tags: ['Beta'],
},
{
Icon: DashboardFilled,

View File

@ -6,6 +6,7 @@ import LogsAggregate from 'container/LogsAggregate';
import LogsFilters from 'container/LogsFilters';
import LogsSearchFilter from 'container/LogsSearchFilter';
import LogsTable from 'container/LogsTable';
import useMountedState from 'hooks/useMountedState';
import useUrlQuery from 'hooks/useUrlQuery';
import React, { memo, useEffect } from 'react';
import { connect, useDispatch } from 'react-redux';
@ -18,16 +19,21 @@ import { SET_SEARCH_QUERY_STRING } from 'types/actions/logs';
import SpaceContainer from './styles';
function Logs({ getLogsFields }: LogsProps): JSX.Element {
const urlQuery = useUrlQuery();
const getMountedState = useMountedState();
const urlQuery = useUrlQuery();
const dispatch = useDispatch();
useEffect(() => {
const hasMounted = getMountedState();
if (!hasMounted) {
dispatch({
type: SET_SEARCH_QUERY_STRING,
payload: urlQuery.get('q'),
});
}, [dispatch, urlQuery]);
}
}, [dispatch, getMountedState, urlQuery]);
useEffect(() => {
getLogsFields();

View File

@ -3,6 +3,8 @@ import { GlobalTime } from 'types/actions/globalTime';
export type Props = {
start: GlobalTime['minTime'];
end: GlobalTime['minTime'];
exceptionType: string;
serviceName: string;
};
export type PayloadProps = number;

View File

@ -1401,18 +1401,13 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo
var getFilterSpansResponseItems []model.GetFilterSpansResponseItem
baseQuery := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, durationNano, httpCode, gRPCCode, gRPCMethod, httpMethod, rpcMethod, responseStatusCode FROM %s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryTable)
baseQuery := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, durationNano, httpMethod, rpcMethod, responseStatusCode FROM %s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryTable)
baseQuery += query
err := r.db.Select(ctx, &getFilterSpansResponseItems, baseQuery, args...)
// Fill status and method
for i, e := range getFilterSpansResponseItems {
if e.GRPCode != "" {
getFilterSpansResponseItems[i].StatusCode = e.GRPCode
} else {
getFilterSpansResponseItems[i].StatusCode = e.HttpCode
}
if e.GRPMethod != "" {
getFilterSpansResponseItems[i].Method = e.GRPMethod
if e.RPCMethod != "" {
getFilterSpansResponseItems[i].Method = e.RPCMethod
} else {
getFilterSpansResponseItems[i].Method = e.HttpMethod
}
@ -2592,12 +2587,12 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C
query := fmt.Sprintf("SELECT count(distinct(groupID)) FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.errorTable)
args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
if len(queryParams.ServiceName) != 0 {
query = query + " AND serviceName = @serviceName"
args = append(args, clickhouse.Named("serviceName", queryParams.ServiceName))
query = query + " AND serviceName ilike @serviceName"
args = append(args, clickhouse.Named("serviceName", "%"+queryParams.ServiceName+"%"))
}
if len(queryParams.ExceptionType) != 0 {
query = query + " AND exceptionType = @exceptionType"
args = append(args, clickhouse.Named("exceptionType", queryParams.ExceptionType))
query = query + " AND exceptionType ilike @exceptionType"
args = append(args, clickhouse.Named("exceptionType", "%"+queryParams.ExceptionType+"%"))
}
err := r.db.QueryRow(ctx, query, args...).Scan(&errorCount)
zap.S().Info(query)
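
Switching the filters from `=` to `ilike` with `%`-wrapped parameters turns exact, case-sensitive matches into case-insensitive substring matches, which is what the updated error-count query on the frontend expects. A sketch of the pattern construction (hypothetical helper, not in the commit):

```go
package main

import "fmt"

// ilikePattern wraps a user-supplied filter value in % wildcards so
// ClickHouse ILIKE treats it as a case-insensitive substring match.
// Note: a literal % or _ in the value would still act as a wildcard.
func ilikePattern(v string) string {
	return "%" + v + "%"
}

func main() {
	fmt.Println(ilikePattern("Redis")) // %Redis% — also matches "redis-writer"
}
```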

View File

@ -507,10 +507,14 @@ func parseCountErrorsRequest(r *http.Request) (*model.CountErrorsParams, error)
if err != nil {
return nil, err
}
serviceName := r.URL.Query().Get("serviceName")
exceptionType := r.URL.Query().Get("exceptionType")
params := &model.CountErrorsParams{
Start: startTime,
End: endTime,
ServiceName: serviceName,
ExceptionType: exceptionType,
}
return params, nil

View File

@ -247,7 +247,6 @@ func extractDashboardMetaData(path string, r *http.Request) (map[string]interfac
bodyBytes, _ := ioutil.ReadAll(r.Body)
r.Body.Close() // must close
r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
json.Unmarshal(bodyBytes, &requestBody)
} else {
@ -278,7 +277,7 @@ func extractDashboardMetaData(path string, r *http.Request) (map[string]interfac
if !signozMetricFound {
telemetry.GetInstance().AddActiveMetricsUser()
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, false)
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, true)
}
return data, true
@ -316,11 +315,11 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
}
}
if telemetry.GetInstance().IsSampled() {
// if telemetry.GetInstance().IsSampled() {
if _, ok := telemetry.IgnoredPaths()[path]; !ok {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
}
}
// }
})
}

View File

@ -12,6 +12,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/dao"
"go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
@ -386,6 +387,8 @@ func Login(ctx context.Context, request *model.LoginRequest) (*model.LoginRespon
return nil, err
}
telemetry.GetInstance().IdentifyUser(&user.User)
return &model.LoginResponse{
UserJwtObject: userjwt,
UserId: user.User.Id,

View File

@ -164,6 +164,8 @@ func (mds *ModelDaoSqlite) EditOrg(ctx context.Context, org *model.Organization)
}
telemetry.GetInstance().SetTelemetryAnonymous(org.IsAnonymous)
telemetry.GetInstance().SetDistinctId(org.Id)
return nil
}

View File

@ -180,13 +180,9 @@ type GetFilterSpansResponseItem struct {
ServiceName string `ch:"serviceName" json:"serviceName"`
Operation string `ch:"name" json:"operation"`
DurationNano uint64 `ch:"durationNano" json:"durationNano"`
HttpCode string `ch:"httpCode"`
HttpMethod string `ch:"httpMethod"`
GRPCode string `ch:"gRPCCode"`
GRPMethod string `ch:"gRPCMethod"`
StatusCode string `json:"statusCode"`
Method string `json:"method"`
ResponseStatusCode string `ch:"responseStatusCode"`
ResponseStatusCode string `ch:"responseStatusCode" json:"statusCode"`
RPCMethod string `ch:"rpcMethod"`
}
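
The slimmed struct maps one column straight to one API field: `ResponseStatusCode` carries both the `ch` tag for scanning and `json:"statusCode"` for the response, replacing the removed HTTP/gRPC code fields and most of the post-processing copy in `GetFilteredSpans`. A reduced sketch of how those tags behave (illustrative):

```go
package main

import (
	"encoding/json"
	"os"
)

type item struct {
	ResponseStatusCode string `ch:"responseStatusCode" json:"statusCode"`
	RPCMethod          string `ch:"rpcMethod"` // no json tag: emitted as "RPCMethod"
}

func main() {
	// Prints: {"statusCode":"200","RPCMethod":"Get"}
	json.NewEncoder(os.Stdout).Encode(item{ResponseStatusCode: "200", RPCMethod: "Get"})
}
```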

View File

@ -37,6 +37,7 @@ const (
TELEMETRY_EVENT_DISTRIBUTED = "Distributed"
TELEMETRY_EVENT_DASHBOARDS_METADATA = "Dashboards Metadata"
TELEMETRY_EVENT_ACTIVE_USER = "Active User"
TELEMETRY_EVENT_ACTIVE_USER_PH = "Active User V2"
)
const api_key = "4Gmoa4ixJAUHx2BpJxsjwA1bEfnwEeRz"
@ -47,7 +48,7 @@ const DEFAULT_NUMBER_OF_SERVICES = 6
const HEART_BEAT_DURATION = 6 * time.Hour
// const HEART_BEAT_DURATION = 10 * time.Second
// const HEART_BEAT_DURATION = 30 * time.Second
const RATE_LIMIT_CHECK_DURATION = 1 * time.Minute
const RATE_LIMIT_VALUE = 2
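
For context, the surrounding constants cap telemetry volume: at most `RATE_LIMIT_VALUE` sends per key within each `RATE_LIMIT_CHECK_DURATION` window. The enforcement lives elsewhere in this file; a toy sketch of such a windowed counter (assumed semantics, not the repo's code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const limit = 2                            // cf. RATE_LIMIT_VALUE
	window := time.NewTicker(1 * time.Minute)  // cf. RATE_LIMIT_CHECK_DURATION
	defer window.Stop()

	sent := map[string]int{}
	trySend := func(key string) bool {
		select {
		case <-window.C: // new window: reset all counters
			sent = map[string]int{}
		default:
		}
		if sent[key] >= limit {
			return false
		}
		sent[key]++
		return true
	}

	for i := 0; i < 3; i++ {
		fmt.Println(trySend("event")) // true, true, false
	}
}
```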
@ -88,13 +89,19 @@ func (telemetry *Telemetry) CheckSigNozMetrics(compositeMetricQueryMap map[strin
}
func (telemetry *Telemetry) AddActiveTracesUser() {
telemetry.mutex.Lock()
telemetry.activeUser["traces"] = 1
telemetry.mutex.Unlock()
}
func (telemetry *Telemetry) AddActiveMetricsUser() {
telemetry.mutex.Lock()
telemetry.activeUser["metrics"] = 1
telemetry.mutex.Unlock()
}
func (telemetry *Telemetry) AddActiveLogsUser() {
telemetry.mutex.Lock()
telemetry.activeUser["logs"] = 1
telemetry.mutex.Unlock()
}
type Telemetry struct {
@ -111,6 +118,7 @@ type Telemetry struct {
rateLimits map[string]int8
activeUser map[string]int8
countUsers int8
mutex sync.RWMutex
}
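
The new `mutex sync.RWMutex` field is what makes the `AddActive*User` helpers above safe: they can run from concurrent HTTP handlers, and unsynchronized writes to a plain Go map are a fatal runtime error. A self-contained sketch of the pattern (names abbreviated):

```go
package main

import (
	"fmt"
	"sync"
)

type telemetry struct {
	mu         sync.RWMutex
	activeUser map[string]int8
}

func (t *telemetry) addActiveUser(kind string) {
	t.mu.Lock() // serialize map writes across goroutines
	defer t.mu.Unlock()
	t.activeUser[kind] = 1
}

func main() {
	t := &telemetry{activeUser: map[string]int8{}}
	var wg sync.WaitGroup
	for _, k := range []string{"traces", "metrics", "logs"} {
		wg.Add(1)
		go func(k string) { defer wg.Done(); t.addActiveUser(k) }(k)
	}
	wg.Wait()
	fmt.Println(len(t.activeUser)) // 3
}
```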
func createTelemetry() {
@ -221,10 +229,10 @@ func getOutboundIP() string {
}
func (a *Telemetry) IdentifyUser(user *model.User) {
a.SetCompanyDomain(user.Email)
if !a.isTelemetryEnabled() || a.isTelemetryAnonymous() {
return
}
a.SetCompanyDomain(user.Email)
a.operator.Enqueue(analytics.Identify{
UserId: a.ipAddress,
@ -322,6 +330,17 @@ func (a *Telemetry) SendEvent(event string, data map[string]interface{}, opts ..
})
}
if event == TELEMETRY_EVENT_ACTIVE_USER {
a.phOperator.Enqueue(ph.Capture{
DistinctId: userId,
Event: TELEMETRY_EVENT_ACTIVE_USER_PH,
Properties: ph.Properties(properties),
Groups: ph.NewGroups().
Set("companyDomain", a.getCompanyDomain()),
})
}
}
func (a *Telemetry) GetDistinctId() string {