From 13a6d7f7c6d0007466f20e802522152be58bc6ba Mon Sep 17 00:00:00 2001
From: Palash Gupta
Date: Tue, 27 Dec 2022 13:36:37 +0530
Subject: [PATCH 1/7] fix: live tail time out is updated (#1899)

* fix: live tail time out is updated

* Update livetail.ts

Co-authored-by: Pranay Prateek
Co-authored-by: Ankit Nayan
---
 frontend/src/api/logs/livetail.ts | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/frontend/src/api/logs/livetail.ts b/frontend/src/api/logs/livetail.ts
index f1cec5472a..150f63d193 100644
--- a/frontend/src/api/logs/livetail.ts
+++ b/frontend/src/api/logs/livetail.ts
@@ -4,14 +4,16 @@ import { ENVIRONMENT } from 'constants/env';
 import { LOCALSTORAGE } from 'constants/localStorage';
 import { EventSourcePolyfill } from 'event-source-polyfill';
 
-export const LiveTail = (queryParams: string): EventSourcePolyfill => {
-	const dict = {
-		headers: {
-			Authorization: `Bearer ${getLocalStorageKey(LOCALSTORAGE.AUTH_TOKEN)}`,
-		},
-	};
-	return new EventSourcePolyfill(
+// 10 min in ms
+const TIMEOUT_IN_MS = 10 * 60 * 1000;
+
+export const LiveTail = (queryParams: string): EventSourcePolyfill =>
+	new EventSourcePolyfill(
 		`${ENVIRONMENT.baseURL}${apiV1}logs/tail?${queryParams}`,
-		dict,
+		{
+			headers: {
+				Authorization: `Bearer ${getLocalStorageKey(LOCALSTORAGE.AUTH_TOKEN)}`,
+			},
+			heartbeatTimeout: TIMEOUT_IN_MS,
+		},
 	);
-};

From 48a6f536fad59d38b5f791d18d56faa342663498 Mon Sep 17 00:00:00 2001
From: Srikanth Chekuri
Date: Tue, 27 Dec 2022 15:44:39 +0530
Subject: [PATCH 2/7] chore: increase dimensions_cache_size for signozspanmetrics processor (#1925)

---
 deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml | 2 +-
 deploy/docker/clickhouse-setup/otel-collector-config.yaml       | 2 +-
 pkg/query-service/tests/test-deploy/otel-collector-config.yaml  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml b/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml
index c198c2f57c..0636b518cf 100644
--- a/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml
+++ b/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml
@@ -78,7 +78,7 @@ processors:
   signozspanmetrics/prometheus:
     metrics_exporter: prometheus
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
-    dimensions_cache_size: 10000
+    dimensions_cache_size: 100000
     dimensions:
       - name: service.namespace
         default: default
diff --git a/deploy/docker/clickhouse-setup/otel-collector-config.yaml b/deploy/docker/clickhouse-setup/otel-collector-config.yaml
index 2829bf966f..c6773d187d 100644
--- a/deploy/docker/clickhouse-setup/otel-collector-config.yaml
+++ b/deploy/docker/clickhouse-setup/otel-collector-config.yaml
@@ -74,7 +74,7 @@ processors:
   signozspanmetrics/prometheus:
     metrics_exporter: prometheus
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
-    dimensions_cache_size: 10000
+    dimensions_cache_size: 100000
     dimensions:
       - name: service.namespace
         default: default
diff --git a/pkg/query-service/tests/test-deploy/otel-collector-config.yaml b/pkg/query-service/tests/test-deploy/otel-collector-config.yaml
index 9f7ee3fd18..517c7bc643 100644
--- a/pkg/query-service/tests/test-deploy/otel-collector-config.yaml
+++ b/pkg/query-service/tests/test-deploy/otel-collector-config.yaml
@@ -74,7 +74,7 @@ processors:
   signozspanmetrics/prometheus:
     metrics_exporter: prometheus
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
-    dimensions_cache_size: 10000
+    dimensions_cache_size: 100000
     dimensions:
       - name: service.namespace
         default: default

From 40ec4517c27cde70f8a2436e55741ed01a411e22 Mon Sep 17 00:00:00 2001
From: Palash Gupta
Date: Tue, 27 Dec 2022 19:01:56 +0530
Subject: [PATCH 3/7] fix: per page is added in the dependancy (#1926)

---
 frontend/src/container/LogsSearchFilter/index.tsx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/frontend/src/container/LogsSearchFilter/index.tsx b/frontend/src/container/LogsSearchFilter/index.tsx
index c7e79b14ca..b5cb71faa9 100644
--- a/frontend/src/container/LogsSearchFilter/index.tsx
+++ b/frontend/src/container/LogsSearchFilter/index.tsx
@@ -127,7 +127,7 @@ function SearchFilter({
 	useEffect(() => {
 		debouncedHandleSearch(urlQueryString || '');
 		// eslint-disable-next-line react-hooks/exhaustive-deps
-	}, [urlQueryString, maxTime, minTime, idEnd, idStart]);
+	}, [urlQueryString, maxTime, minTime, idEnd, idStart, logLinesPerPage]);
 
 	return (

From 35f5fb69578dc70c896bd9fd94da902837cf5923 Mon Sep 17 00:00:00 2001
From: Vishal Sharma
Date: Tue, 27 Dec 2022 21:09:36 +0530
Subject: [PATCH 4/7] fix: respect durationSort feature flag on getSpanFilters API (#1900)

* fix: respect durationSort feature flag on getSpanFilters API

* chore: update DB query
---
 .../app/clickhouseReader/reader.go  | 71 ++++++++++++-------
 pkg/query-service/model/response.go |  5 ++
 2 files changed, 51 insertions(+), 25 deletions(-)

diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go
index ab07dbbd23..c7f1e45816 100644
--- a/pkg/query-service/app/clickhouseReader/reader.go
+++ b/pkg/query-service/app/clickhouseReader/reader.go
@@ -1177,33 +1177,54 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 			traceFilterReponse.Status = map[string]uint64{"ok": 0, "error": 0}
 		}
 	case constants.Duration:
-		finalQuery := fmt.Sprintf("SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.durationTable)
-		finalQuery += query
-		finalQuery += " ORDER BY durationNano LIMIT 1"
-		var dBResponse []model.DBResponseTotal
-		err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
-		zap.S().Info(finalQuery)
+		err := r.featureFlags.CheckFeature(constants.DurationSort)
+		durationSortEnabled := err == nil
+		finalQuery := ""
+		if !durationSortEnabled {
+			// if duration sort is not enabled, we need to get the min and max duration from the index table
+			finalQuery = fmt.Sprintf("SELECT min(durationNano) as min, max(durationNano) as max FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
+			finalQuery += query
+			var dBResponse []model.DBResponseMinMax
+			err = r.db.Select(ctx, &dBResponse, finalQuery, args...)
+			zap.S().Info(finalQuery)
+			if err != nil {
+				zap.S().Debug("Error in processing sql query: ", err)
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+			}
+			if len(dBResponse) > 0 {
+				traceFilterReponse.Duration = map[string]uint64{"minDuration": dBResponse[0].Min, "maxDuration": dBResponse[0].Max}
+			}
+		} else {
+			// when duration sort is enabled, we need to get the min and max duration from the duration table
+			finalQuery = fmt.Sprintf("SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.durationTable)
+			finalQuery += query
+			finalQuery += " ORDER BY durationNano LIMIT 1"
+			var dBResponse []model.DBResponseTotal
+			err = r.db.Select(ctx, &dBResponse, finalQuery, args...)
+			zap.S().Info(finalQuery)
 
-		if err != nil {
-			zap.S().Debug("Error in processing sql query: ", err)
-			return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
-		}
-		finalQuery = fmt.Sprintf("SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.durationTable)
-		finalQuery += query
-		finalQuery += " ORDER BY durationNano DESC LIMIT 1"
-		var dBResponse2 []model.DBResponseTotal
-		err = r.db.Select(ctx, &dBResponse2, finalQuery, args...)
-		zap.S().Info(finalQuery)
+			if err != nil {
+				zap.S().Debug("Error in processing sql query: ", err)
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+			}
 
-		if err != nil {
-			zap.S().Debug("Error in processing sql query: ", err)
-			return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
-		}
-		if len(dBResponse) > 0 {
-			traceFilterReponse.Duration["minDuration"] = dBResponse[0].NumTotal
-		}
-		if len(dBResponse2) > 0 {
-			traceFilterReponse.Duration["maxDuration"] = dBResponse2[0].NumTotal
+			finalQuery = fmt.Sprintf("SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.durationTable)
+			finalQuery += query
+			finalQuery += " ORDER BY durationNano DESC LIMIT 1"
+			var dBResponse2 []model.DBResponseTotal
+			err = r.db.Select(ctx, &dBResponse2, finalQuery, args...)
+			zap.S().Info(finalQuery)
+
+			if err != nil {
+				zap.S().Debug("Error in processing sql query: ", err)
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+			}
+			if len(dBResponse) > 0 {
+				traceFilterReponse.Duration["minDuration"] = dBResponse[0].NumTotal
+			}
+			if len(dBResponse2) > 0 {
+				traceFilterReponse.Duration["maxDuration"] = dBResponse2[0].NumTotal
+			}
 		}
 	case constants.RPCMethod:
 		finalQuery := fmt.Sprintf("SELECT rpcMethod, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go
index 7ae79be456..d80a56c642 100644
--- a/pkg/query-service/model/response.go
+++ b/pkg/query-service/model/response.go
@@ -399,6 +399,11 @@ type DBResponseTotal struct {
 	NumTotal uint64 `ch:"numTotal"`
 }
 
+type DBResponseMinMax struct {
+	Min uint64 `ch:"min"`
+	Max uint64 `ch:"max"`
+}
+
 type SpanFiltersResponse struct {
 	ServiceName map[string]uint64 `json:"serviceName"`
 	Status      map[string]uint64 `json:"status"`

From 895c721b37f09371bb3f3d828a23c7713146295d Mon Sep 17 00:00:00 2001
From: Yash Joshi
Date: Tue, 27 Dec 2022 23:13:13 +0530
Subject: [PATCH 5/7] fix(version): use link instead of click handler (#1931)

Co-authored-by: Palash Gupta
---
 frontend/src/container/Version/index.tsx | 18 +++++------------
 1 file changed, 5 insertions(+), 13 deletions(-)

diff --git a/frontend/src/container/Version/index.tsx b/frontend/src/container/Version/index.tsx
index e36a0c6d18..cad08351e3 100644
--- a/frontend/src/container/Version/index.tsx
+++ b/frontend/src/container/Version/index.tsx
@@ -1,6 +1,6 @@
 import { WarningFilled } from '@ant-design/icons';
 import { Button, Card, Form, Space, Typography } from 'antd';
-import React, { useCallback } from 'react';
+import React from 'react';
 import { useTranslation } from 'react-i18next';
 import { useSelector } from 'react-redux';
 import { AppState } from 'store/reducers';
@@ -14,10 +14,6 @@ function Version(): JSX.Element {
 	const [form] = Form.useForm();
 	const { t } = useTranslation();
 
-	const onClickUpgradeHandler = useCallback((link: string) => {
-		window.open(link, '_blank');
-	}, []);
-
 	const {
 		currentVersion,
 		latestVersion,
@@ -60,9 +56,8 @@ function Version(): JSX.Element {
 						placeholder={t('latest_version')}
 					/>

From b11f79b4c717c2b4a1fc0bfa422c533ff32db829 Mon Sep 17 00:00:00 2001
From: Ankit Nayan
Date: Wed, 28 Dec 2022 02:16:46 +0530
Subject: [PATCH 6/7] Chore/analytics (#1922)

* fix: reduced rate limit to 2 of each events in 1 min

* feat: added new event for length of filters in logs search page

* feat: added distributed cluster info

* fix: length of filters in logs

* feat: dashboard metadata with no rateLimit

* feat: active user

Co-authored-by: Srikanth Chekuri
---
 .../app/clickhouseReader/reader.go        | 36 +++++++++-
 pkg/query-service/app/http_handler.go     |  3 +
 pkg/query-service/app/logs/parser.go      | 11 ++--
 pkg/query-service/app/server.go           | 65 +++++++++++++++++++
 pkg/query-service/interfaces/interface.go |  1 +
 pkg/query-service/model/response.go       | 16 +++++
 pkg/query-service/telemetry/telemetry.go  | 46 +++++++++++--
 7 files changed, 165 insertions(+), 13 deletions(-)

diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go
index c7f1e45816..a9626d887b 100644
--- a/pkg/query-service/app/clickhouseReader/reader.go
+++ b/pkg/query-service/app/clickhouseReader/reader.go
@@ -45,6 +45,7 @@ import (
 	am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
 	"go.signoz.io/signoz/pkg/query-service/interfaces"
 	"go.signoz.io/signoz/pkg/query-service/model"
+	"go.signoz.io/signoz/pkg/query-service/telemetry"
 	"go.signoz.io/signoz/pkg/query-service/utils"
 	"go.uber.org/zap"
 )
@@ -3088,6 +3089,20 @@ func (r *ClickHouseReader) GetSamplesInfoInLastHeartBeatInterval(ctx context.Con
 	return totalSamples, nil
 }
 
+
+func (r *ClickHouseReader) GetDistributedInfoInLastHeartBeatInterval(ctx context.Context) (map[string]interface{}, error) {
+
+	clusterInfo := []model.ClusterInfo{}
+
+	queryStr := `SELECT shard_num, shard_weight, replica_num, errors_count, slowdowns_count, estimated_recovery_time FROM system.clusters where cluster='cluster';`
+	r.db.Select(ctx, &clusterInfo, queryStr)
+	if len(clusterInfo) == 1 {
+		return clusterInfo[0].GetMapFromStruct(), nil
+	}
+
+	return nil, nil
+}
+
 func (r *ClickHouseReader) GetLogsInfoInLastHeartBeatInterval(ctx context.Context) (uint64, error) {
 
 	var totalLogLines uint64
@@ -3233,11 +3248,16 @@ func (r *ClickHouseReader) GetLogs(ctx context.Context, params *model.LogsFilter
 	}
 
 	isPaginatePrev := logs.CheckIfPrevousPaginateAndModifyOrder(params)
-	filterSql, err := logs.GenerateSQLWhere(fields, params)
+	filterSql, lenFilters, err := logs.GenerateSQLWhere(fields, params)
 	if err != nil {
 		return nil, &model.ApiError{Err: err, Typ: model.ErrorBadData}
 	}
+	data := map[string]interface{}{
+		"lenFilters": lenFilters,
+	}
+	telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LOGS_FILTERS, data)
+
 	query := fmt.Sprintf("%s from %s.%s", constants.LogsSQLSelect, r.logsDB, r.logsTable)
 
 	if filterSql != "" {
@@ -3267,10 +3287,15 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
 		return
 	}
 
-	filterSql, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
+	filterSql, lenFilters, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
 		Query: client.Filter.Query,
 	})
 
+	data := map[string]interface{}{
+		"lenFilters": lenFilters,
+	}
+	telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LOGS_FILTERS, data)
+
 	if err != nil {
 		client.Error <- err
 		return
@@ -3347,13 +3372,18 @@ func (r *ClickHouseReader) AggregateLogs(ctx context.Context, params *model.Logs
 		return nil, apiErr
 	}
 
-	filterSql, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
+	filterSql, lenFilters, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
 		Query: params.Query,
 	})
 	if err != nil {
 		return nil, &model.ApiError{Err: err, Typ: model.ErrorBadData}
 	}
 
+	data := map[string]interface{}{
+		"lenFilters": lenFilters,
+	}
+	telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LOGS_FILTERS, data)
+
 	query := ""
 	if params.GroupBy != "" {
 		query = fmt.Sprintf("SELECT toInt64(toUnixTimestamp(toStartOfInterval(toDateTime(timestamp/1000000000), INTERVAL %d minute))*1000000000) as ts_start_interval, toString(%s) as groupBy, "+
diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go
index 69c0f92b8d..927b825792 100644
--- a/pkg/query-service/app/http_handler.go
+++ b/pkg/query-service/app/http_handler.go
@@ -1333,6 +1333,9 @@ func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) {
 	}
 
 	telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_NUMBER_OF_SERVICES, data)
+	if (data["number"] != 0) || (data["number"] != telemetry.DEFAULT_NUMBER_OF_SERVICES) {
+		telemetry.GetInstance().AddActiveTracesUser()
+	}
 	aH.WriteJSON(w, r, result)
 }
 
diff --git a/pkg/query-service/app/logs/parser.go b/pkg/query-service/app/logs/parser.go
index 5e4a195817..18760ba972 100644
--- a/pkg/query-service/app/logs/parser.go
+++ b/pkg/query-service/app/logs/parser.go
@@ -279,20 +279,23 @@ func CheckIfPrevousPaginateAndModifyOrder(params *model.LogsFilterParams) (isPag
 	return
 }
 
-func GenerateSQLWhere(allFields *model.GetFieldsResponse, params *model.LogsFilterParams) (string, error) {
+func GenerateSQLWhere(allFields *model.GetFieldsResponse, params *model.LogsFilterParams) (string, int, error) {
 	var tokens []string
 	var err error
 	var sqlWhere string
+	var lenTokens = 0
 	if params.Query != "" {
 		tokens, err = parseLogQuery(params.Query)
+
 		if err != nil {
-			return sqlWhere, err
+			return sqlWhere, -1, err
 		}
+		lenTokens = len(tokens)
 	}
 
 	tokens, err = replaceInterestingFields(allFields, tokens)
 	if err != nil {
-		return sqlWhere, err
+		return sqlWhere, -1, err
 	}
 
 	filterTokens := []string{}
@@ -342,5 +345,5 @@ func GenerateSQLWhere(allFields *model.GetFieldsResponse, params *model.LogsFilt
 
 	sqlWhere = strings.Join(tokens, "")
 
-	return sqlWhere, nil
+	return sqlWhere, lenTokens, nil
 }
diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go
index b79b7d011f..3f00483d10 100644
--- a/pkg/query-service/app/server.go
+++ b/pkg/query-service/app/server.go
@@ -1,8 +1,11 @@
 package app
 
 import (
+	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
+	"io/ioutil"
 	"net"
 	"net/http"
 	_ "net/http/pprof" // http profiler
@@ -235,15 +238,77 @@ func (lrw *loggingResponseWriter) Flush() {
 	lrw.ResponseWriter.(http.Flusher).Flush()
 }
 
+func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
+	pathToExtractBodyFrom := "/api/v2/metrics/query_range"
+	var requestBody map[string]interface{}
+	data := map[string]interface{}{}
+
+	if path == pathToExtractBodyFrom && (r.Method == "POST") {
+		bodyBytes, _ := ioutil.ReadAll(r.Body)
+		r.Body.Close() // must close
+		r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
+
+		json.Unmarshal(bodyBytes, &requestBody)
+
+	} else {
+		return nil, false
+	}
+
+	compositeMetricQuery, compositeMetricQueryExists := requestBody["compositeMetricQuery"]
+	compositeMetricQueryMap := compositeMetricQuery.(map[string]interface{})
+	if compositeMetricQueryExists {
+		queryType, queryTypeExists := compositeMetricQueryMap["queryType"]
+		if queryTypeExists {
+			data["queryType"] = queryType
+		}
+		panelType, panelTypeExists := compositeMetricQueryMap["panelType"]
+		if panelTypeExists {
+			data["panelType"] = panelType
+		}
+	}
+
+	datasource, datasourceExists := requestBody["dataSource"]
+	if datasourceExists {
+		data["datasource"] = datasource
+	}
+
+	telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, false)
+
+	return data, true
+}
+
+func getActiveMetricsOrLogs(path string, r *http.Request) {
+	if path == "/api/v1/dashboards/{uuid}" {
+		telemetry.GetInstance().AddActiveMetricsUser()
+	}
+	if path == "/api/v1/logs" {
+		hasFilters := len(r.URL.Query().Get("q"))
+		if hasFilters > 0 {
+			telemetry.GetInstance().AddActiveLogsUser()
+		}
+
+	}
+
+}
+
 func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		route := mux.CurrentRoute(r)
 		path, _ := route.GetPathTemplate()
 
+		dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
+		getActiveMetricsOrLogs(path, r)
+
 		lrw := NewLoggingResponseWriter(w)
 		next.ServeHTTP(lrw, r)
 
 		data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
+		if metadataExists {
+			for key, value := range dashboardMetadata {
+				data[key] = value
+			}
+		}
+
 		if telemetry.GetInstance().IsSampled() {
 			if _, ok := telemetry.IgnoredPaths()[path]; !ok {
 				telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
diff --git a/pkg/query-service/interfaces/interface.go b/pkg/query-service/interfaces/interface.go
index 3200b10c2f..bcaf889ab6 100644
--- a/pkg/query-service/interfaces/interface.go
+++ b/pkg/query-service/interfaces/interface.go
@@ -63,6 +63,7 @@ type Reader interface {
 	GetSamplesInfoInLastHeartBeatInterval(ctx context.Context) (uint64, error)
 	GetLogsInfoInLastHeartBeatInterval(ctx context.Context) (uint64, error)
 	GetTagsInfoInLastHeartBeatInterval(ctx context.Context) (*model.TagsInfo, error)
+	GetDistributedInfoInLastHeartBeatInterval(ctx context.Context) (map[string]interface{}, error)
 	// Logs
 	GetLogFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError)
 	UpdateLogField(ctx context.Context, field *model.UpdateField) *model.ApiError
diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go
index d80a56c642..5441f894b2 100644
--- a/pkg/query-service/model/response.go
+++ b/pkg/query-service/model/response.go
@@ -569,3 +569,19 @@ type TagTelemetryData struct {
 	Env      string `json:"env" ch:"env"`
 	Language string `json:"language" ch:"language"`
 }
+
+type ClusterInfo struct {
+	ShardNum              uint32 `json:"shard_num" ch:"shard_num"`
+	ShardWeight           uint32 `json:"shard_weight" ch:"shard_weight"`
+	ReplicaNum            uint32 `json:"replica_num" ch:"replica_num"`
+	ErrorsCount           uint32 `json:"errors_count" ch:"errors_count"`
+	SlowdownsCount        uint32 `json:"slowdowns_count" ch:"slowdowns_count"`
+	EstimatedRecoveryTime uint32 `json:"estimated_recovery_time" ch:"estimated_recovery_time"`
+}
+
+func (ci *ClusterInfo) GetMapFromStruct() map[string]interface{} {
+	var clusterInfoMap map[string]interface{}
+	data, _ := json.Marshal(*ci)
+	json.Unmarshal(data, &clusterInfoMap)
+	return clusterInfoMap
+}
diff --git a/pkg/query-service/telemetry/telemetry.go b/pkg/query-service/telemetry/telemetry.go
index 43f1652093..4e00933151 100644
--- a/pkg/query-service/telemetry/telemetry.go
+++ b/pkg/query-service/telemetry/telemetry.go
@@ -32,19 +32,24 @@ const (
 	TELEMETRY_LICENSE_ACT_FAILED = "License Activation Failed"
 	TELEMETRY_EVENT_ENVIRONMENT  = "Environment"
 	TELEMETRY_EVENT_LANGUAGE     = "Language"
+	TELEMETRY_EVENT_LOGS_FILTERS = "Logs Filters"
+	TELEMETRY_EVENT_DISTRIBUTED  = "Distributed"
+	TELEMETRY_EVENT_DASHBOARDS_METADATA = "Dashboards Metadata"
+	TELEMETRY_EVENT_ACTIVE_USER  = "Active User"
 )
 
 const api_key = "4Gmoa4ixJAUHx2BpJxsjwA1bEfnwEeRz"
 const ph_api_key = "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w"
 
 const IP_NOT_FOUND_PLACEHOLDER = "NA"
+const DEFAULT_NUMBER_OF_SERVICES = 6
 
 const HEART_BEAT_DURATION = 6 * time.Hour
 
 // const HEART_BEAT_DURATION = 10 * time.Second
 
 const RATE_LIMIT_CHECK_DURATION = 1 * time.Minute
-const RATE_LIMIT_VALUE = 10
+const RATE_LIMIT_VALUE = 2
 
 // const RATE_LIMIT_CHECK_DURATION = 20 * time.Second
 // const RATE_LIMIT_VALUE = 5
@@ -64,6 +69,16 @@ func (a *Telemetry) IsSampled() bool {
 
 }
 
+func (telemetry *Telemetry) AddActiveTracesUser() {
+	telemetry.activeUser["traces"] = 1
+}
+func (telemetry *Telemetry) AddActiveMetricsUser() {
+	telemetry.activeUser["metrics"] = 1
+}
+func (telemetry *Telemetry) AddActiveLogsUser() {
+	telemetry.activeUser["logs"] = 1
+}
+
 type Telemetry struct {
 	operator     analytics.Client
 	phOperator   ph.Client
@@ -76,6 +91,7 @@ type Telemetry struct {
 	minRandInt   int
 	maxRandInt   int
 	rateLimits   map[string]int8
+	activeUser   map[string]int8
 }
 
 func createTelemetry() {
@@ -111,6 +127,13 @@ func createTelemetry() {
 		for {
 			select {
 			case <-ticker.C:
+
+				if (telemetry.activeUser["traces"] != 0) || (telemetry.activeUser["metrics"] != 0) || (telemetry.activeUser["logs"] != 0) {
+					telemetry.activeUser["any"] = 1
+				}
+				telemetry.SendEvent(TELEMETRY_EVENT_ACTIVE_USER, map[string]interface{}{"traces": telemetry.activeUser["traces"], "metrics": telemetry.activeUser["metrics"], "logs": telemetry.activeUser["logs"], "any": telemetry.activeUser["any"]})
+				telemetry.activeUser = map[string]int8{"traces": 0, "metrics": 0, "logs": 0, "any": 0}
+
 				tagsInfo, _ := telemetry.reader.GetTagsInfoInLastHeartBeatInterval(context.Background())
 
 				if len(tagsInfo.Env) != 0 {
@@ -138,6 +161,10 @@ func createTelemetry() {
 					data[key] = value
 				}
 				telemetry.SendEvent(TELEMETRY_EVENT_HEART_BEAT, data)
+
+				getDistributedInfoInLastHeartBeatInterval, _ := telemetry.reader.GetDistributedInfoInLastHeartBeatInterval(context.Background())
+				telemetry.SendEvent(TELEMETRY_EVENT_DISTRIBUTED, getDistributedInfoInLastHeartBeatInterval)
+
 			}
 		}
 	}()
@@ -207,7 +234,12 @@ func (a *Telemetry) checkEvents(event string) bool {
 	return sendEvent
 }
 
-func (a *Telemetry) SendEvent(event string, data map[string]interface{}) {
+func (a *Telemetry) SendEvent(event string, data map[string]interface{}, opts ...bool) {
+
+	rateLimitFlag := true
+	if len(opts) > 0 {
+		rateLimitFlag = opts[0]
+	}
 
 	if !a.isTelemetryEnabled() {
 		return
@@ -218,10 +250,12 @@ func (a *Telemetry) SendEvent(event string, data map[string]interface{}) {
 		return
 	}
 
-	if a.rateLimits[event] < RATE_LIMIT_VALUE {
-		a.rateLimits[event] += 1
-	} else {
-		return
+	if rateLimitFlag {
+		if a.rateLimits[event] < RATE_LIMIT_VALUE {
+			a.rateLimits[event] += 1
+		} else {
+			return
+		}
 	}
 
 	// zap.S().Info(data)

From 7f42b39684ee6124f588a2a64663f2810dd75f8f Mon Sep 17 00:00:00 2001
From: Ankit Nayan
Date: Wed, 28 Dec 2022 02:33:21 +0530
Subject: [PATCH 7/7] fix: changed or to and

---
 pkg/query-service/app/http_handler.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go
index 927b825792..6bb3a21999 100644
--- a/pkg/query-service/app/http_handler.go
+++ b/pkg/query-service/app/http_handler.go
@@ -1333,7 +1333,7 @@ func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) {
 	}
 
 	telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_NUMBER_OF_SERVICES, data)
-	if (data["number"] != 0) || (data["number"] != telemetry.DEFAULT_NUMBER_OF_SERVICES) {
+	if (data["number"] != 0) && (data["number"] != telemetry.DEFAULT_NUMBER_OF_SERVICES) {
 		telemetry.GetInstance().AddActiveTracesUser()
 	}