From 83f6dea2db7a823a21a013d7d9c51ed5d7c7fb0f Mon Sep 17 00:00:00 2001 From: Shivanshu Raj Shrivastava Date: Wed, 18 Dec 2024 17:04:01 +0530 Subject: [PATCH 01/10] Add support for trace_v3 schema in messaging queues (#6663) feat: support trace v3 queries --- .../integrations/messagingQueues/kafka/sql.go | 180 +++++++++--------- pkg/query-service/constants/constants.go | 2 +- 2 files changed, 91 insertions(+), 91 deletions(-) diff --git a/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go b/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go index bf07316bb2..67b32938f0 100644 --- a/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go +++ b/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go @@ -9,25 +9,25 @@ func generateConsumerSQL(start, end int64, topic, partition, consumerGroup, queu query := fmt.Sprintf(` WITH consumer_query AS ( SELECT - serviceName, + resource_string_service$$name, quantile(0.99)(durationNano) / 1000000 AS p99, COUNT(*) AS total_requests, - sumIf(1, statusCode = 2) AS error_count, - avg(CASE WHEN has(numberTagMap, 'messaging.message.body.size') THEN numberTagMap['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size - FROM signoz_traces.distributed_signoz_index_v2 + sumIf(1, status_code = 2) AS error_count, + avg(CASE WHEN has(attributes_number, 'messaging.message.body.size') THEN attributes_number['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size + FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d' AND kind = 5 - AND msgSystem = '%s' - AND stringTagMap['messaging.destination.name'] = '%s' - AND stringTagMap['messaging.destination.partition.id'] = '%s' - AND stringTagMap['messaging.kafka.consumer.group'] = '%s' - GROUP BY serviceName + AND attribute_string_messaging$$system = '%s' + AND attributes_string['messaging.destination.name'] = '%s' + AND attributes_string['messaging.destination.partition.id'] = '%s' + AND attributes_string['messaging.kafka.consumer.group'] = '%s' + GROUP BY resource_string_service$$name ) SELECT - serviceName AS service_name, + resource_string_service$$name AS service_name, p99, COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate, COALESCE(total_requests / %d, 0) AS throughput, @@ -35,7 +35,7 @@ SELECT FROM consumer_query ORDER BY - serviceName; + resource_string_service$$name; `, start, end, queueType, topic, partition, consumerGroup, timeRange) return query } @@ -48,14 +48,14 @@ WITH partition_query AS ( SELECT quantile(0.99)(durationNano) / 1000000 AS p99, count(*) AS total_requests, - stringTagMap['messaging.destination.name'] AS topic, - stringTagMap['messaging.destination.partition.id'] AS partition - FROM signoz_traces.distributed_signoz_index_v2 + attributes_string['messaging.destination.name'] AS topic, + attributes_string['messaging.destination.partition.id'] AS partition + FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d' AND kind = 4 - AND msgSystem = '%s' + AND attribute_string_messaging$$system = '%s' GROUP BY topic, partition ) @@ -78,25 +78,25 @@ func generateConsumerPartitionLatencySQL(start, end int64, topic, partition, que query := fmt.Sprintf(` WITH consumer_pl AS ( SELECT - stringTagMap['messaging.kafka.consumer.group'] AS consumer_group, - serviceName, + attributes_string['messaging.kafka.consumer.group'] AS consumer_group, + resource_string_service$$name, quantile(0.99)(durationNano) / 1000000 AS p99, COUNT(*) AS total_requests, - sumIf(1, statusCode = 2) 
AS error_count - FROM signoz_traces.distributed_signoz_index_v2 + sumIf(1, status_code = 2) AS error_count + FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d' AND kind = 5 - AND msgSystem = '%s' - AND stringTagMap['messaging.destination.name'] = '%s' - AND stringTagMap['messaging.destination.partition.id'] = '%s' - GROUP BY consumer_group, serviceName + AND attribute_string_messaging$$system = '%s' + AND attributes_string['messaging.destination.name'] = '%s' + AND attributes_string['messaging.destination.partition.id'] = '%s' + GROUP BY consumer_group, resource_string_service$$name ) SELECT consumer_group, - serviceName AS service_name, + resource_string_service$$name AS service_name, p99, COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate, COALESCE(total_requests / %d, 0) AS throughput @@ -115,23 +115,23 @@ func generateProducerPartitionThroughputSQL(start, end int64, queueType string) query := fmt.Sprintf(` WITH producer_latency AS ( SELECT - serviceName, + resource_string_service$$name, quantile(0.99)(durationNano) / 1000000 AS p99, - stringTagMap['messaging.destination.name'] AS topic, + attributes_string['messaging.destination.name'] AS topic, COUNT(*) AS total_requests, - sumIf(1, statusCode = 2) AS error_count - FROM signoz_traces.distributed_signoz_index_v2 + sumIf(1, status_code = 2) AS error_count + FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d' AND kind = 4 - AND msgSystem = '%s' - GROUP BY topic, serviceName + AND attribute_string_messaging$$system = '%s' + GROUP BY topic, resource_string_service$$name ) SELECT topic, - serviceName AS service_name, + resource_string_service$$name AS service_name, p99, COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate, COALESCE(total_requests / %d, 0) AS throughput @@ -148,17 +148,17 @@ func generateProducerTopicLatencySQL(start, end int64, topic, service, queueType WITH consumer_latency AS ( SELECT quantile(0.99)(durationNano) / 1000000 AS p99, - stringTagMap['messaging.destination.partition.id'] AS partition, + attributes_string['messaging.destination.partition.id'] AS partition, COUNT(*) AS total_requests, - sumIf(1, statusCode = 2) AS error_count - FROM signoz_traces.distributed_signoz_index_v2 + sumIf(1, status_code = 2) AS error_count + FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d' AND kind = 4 - AND serviceName = '%s' - AND msgSystem = '%s' - AND stringTagMap['messaging.destination.name'] = '%s' + AND resource_string_service$$name = '%s' + AND attribute_string_messaging$$system = '%s' + AND attributes_string['messaging.destination.name'] = '%s' GROUP BY partition ) @@ -179,24 +179,24 @@ func generateConsumerLatencySQL(start, end int64, queueType string) string { query := fmt.Sprintf(` WITH consumer_latency AS ( SELECT - serviceName, - stringTagMap['messaging.destination.name'] AS topic, + resource_string_service$$name, + attributes_string['messaging.destination.name'] AS topic, quantile(0.99)(durationNano) / 1000000 AS p99, COUNT(*) AS total_requests, - sumIf(1, statusCode = 2) AS error_count, - SUM(numberTagMap['messaging.message.body.size']) AS total_bytes - FROM signoz_traces.distributed_signoz_index_v2 + sumIf(1, status_code = 2) AS error_count, + SUM(attributes_number['messaging.message.body.size']) AS total_bytes + FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d' AND kind = 5 - AND msgSystem = '%s' - GROUP BY topic, 
serviceName + AND attribute_string_messaging$$system = '%s' + GROUP BY topic, resource_string_service$$name ) SELECT topic, - serviceName AS service_name, + resource_string_service$$name AS service_name, p99, COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate, COALESCE(total_requests / %d, 0) AS ingestion_rate, @@ -216,17 +216,17 @@ func generateConsumerServiceLatencySQL(start, end int64, topic, service, queueTy WITH consumer_latency AS ( SELECT quantile(0.99)(durationNano) / 1000000 AS p99, - stringTagMap['messaging.destination.partition.id'] AS partition, + attributes_string['messaging.destination.partition.id'] AS partition, COUNT(*) AS total_requests, - sumIf(1, statusCode = 2) AS error_count - FROM signoz_traces.distributed_signoz_index_v2 + sumIf(1, status_code = 2) AS error_count + FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d' AND kind = 5 - AND serviceName = '%s' - AND msgSystem = '%s' - AND stringTagMap['messaging.destination.name'] = '%s' + AND resource_string_service$$name = '%s' + AND attribute_string_messaging$$system = '%s' + AND attributes_string['messaging.destination.name'] = '%s' GROUP BY partition ) @@ -246,26 +246,26 @@ func generateProducerConsumerEvalSQL(start, end int64, queueType string, evalTim query := fmt.Sprintf(` WITH trace_data AS ( SELECT - p.serviceName AS producer_service, - c.serviceName AS consumer_service, - p.traceID, + p.resource_string_service$$name AS producer_service, + c.resource_string_service$$name AS consumer_service, + p.trace_id, p.timestamp AS producer_timestamp, c.timestamp AS consumer_timestamp, p.durationNano AS durationNano, (toUnixTimestamp64Nano(c.timestamp) - toUnixTimestamp64Nano(p.timestamp)) + p.durationNano AS time_difference FROM - signoz_traces.distributed_signoz_index_v2 p + signoz_traces.distributed_signoz_index_v3 p INNER JOIN - signoz_traces.distributed_signoz_index_v2 c - ON p.traceID = c.traceID - AND c.parentSpanID = p.spanID + signoz_traces.distributed_signoz_index_v3 c + ON p.trace_id = c.trace_id + AND c.parent_span_id = p.span_id WHERE p.kind = 4 AND c.kind = 5 AND toUnixTimestamp64Nano(p.timestamp) BETWEEN '%d' AND '%d' AND toUnixTimestamp64Nano(c.timestamp) BETWEEN '%d' AND '%d' - AND c.msgSystem = '%s' - AND p.msgSystem = '%s' + AND c.attribute_string_messaging$$system = '%s' + AND p.attribute_string_messaging$$system = '%s' ) SELECT @@ -278,7 +278,7 @@ SELECT arrayMap(x -> x.1, arraySort( x -> -x.2, - groupArrayIf((traceID, time_difference), time_difference > '%d') + groupArrayIf((trace_id, time_difference), time_difference > '%d') ) ), 1, 10 @@ -296,30 +296,30 @@ func generateProducerSQL(start, end int64, topic, partition, queueType string) s query := fmt.Sprintf(` WITH producer_query AS ( SELECT - serviceName, + resource_string_service$$name, quantile(0.99)(durationNano) / 1000000 AS p99, count(*) AS total_count, - sumIf(1, statusCode = 2) AS error_count - FROM signoz_traces.distributed_signoz_index_v2 + sumIf(1, status_code = 2) AS error_count + FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d' AND kind = 4 - AND msgSystem = '%s' - AND stringTagMap['messaging.destination.name'] = '%s' - AND stringTagMap['messaging.destination.partition.id'] = '%s' - GROUP BY serviceName + AND attribute_string_messaging$$system = '%s' + AND attributes_string['messaging.destination.name'] = '%s' + AND attributes_string['messaging.destination.partition.id'] = '%s' + GROUP BY resource_string_service$$name ) SELECT - serviceName 
AS service_name, + resource_string_service$$name AS service_name, p99, COALESCE((error_count * 100.0) / total_count, 0) AS error_percentage, COALESCE(total_count / %d, 0) AS throughput FROM producer_query ORDER BY - serviceName; + resource_string_service$$name; `, start, end, queueType, topic, partition, timeRange) return query } @@ -328,18 +328,18 @@ func generateNetworkLatencyThroughputSQL(start, end int64, consumerGroup, partit timeRange := (end - start) / 1000000000 query := fmt.Sprintf(` SELECT - stringTagMap['messaging.client_id'] AS client_id, - stringTagMap['service.instance.id'] AS service_instance_id, - serviceName AS service_name, + attributes_string['messaging.client_id'] AS client_id, + attributes_string['service.instance.id'] AS service_instance_id, + resource_string_service$$name AS service_name, count(*) / %d AS throughput -FROM signoz_traces.distributed_signoz_index_v2 +FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d' AND kind = 5 - AND msgSystem = '%s' - AND stringTagMap['messaging.kafka.consumer.group'] = '%s' - AND stringTagMap['messaging.destination.partition.id'] = '%s' + AND attribute_string_messaging$$system = '%s' + AND attributes_string['messaging.kafka.consumer.group'] = '%s' + AND attributes_string['messaging.destination.partition.id'] = '%s' GROUP BY service_name, client_id, service_instance_id ORDER BY throughput DESC `, timeRange, start, end, queueType, consumerGroup, partitionID) @@ -350,12 +350,12 @@ func onboardProducersSQL(start, end int64, queueType string) string { query := fmt.Sprintf(` SELECT COUNT(*) = 0 AS entries, - COUNT(IF(msgSystem = '%s', 1, NULL)) = 0 AS queue, + COUNT(IF(attribute_string_messaging$$system = '%s', 1, NULL)) = 0 AS queue, COUNT(IF(kind = 4, 1, NULL)) = 0 AS kind, - COUNT(IF(has(stringTagMap, 'messaging.destination.name'), 1, NULL)) = 0 AS destination, - COUNT(IF(has(stringTagMap, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition + COUNT(IF(has(attributes_string, 'messaging.destination.name'), 1, NULL)) = 0 AS destination, + COUNT(IF(has(attributes_string, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition FROM - signoz_traces.distributed_signoz_index_v2 + signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d';`, queueType, start, end) @@ -366,16 +366,16 @@ func onboardConsumerSQL(start, end int64, queueType string) string { query := fmt.Sprintf(` SELECT COUNT(*) = 0 AS entries, - COUNT(IF(msgSystem = '%s', 1, NULL)) = 0 AS queue, + COUNT(IF(attribute_string_messaging$$system = '%s', 1, NULL)) = 0 AS queue, COUNT(IF(kind = 5, 1, NULL)) = 0 AS kind, - COUNT(serviceName) = 0 AS svc, - COUNT(IF(has(stringTagMap, 'messaging.destination.name'), 1, NULL)) = 0 AS destination, - COUNT(IF(has(stringTagMap, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition, - COUNT(IF(has(stringTagMap, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup, - COUNT(IF(has(numberTagMap, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize, - COUNT(IF(has(stringTagMap, 'messaging.client_id'), 1, NULL)) = 0 AS clientid, - COUNT(IF(has(stringTagMap, 'service.instance.id'), 1, NULL)) = 0 AS instanceid -FROM signoz_traces.distributed_signoz_index_v2 + COUNT(resource_string_service$$name) = 0 AS svc, + COUNT(IF(has(attributes_string, 'messaging.destination.name'), 1, NULL)) = 0 AS destination, + COUNT(IF(has(attributes_string, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition, + COUNT(IF(has(attributes_string, 
'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup, + COUNT(IF(has(attributes_number, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize, + COUNT(IF(has(attributes_string, 'messaging.client_id'), 1, NULL)) = 0 AS clientid, + COUNT(IF(has(attributes_string, 'service.instance.id'), 1, NULL)) = 0 AS instanceid +FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d';`, queueType, start, end) diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 7d6f087188..33caecb0d6 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -90,7 +90,7 @@ func EnableHostsInfraMonitoring() bool { return GetOrDefaultEnv("ENABLE_INFRA_METRICS", "true") == "true" } -var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "false") +var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "true") func IsDurationSortFeatureEnabled() bool { isDurationSortFeatureEnabledStr := DurationSortFeature From 83aa48c7215591470e68f1f09f814bc329a76ee3 Mon Sep 17 00:00:00 2001 From: Shivanshu Raj Shrivastava Date: Wed, 18 Dec 2024 19:06:22 +0530 Subject: [PATCH 02/10] update service.instance.id (#6665) * nit: update resource id and revert the flag --- .../app/integrations/messagingQueues/kafka/sql.go | 4 ++-- pkg/query-service/constants/constants.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go b/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go index 67b32938f0..05577ab8b9 100644 --- a/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go +++ b/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go @@ -329,7 +329,7 @@ func generateNetworkLatencyThroughputSQL(start, end int64, consumerGroup, partit query := fmt.Sprintf(` SELECT attributes_string['messaging.client_id'] AS client_id, - attributes_string['service.instance.id'] AS service_instance_id, + resources_string['service.instance.id'] AS service_instance_id, resource_string_service$$name AS service_name, count(*) / %d AS throughput FROM signoz_traces.distributed_signoz_index_v3 @@ -374,7 +374,7 @@ SELECT COUNT(IF(has(attributes_string, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup, COUNT(IF(has(attributes_number, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize, COUNT(IF(has(attributes_string, 'messaging.client_id'), 1, NULL)) = 0 AS clientid, - COUNT(IF(has(attributes_string, 'service.instance.id'), 1, NULL)) = 0 AS instanceid + COUNT(IF(has(resources_string, 'service.instance.id'), 1, NULL)) = 0 AS instanceid FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 33caecb0d6..7d6f087188 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -90,7 +90,7 @@ func EnableHostsInfraMonitoring() bool { return GetOrDefaultEnv("ENABLE_INFRA_METRICS", "true") == "true" } -var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "true") +var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "false") func IsDurationSortFeatureEnabled() bool { isDurationSortFeatureEnabledStr := DurationSortFeature From 85cf4f4e2ef6cfc69c912e567cbf03b2efda647c Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Wed, 18 Dec 2024 21:07:31 +0700 Subject: [PATCH 03/10] fix: use placehold in limit and use proper exists (#6667) --- 
pkg/query-service/app/querier/helper.go | 2 +- pkg/query-service/app/querier/v2/helper.go | 2 +- pkg/query-service/app/traces/v4/enrich.go | 4 ++-- .../app/traces/v4/query_builder.go | 19 ++++++++++++++++--- .../app/traces/v4/query_builder_test.go | 8 +++++--- 5 files changed, 25 insertions(+), 10 deletions(-) diff --git a/pkg/query-service/app/querier/helper.go b/pkg/query-service/app/querier/helper.go index 1b2acbab8b..a82b3f815b 100644 --- a/pkg/query-service/app/querier/helper.go +++ b/pkg/query-service/app/querier/helper.go @@ -190,7 +190,7 @@ func (q *querier) runBuilderQuery( ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil} return } - query = fmt.Sprintf(placeholderQuery, limitQuery) + query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1) } else { query, err = tracesQueryBuilder( start, diff --git a/pkg/query-service/app/querier/v2/helper.go b/pkg/query-service/app/querier/v2/helper.go index b62fffe106..4846aa3f3b 100644 --- a/pkg/query-service/app/querier/v2/helper.go +++ b/pkg/query-service/app/querier/v2/helper.go @@ -190,7 +190,7 @@ func (q *querier) runBuilderQuery( ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil} return } - query = fmt.Sprintf(placeholderQuery, limitQuery) + query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1) } else { query, err = tracesQueryBuilder( start, diff --git a/pkg/query-service/app/traces/v4/enrich.go b/pkg/query-service/app/traces/v4/enrich.go index 848e489e86..c0974ea0b0 100644 --- a/pkg/query-service/app/traces/v4/enrich.go +++ b/pkg/query-service/app/traces/v4/enrich.go @@ -34,8 +34,8 @@ func enrichKeyWithMetadata(key v3.AttributeKey, keys map[string]v3.AttributeKey) return v } - for _, key := range utils.GenerateEnrichmentKeys(key) { - if val, ok := keys[key]; ok { + for _, tkey := range utils.GenerateEnrichmentKeys(key) { + if val, ok := keys[tkey]; ok { return val } } diff --git a/pkg/query-service/app/traces/v4/query_builder.go b/pkg/query-service/app/traces/v4/query_builder.go index d650e1cdaa..80d39b9016 100644 --- a/pkg/query-service/app/traces/v4/query_builder.go +++ b/pkg/query-service/app/traces/v4/query_builder.go @@ -74,6 +74,19 @@ func getSelectLabels(groupBy []v3.AttributeKey) string { return strings.Join(labels, ",") } +// TODO(nitya): use the _exists columns as well in the future similar to logs +func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (string, error) { + if key.DataType == v3.AttributeKeyDataTypeString { + if op == v3.FilterOperatorExists { + return fmt.Sprintf("%s %s ''", getColumnName(key), tracesOperatorMappingV3[v3.FilterOperatorNotEqual]), nil + } else { + return fmt.Sprintf("%s %s ''", getColumnName(key), tracesOperatorMappingV3[v3.FilterOperatorEqual]), nil + } + } else { + return "", fmt.Errorf("unsupported operation, exists and not exists can only be applied on custom attributes or string type columns") + } +} + func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) { var conditions []string @@ -110,7 +123,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) { conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal)) case v3.FilterOperatorExists, v3.FilterOperatorNotExists: if item.Key.IsColumn { - subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(item.Key, item.Operator) + subQuery, err := existsSubQueryForFixedColumn(item.Key, item.Operator) if err != nil { return "", err } @@ -312,7 +325,7 @@ func buildTracesQuery(start, end, 
step int64, mq *v3.BuilderQuery, panelType v3. } if options.GraphLimitQtype == constants.SecondQueryGraphLimit { - filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)" + filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)" } switch mq.AggregateOperator { @@ -350,7 +363,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3. case v3.AggregateOperatorCount: if mq.AggregateAttribute.Key != "" { if mq.AggregateAttribute.IsColumn { - subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists) + subQuery, err := existsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists) if err == nil { filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery) } diff --git a/pkg/query-service/app/traces/v4/query_builder_test.go b/pkg/query-service/app/traces/v4/query_builder_test.go index 9db2e815f9..a9b055b67f 100644 --- a/pkg/query-service/app/traces/v4/query_builder_test.go +++ b/pkg/query-service/app/traces/v4/query_builder_test.go @@ -265,9 +265,11 @@ func Test_buildTracesFilterQuery(t *testing.T) { {Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists}, {Key: v3.AttributeKey{Key: "host1", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists}, {Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists}, + {Key: v3.AttributeKey{Key: "http_url", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists}, + {Key: v3.AttributeKey{Key: "http.route", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists}, }}, }, - want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND path = ''", + want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND `attribute_string_path` = '' AND http_url = '' AND `attribute_string_http$$route` = ''", }, } for _, tt := range tests { @@ -683,7 +685,7 @@ func TestPrepareTracesQuery(t *testing.T) { }, }, want: "SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where " + - "(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (%s) group by `function` order by value DESC", + "(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `function` order by value DESC", }, { name: "test with limit with resources- first", @@ -766,7 +768,7 @@ func TestPrepareTracesQuery(t *testing.T) { want: "SELECT `attribute_string_function` 
as `function`, serviceName as `serviceName`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " + "where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['line'] = 100 " + "AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) " + - "AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (%s) group by `function`,`serviceName` order by value DESC", + "AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `function`,`serviceName` order by value DESC", }, } From 60dc479a195cab7419528096e82bad6b9bc1e78f Mon Sep 17 00:00:00 2001 From: Shivanshu Raj Shrivastava Date: Wed, 18 Dec 2024 19:57:33 +0530 Subject: [PATCH 04/10] fix: add bucketing (#6669) Co-authored-by: Nityananda Gohain --- .../integrations/messagingQueues/kafka/sql.go | 67 +++++++++++++++---- 1 file changed, 55 insertions(+), 12 deletions(-) diff --git a/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go b/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go index 05577ab8b9..9b943acbc8 100644 --- a/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go +++ b/pkg/query-service/app/integrations/messagingQueues/kafka/sql.go @@ -6,6 +6,8 @@ import ( func generateConsumerSQL(start, end int64, topic, partition, consumerGroup, queueType string) string { timeRange := (end - start) / 1000000000 + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 query := fmt.Sprintf(` WITH consumer_query AS ( SELECT @@ -18,6 +20,8 @@ WITH consumer_query AS ( WHERE timestamp >= '%d' AND timestamp <= '%d' + AND ts_bucket_start >= '%d' + AND ts_bucket_start <= '%d' AND kind = 5 AND attribute_string_messaging$$system = '%s' AND attributes_string['messaging.destination.name'] = '%s' @@ -36,13 +40,15 @@ FROM consumer_query ORDER BY resource_string_service$$name; -`, start, end, queueType, topic, partition, consumerGroup, timeRange) +`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, consumerGroup, timeRange) return query } // S1 landing func generatePartitionLatencySQL(start, end int64, queueType string) string { timeRange := (end - start) / 1000000000 + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 query := fmt.Sprintf(` WITH partition_query AS ( SELECT @@ -54,6 +60,8 @@ WITH partition_query AS ( WHERE timestamp >= '%d' AND timestamp <= '%d' + AND ts_bucket_start >= '%d' + AND ts_bucket_start <= '%d' AND kind = 4 AND attribute_string_messaging$$system = '%s' GROUP BY topic, partition @@ -68,13 +76,15 @@ FROM partition_query ORDER BY topic; -`, start, end, queueType, timeRange) +`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange) return query } // S1 consumer func generateConsumerPartitionLatencySQL(start, end int64, topic, partition, queueType string) string { timeRange := (end - start) / 1000000000 + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 query := fmt.Sprintf(` WITH consumer_pl AS ( SELECT @@ -87,6 +97,8 @@ WITH consumer_pl AS ( WHERE timestamp >= '%d' AND timestamp <= '%d' + AND ts_bucket_start >= '%d' 
+ AND ts_bucket_start <= '%d' AND kind = 5 AND attribute_string_messaging$$system = '%s' AND attributes_string['messaging.destination.name'] = '%s' @@ -104,14 +116,15 @@ FROM consumer_pl ORDER BY consumer_group; -`, start, end, queueType, topic, partition, timeRange) +`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, timeRange) return query } // S3, producer overview func generateProducerPartitionThroughputSQL(start, end int64, queueType string) string { timeRange := (end - start) / 1000000000 - // t, svc, rps, byte*, p99, err + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 // t, svc, rps, byte*, p99, err query := fmt.Sprintf(` WITH producer_latency AS ( SELECT @@ -124,6 +137,8 @@ WITH producer_latency AS ( WHERE timestamp >= '%d' AND timestamp <= '%d' + AND ts_bucket_start >= '%d' + AND ts_bucket_start <= '%d' AND kind = 4 AND attribute_string_messaging$$system = '%s' GROUP BY topic, resource_string_service$$name @@ -137,13 +152,15 @@ SELECT COALESCE(total_requests / %d, 0) AS throughput FROM producer_latency -`, start, end, queueType, timeRange) +`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange) return query } // S3, producer topic/service overview func generateProducerTopicLatencySQL(start, end int64, topic, service, queueType string) string { timeRange := (end - start) / 1000000000 + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 query := fmt.Sprintf(` WITH consumer_latency AS ( SELECT @@ -155,6 +172,8 @@ WITH consumer_latency AS ( WHERE timestamp >= '%d' AND timestamp <= '%d' + AND ts_bucket_start >= '%d' + AND ts_bucket_start <= '%d' AND kind = 4 AND resource_string_service$$name = '%s' AND attribute_string_messaging$$system = '%s' @@ -169,13 +188,15 @@ SELECT COALESCE(total_requests / %d, 0) AS throughput FROM consumer_latency -`, start, end, service, queueType, topic, timeRange) +`, start, end, tsBucketStart, tsBucketEnd, service, queueType, topic, timeRange) return query } // S3 consumer overview func generateConsumerLatencySQL(start, end int64, queueType string) string { timeRange := (end - start) / 1000000000 + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 query := fmt.Sprintf(` WITH consumer_latency AS ( SELECT @@ -189,6 +210,8 @@ WITH consumer_latency AS ( WHERE timestamp >= '%d' AND timestamp <= '%d' + AND ts_bucket_start >= '%d' + AND ts_bucket_start <= '%d' AND kind = 5 AND attribute_string_messaging$$system = '%s' GROUP BY topic, resource_string_service$$name @@ -205,13 +228,15 @@ FROM consumer_latency ORDER BY topic; -`, start, end, queueType, timeRange, timeRange) +`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange, timeRange) return query } // S3 consumer topic/service func generateConsumerServiceLatencySQL(start, end int64, topic, service, queueType string) string { timeRange := (end - start) / 1000000000 + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 query := fmt.Sprintf(` WITH consumer_latency AS ( SELECT @@ -223,6 +248,8 @@ WITH consumer_latency AS ( WHERE timestamp >= '%d' AND timestamp <= '%d' + AND ts_bucket_start >= '%d' + AND ts_bucket_start <= '%d' AND kind = 5 AND resource_string_service$$name = '%s' AND attribute_string_messaging$$system = '%s' @@ -237,7 +264,7 @@ SELECT COALESCE(total_requests / %d, 0) AS throughput FROM consumer_latency -`, start, end, service, queueType, topic, timeRange) +`, start, end, tsBucketStart, tsBucketEnd, service, queueType, topic, timeRange) return 
query } @@ -293,6 +320,8 @@ GROUP BY func generateProducerSQL(start, end int64, topic, partition, queueType string) string { timeRange := (end - start) / 1000000000 + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 query := fmt.Sprintf(` WITH producer_query AS ( SELECT @@ -304,6 +333,8 @@ WITH producer_query AS ( WHERE timestamp >= '%d' AND timestamp <= '%d' + AND ts_bucket_start >= '%d' + AND ts_bucket_start <= '%d' AND kind = 4 AND attribute_string_messaging$$system = '%s' AND attributes_string['messaging.destination.name'] = '%s' @@ -320,12 +351,14 @@ FROM producer_query ORDER BY resource_string_service$$name; -`, start, end, queueType, topic, partition, timeRange) +`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, timeRange) return query } func generateNetworkLatencyThroughputSQL(start, end int64, consumerGroup, partitionID, queueType string) string { timeRange := (end - start) / 1000000000 + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 query := fmt.Sprintf(` SELECT attributes_string['messaging.client_id'] AS client_id, @@ -336,17 +369,21 @@ FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' AND timestamp <= '%d' + AND ts_bucket_start >= '%d' + AND ts_bucket_start <= '%d' AND kind = 5 AND attribute_string_messaging$$system = '%s' AND attributes_string['messaging.kafka.consumer.group'] = '%s' AND attributes_string['messaging.destination.partition.id'] = '%s' GROUP BY service_name, client_id, service_instance_id ORDER BY throughput DESC -`, timeRange, start, end, queueType, consumerGroup, partitionID) +`, timeRange, start, end, tsBucketStart, tsBucketEnd, queueType, consumerGroup, partitionID) return query } func onboardProducersSQL(start, end int64, queueType string) string { + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 query := fmt.Sprintf(` SELECT COUNT(*) = 0 AS entries, @@ -358,11 +395,15 @@ FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' - AND timestamp <= '%d';`, queueType, start, end) + AND timestamp <= '%d' + AND ts_bucket_start >= '%d' + AND ts_bucket_start <= '%d';`, queueType, start, end, tsBucketStart, tsBucketEnd) return query } func onboardConsumerSQL(start, end int64, queueType string) string { + tsBucketStart := (start / 1000000000) - 1800 + tsBucketEnd := end / 1000000000 query := fmt.Sprintf(` SELECT COUNT(*) = 0 AS entries, @@ -378,6 +419,8 @@ SELECT FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= '%d' - AND timestamp <= '%d';`, queueType, start, end) + AND timestamp <= '%d' + AND ts_bucket_start >= '%d' + AND ts_bucket_start <= '%d' ;`, queueType, start, end, tsBucketStart, tsBucketEnd) return query } From 67e822e23ef5744618b5a7d9e516c3c30a35e6c3 Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Thu, 19 Dec 2024 11:52:20 +0700 Subject: [PATCH 05/10] feat: api for trace materialization (#6646) * feat: api for trace materialization * fix: minor changes and cleanup * fix: minor fixes * fix: update errors * fix: address comments * fix: address comments --- .../app/clickhouseReader/reader.go | 177 +++++++++++++++++- pkg/query-service/app/http_handler.go | 35 ++++ pkg/query-service/app/logs/parser.go | 8 +- pkg/query-service/app/logs/parser_test.go | 8 +- pkg/query-service/app/logs/validator.go | 34 ++++ pkg/query-service/constants/constants.go | 2 +- pkg/query-service/interfaces/interface.go | 4 + pkg/query-service/model/response.go | 6 +- 8 files changed, 254 insertions(+), 20 
deletions(-) diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 9c7828af45..2a1c1f5782 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -2694,8 +2694,8 @@ func (r *ClickHouseReader) GetTagsInfoInLastHeartBeatInterval(ctx context.Contex } // remove this after sometime -func removeUnderscoreDuplicateFields(fields []model.LogField) []model.LogField { - lookup := map[string]model.LogField{} +func removeUnderscoreDuplicateFields(fields []model.Field) []model.Field { + lookup := map[string]model.Field{} for _, v := range fields { lookup[v.Name+v.DataType] = v } @@ -2706,7 +2706,7 @@ func removeUnderscoreDuplicateFields(fields []model.LogField) []model.LogField { } } - updatedFields := []model.LogField{} + updatedFields := []model.Field{} for _, v := range lookup { updatedFields = append(updatedFields, v) } @@ -2717,11 +2717,11 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe // response will contain top level fields from the otel log model response := model.GetFieldsResponse{ Selected: constants.StaticSelectedLogFields, - Interesting: []model.LogField{}, + Interesting: []model.Field{}, } // get attribute keys - attributes := []model.LogField{} + attributes := []model.Field{} query := fmt.Sprintf("SELECT DISTINCT name, datatype from %s.%s group by name, datatype", r.logsDB, r.logsAttributeKeys) err := r.db.Select(ctx, &attributes, query) if err != nil { @@ -2729,7 +2729,7 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe } // get resource keys - resources := []model.LogField{} + resources := []model.Field{} query = fmt.Sprintf("SELECT DISTINCT name, datatype from %s.%s group by name, datatype", r.logsDB, r.logsResourceKeys) err = r.db.Select(ctx, &resources, query) if err != nil { @@ -2753,9 +2753,11 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe return &response, nil } -func (r *ClickHouseReader) extractSelectedAndInterestingFields(tableStatement string, fieldType string, fields *[]model.LogField, response *model.GetFieldsResponse) { +func (r *ClickHouseReader) extractSelectedAndInterestingFields(tableStatement string, overrideFieldType string, fields *[]model.Field, response *model.GetFieldsResponse) { for _, field := range *fields { - field.Type = fieldType + if overrideFieldType != "" { + field.Type = overrideFieldType + } // all static fields are assumed to be selected as we don't allow changing them if isColumn(r.useLogsNewSchema, tableStatement, field.Type, field.Name, field.DataType) { response.Selected = append(response.Selected, field) @@ -2945,6 +2947,165 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda return nil } +func (r *ClickHouseReader) GetTraceFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError) { + // response will contain top level fields from the otel trace model + response := model.GetFieldsResponse{ + Selected: []model.Field{}, + Interesting: []model.Field{}, + } + + // get the top level selected fields + for _, field := range constants.NewStaticFieldsTraces { + if (v3.AttributeKey{} == field) { + continue + } + response.Selected = append(response.Selected, model.Field{ + Name: field.Key, + DataType: field.DataType.String(), + Type: constants.Static, + }) + } + + // get attribute keys + attributes := []model.Field{} + query := fmt.Sprintf("SELECT tagKey, tagType, dataType from 
%s.%s group by tagKey, tagType, dataType", r.TraceDB, r.spanAttributesKeysTable) + rows, err := r.db.Query(ctx, query) + if err != nil { + return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal} + } + defer rows.Close() + + var tagKey string + var dataType string + var tagType string + for rows.Next() { + if err := rows.Scan(&tagKey, &tagType, &dataType); err != nil { + return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal} + } + attributes = append(attributes, model.Field{ + Name: tagKey, + DataType: dataType, + Type: tagType, + }) + } + + statements := []model.ShowCreateTableStatement{} + query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.TraceDB, r.traceLocalTableName) + err = r.db.Select(ctx, &statements, query) + if err != nil { + return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal} + } + + r.extractSelectedAndInterestingFields(statements[0].Statement, "", &attributes, &response) + + return &response, nil + +} + +func (r *ClickHouseReader) UpdateTraceField(ctx context.Context, field *model.UpdateField) *model.ApiError { + if !field.Selected { + return model.ForbiddenError(errors.New("removing a selected field is not allowed, please reach out to support.")) + } + + // name of the materialized column + colname := utils.GetClickhouseColumnNameV2(field.Type, field.DataType, field.Name) + + field.DataType = strings.ToLower(field.DataType) + + // dataType and chDataType of the materialized column + var dataTypeMap = map[string]string{ + "string": "string", + "bool": "bool", + "int64": "number", + "float64": "number", + } + var chDataTypeMap = map[string]string{ + "string": "String", + "bool": "Bool", + "int64": "Float64", + "float64": "Float64", + } + chDataType := chDataTypeMap[field.DataType] + dataType := dataTypeMap[field.DataType] + + // typeName: tag => attributes, resource => resources + typeName := field.Type + if field.Type == string(v3.AttributeKeyTypeTag) { + typeName = constants.Attributes + } else if field.Type == string(v3.AttributeKeyTypeResource) { + typeName = constants.Resources + } + + attrColName := fmt.Sprintf("%s_%s", typeName, dataType) + for _, table := range []string{r.traceLocalTableName, r.traceTableName} { + q := "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s` %s DEFAULT %s['%s'] CODEC(ZSTD(1))" + query := fmt.Sprintf(q, + r.TraceDB, table, + r.cluster, + colname, chDataType, + attrColName, + field.Name, + ) + err := r.db.Exec(ctx, query) + if err != nil { + return &model.ApiError{Err: err, Typ: model.ErrorInternal} + } + + query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s_exists` bool DEFAULT if(mapContains(%s, '%s') != 0, true, false) CODEC(ZSTD(1))", + r.TraceDB, table, + r.cluster, + colname, + attrColName, + field.Name, + ) + err = r.db.Exec(ctx, query) + if err != nil { + return &model.ApiError{Err: err, Typ: model.ErrorInternal} + } + } + + // create the index + if strings.ToLower(field.DataType) == "bool" { + // there is no point in creating index for bool attributes as the cardinality is just 2 + return nil + } + + if field.IndexType == "" { + field.IndexType = constants.DefaultLogSkipIndexType + } + if field.IndexGranularity == 0 { + field.IndexGranularity = constants.DefaultLogSkipIndexGranularity + } + query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS `%s_idx` (`%s`) TYPE %s GRANULARITY %d", + r.TraceDB, r.traceLocalTableName, + r.cluster, + colname, + colname, + field.IndexType, + field.IndexGranularity, + ) + err := r.db.Exec(ctx, query) + if err != 
nil { + return &model.ApiError{Err: err, Typ: model.ErrorInternal} + } + + // add a default minmax index for numbers + if dataType == "number" { + query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS `%s_minmax_idx` (`%s`) TYPE minmax GRANULARITY 1", + r.TraceDB, r.traceLocalTableName, + r.cluster, + colname, + colname, + ) + err = r.db.Exec(ctx, query) + if err != nil { + return &model.ApiError{Err: err, Typ: model.ErrorInternal} + } + } + + return nil +} + func (r *ClickHouseReader) GetLogs(ctx context.Context, params *model.LogsFilterParams) (*[]model.SignozLog, *model.ApiError) { response := []model.SignozLog{} fields, apiErr := r.GetLogFields(ctx) diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index ba16894438..ee88eb7056 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -527,6 +527,9 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) { router.HandleFunc("/api/v1/settings/ingestion_key", am.AdminAccess(aH.insertIngestionKey)).Methods(http.MethodPost) router.HandleFunc("/api/v1/settings/ingestion_key", am.ViewAccess(aH.getIngestionKeys)).Methods(http.MethodGet) + router.HandleFunc("/api/v2/traces/fields", am.ViewAccess(aH.traceFields)).Methods(http.MethodGet) + router.HandleFunc("/api/v2/traces/fields", am.EditAccess(aH.updateTraceField)).Methods(http.MethodPost) + router.HandleFunc("/api/v1/version", am.OpenAccess(aH.getVersion)).Methods(http.MethodGet) router.HandleFunc("/api/v1/featureFlags", am.OpenAccess(aH.getFeatureFlags)).Methods(http.MethodGet) router.HandleFunc("/api/v1/configs", am.OpenAccess(aH.getConfigs)).Methods(http.MethodGet) @@ -4892,3 +4895,35 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) { aH.queryRangeV4(r.Context(), queryRangeParams, w, r) } + +func (aH *APIHandler) traceFields(w http.ResponseWriter, r *http.Request) { + fields, apiErr := aH.reader.GetTraceFields(r.Context()) + if apiErr != nil { + RespondError(w, apiErr, "failed to fetch fields from the db") + return + } + aH.WriteJSON(w, r, fields) +} + +func (aH *APIHandler) updateTraceField(w http.ResponseWriter, r *http.Request) { + field := model.UpdateField{} + if err := json.NewDecoder(r.Body).Decode(&field); err != nil { + apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err} + RespondError(w, apiErr, "failed to decode payload") + return + } + + err := logs.ValidateUpdateFieldPayloadV2(&field) + if err != nil { + apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err} + RespondError(w, apiErr, "incorrect payload") + return + } + + apiErr := aH.reader.UpdateTraceField(r.Context(), &field) + if apiErr != nil { + RespondError(w, apiErr, "failed to update field in the db") + return + } + aH.WriteJSON(w, r, field) +} diff --git a/pkg/query-service/app/logs/parser.go b/pkg/query-service/app/logs/parser.go index 855a023528..be524d00da 100644 --- a/pkg/query-service/app/logs/parser.go +++ b/pkg/query-service/app/logs/parser.go @@ -228,8 +228,8 @@ func parseColumn(s string) (*string, error) { return &colName, nil } -func arrayToMap(fields []model.LogField) map[string]model.LogField { - res := map[string]model.LogField{} +func arrayToMap(fields []model.Field) map[string]model.Field { + res := map[string]model.Field{} for _, field := range fields { res[field.Name] = field } @@ -251,7 +251,7 @@ func replaceInterestingFields(allFields *model.GetFieldsResponse, queryTokens [] return queryTokens, nil } -func 
replaceFieldInToken(queryToken string, selectedFieldsLookup map[string]model.LogField, interestingFieldLookup map[string]model.LogField) (string, error) { +func replaceFieldInToken(queryToken string, selectedFieldsLookup map[string]model.Field, interestingFieldLookup map[string]model.Field) (string, error) { op := strings.TrimSpace(operatorRegex.FindString(queryToken)) opLower := strings.ToLower(op) @@ -283,7 +283,7 @@ func replaceFieldInToken(queryToken string, selectedFieldsLookup map[string]mode } } else { // creating the query token here as we have the metadata - field := model.LogField{} + field := model.Field{} if sfield, ok := selectedFieldsLookup[sqlColName]; ok { field = sfield diff --git a/pkg/query-service/app/logs/parser_test.go b/pkg/query-service/app/logs/parser_test.go index 6397738437..f894fcaecd 100644 --- a/pkg/query-service/app/logs/parser_test.go +++ b/pkg/query-service/app/logs/parser_test.go @@ -238,14 +238,14 @@ func TestParseColumn(t *testing.T) { func TestReplaceInterestingFields(t *testing.T) { queryTokens := []string{"id.userid IN (100) ", "and id_key >= 50 ", `AND body ILIKE '%searchstring%'`} allFields := model.GetFieldsResponse{ - Selected: []model.LogField{ + Selected: []model.Field{ { Name: "id_key", DataType: "int64", Type: "attributes", }, }, - Interesting: []model.LogField{ + Interesting: []model.Field{ { Name: "id.userid", DataType: "int64", @@ -326,7 +326,7 @@ func TestCheckIfPrevousPaginateAndModifyOrder(t *testing.T) { } var generateSQLQueryFields = model.GetFieldsResponse{ - Selected: []model.LogField{ + Selected: []model.Field{ { Name: "field1", DataType: "int64", @@ -348,7 +348,7 @@ var generateSQLQueryFields = model.GetFieldsResponse{ Type: "static", }, }, - Interesting: []model.LogField{ + Interesting: []model.Field{ { Name: "FielD1", DataType: "int64", diff --git a/pkg/query-service/app/logs/validator.go b/pkg/query-service/app/logs/validator.go index d4a1e42234..03432922dd 100644 --- a/pkg/query-service/app/logs/validator.go +++ b/pkg/query-service/app/logs/validator.go @@ -6,6 +6,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/constants" "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" ) func ValidateUpdateFieldPayload(field *model.UpdateField) error { @@ -38,3 +39,36 @@ func ValidateUpdateFieldPayload(field *model.UpdateField) error { } return nil } + +func ValidateUpdateFieldPayloadV2(field *model.UpdateField) error { + if field.Name == "" { + return fmt.Errorf("name cannot be empty") + } + if field.Type == "" { + return fmt.Errorf("type cannot be empty") + } + if field.DataType == "" { + return fmt.Errorf("dataType cannot be empty") + } + + // the logs api uses the old names i.e attributes and resources while traces use tag and attribute. + // update log api to use tag and attribute. 
+ matched, err := regexp.MatchString(fmt.Sprintf("^(%s|%s)$", v3.AttributeKeyTypeTag, v3.AttributeKeyTypeResource), field.Type) + if err != nil { + return err + } + if !matched { + return fmt.Errorf("type %s not supported", field.Type) + } + + if field.IndexType != "" { + matched, err := regexp.MatchString(`^(minmax|set\([0-9]\)|bloom_filter\((0?.?[0-9]+|1)\)|tokenbf_v1\([0-9]+,[0-9]+,[0-9]+\)|ngrambf_v1\([0-9]+,[0-9]+,[0-9]+,[0-9]+\))$`, field.IndexType) + if err != nil { + return err + } + if !matched { + return fmt.Errorf("index type %s not supported", field.IndexType) + } + } + return nil +} diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 7d6f087188..242b2cd4a3 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -290,7 +290,7 @@ const ( UINT8 = "Uint8" ) -var StaticSelectedLogFields = []model.LogField{ +var StaticSelectedLogFields = []model.Field{ { Name: "timestamp", DataType: UINT32, diff --git a/pkg/query-service/interfaces/interface.go b/pkg/query-service/interfaces/interface.go index a2acd8c6c9..ac4ab91f9e 100644 --- a/pkg/query-service/interfaces/interface.go +++ b/pkg/query-service/interfaces/interface.go @@ -109,6 +109,10 @@ type Reader interface { SubscribeToQueryProgress(queryId string) (<-chan model.QueryProgress, func(), *model.ApiError) GetCountOfThings(ctx context.Context, query string) (uint64, error) + + //trace + GetTraceFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError) + UpdateTraceField(ctx context.Context, field *model.UpdateField) *model.ApiError } type Querier interface { diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go index 5058d71534..740dfa6ceb 100644 --- a/pkg/query-service/model/response.go +++ b/pkg/query-service/model/response.go @@ -509,15 +509,15 @@ type ShowCreateTableStatement struct { Statement string `json:"statement" ch:"statement"` } -type LogField struct { +type Field struct { Name string `json:"name" ch:"name"` DataType string `json:"dataType" ch:"datatype"` Type string `json:"type"` } type GetFieldsResponse struct { - Selected []LogField `json:"selected"` - Interesting []LogField `json:"interesting"` + Selected []Field `json:"selected"` + Interesting []Field `json:"interesting"` } // Represents a log record in query service requests and responses. 
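For reference, the UpdateTraceField handler introduced in this patch turns a field-materialization request (POST /api/v2/traces/fields) into ClickHouse DDL. Below is a minimal sketch of the statements it would emit for a string tag such as http.route, assuming the local table is signoz_traces.signoz_index_v3, the cluster is named cluster, and utils.GetClickhouseColumnNameV2 yields the `attribute_string_http$$route` naming seen in the query-builder tests earlier in this series; the table and cluster names are assumptions not shown in this diff, and an explicit index type is used because the DefaultLogSkipIndex* constants are not part of the patch.

-- materialized column (assumed table/cluster names; the same statement is issued for the distributed table as well)
ALTER TABLE signoz_traces.signoz_index_v3 ON CLUSTER cluster
    ADD COLUMN IF NOT EXISTS `attribute_string_http$$route` String
    DEFAULT attributes_string['http.route'] CODEC(ZSTD(1));

-- companion exists flag
ALTER TABLE signoz_traces.signoz_index_v3 ON CLUSTER cluster
    ADD COLUMN IF NOT EXISTS `attribute_string_http$$route_exists` bool
    DEFAULT if(mapContains(attributes_string, 'http.route') != 0, true, false) CODEC(ZSTD(1));

-- skip index, created only on the local table (index type and granularity as supplied by the caller)
ALTER TABLE signoz_traces.signoz_index_v3 ON CLUSTER cluster
    ADD INDEX IF NOT EXISTS `attribute_string_http$$route_idx` (`attribute_string_http$$route`)
    TYPE tokenbf_v1(10240,3,0) GRANULARITY 64;

Per the handler above, bool attributes skip index creation entirely, and numeric attributes additionally receive a minmax index with granularity 1.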
From 7405bfbbee3e7dca963db53815b5062759cb9ad0 Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Thu, 19 Dec 2024 13:01:13 +0530 Subject: [PATCH 06/10] feat: changed start and end time logic for consumer lag details (#6605) --- .../pages/MessagingQueues/MQDetails/MQDetails.tsx | 6 +++--- .../pages/MessagingQueues/MessagingQueuesUtils.ts | 13 ++++++++----- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.tsx b/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.tsx index 7eab107e59..41bb54efb4 100644 --- a/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.tsx +++ b/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.tsx @@ -24,13 +24,13 @@ const MQServiceDetailTypePerView = ( producerLatencyOption: ProducerLatencyOptions, ): Record => ({ [MessagingQueuesViewType.consumerLag.value]: [ - MessagingQueueServiceDetailType.ConsumerDetails, MessagingQueueServiceDetailType.ProducerDetails, + MessagingQueueServiceDetailType.ConsumerDetails, MessagingQueueServiceDetailType.NetworkLatency, ], [MessagingQueuesViewType.partitionLatency.value]: [ - MessagingQueueServiceDetailType.ConsumerDetails, MessagingQueueServiceDetailType.ProducerDetails, + MessagingQueueServiceDetailType.ConsumerDetails, ], [MessagingQueuesViewType.producerLatency.value]: [ producerLatencyOption === ProducerLatencyOptions.Consumers @@ -122,7 +122,7 @@ function MessagingQueuesDetails({ producerLatencyOption: ProducerLatencyOptions; }): JSX.Element { const [currentTab, setCurrentTab] = useState( - MessagingQueueServiceDetailType.ConsumerDetails, + MessagingQueueServiceDetailType.ProducerDetails, ); useEffect(() => { diff --git a/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts b/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts index ec7fadcdce..e787739cb3 100644 --- a/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts +++ b/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts @@ -179,10 +179,13 @@ export const convertToNanoseconds = (timestamp: number): bigint => export const getStartAndEndTimesInMilliseconds = ( timestamp: number, ): { start: number; end: number } => { - const FIVE_MINUTES_IN_MILLISECONDS = 5 * 60 * 1000; // 5 minutes in milliseconds - check with Shivanshu once + const FIVE_MINUTES_IN_MILLISECONDS = 5 * 60 * 1000; // 300,000 milliseconds - const start = Math.floor(timestamp); - const end = Math.floor(start + FIVE_MINUTES_IN_MILLISECONDS); + const pointInTime = Math.floor(timestamp * 1000); + + // Convert timestamp to milliseconds and floor it + const start = Math.floor(pointInTime - FIVE_MINUTES_IN_MILLISECONDS); + const end = Math.floor(pointInTime + FIVE_MINUTES_IN_MILLISECONDS); return { start, end }; }; @@ -311,8 +314,8 @@ export const getMetaDataAndAPIPerView = ( return { [MessagingQueuesViewType.consumerLag.value]: { tableApiPayload: { - start: (selectedTimelineQuery?.start || 0) * 1e9, - end: (selectedTimelineQuery?.end || 0) * 1e9, + start: (selectedTimelineQuery?.start || 0) * 1e6, + end: (selectedTimelineQuery?.end || 0) * 1e6, variables: { partition: selectedTimelineQuery?.partition, topic: selectedTimelineQuery?.topic, From a8a3bd3f7d0e7389df3df5c3bc34ddb724c0d1d3 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Thu, 12 Dec 2024 15:28:09 +0530 Subject: [PATCH 07/10] chore(signoz): pin versions: SigNoz 0.62.0, SigNoz OtelCollector 0.111.15 Signed-off-by: Prashant Shahi --- .../docker-swarm/clickhouse-setup/docker-compose.yaml | 8 ++++---- 
.../docker/clickhouse-setup/docker-compose-core.yaml | 4 ++-- .../clickhouse-setup/docker-compose-minimal.yaml | 10 +++++----- .../clickhouse-setup/docker-compose.testing.yaml | 8 ++++---- go.mod | 2 +- go.sum | 4 ++-- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml index 3887e223f7..9fa87c12ed 100644 --- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml @@ -146,7 +146,7 @@ services: condition: on-failure query-service: - image: signoz/query-service:0.61.0 + image: signoz/query-service:0.62.0 command: [ "-config=/root/config/prometheus.yml", @@ -187,7 +187,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:0.61.0 + image: signoz/frontend:0.62.0 deploy: restart_policy: condition: on-failure @@ -200,7 +200,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector: - image: signoz/signoz-otel-collector:0.111.14 + image: signoz/signoz-otel-collector:0.111.15 command: [ "--config=/etc/otel-collector-config.yaml", @@ -238,7 +238,7 @@ services: - query-service otel-collector-migrator: - image: signoz/signoz-schema-migrator:0.111.14 + image: signoz/signoz-schema-migrator:0.111.15 deploy: restart_policy: condition: on-failure diff --git a/deploy/docker/clickhouse-setup/docker-compose-core.yaml b/deploy/docker/clickhouse-setup/docker-compose-core.yaml index 5bade6b2da..173d39e9fc 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-core.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-core.yaml @@ -69,7 +69,7 @@ services: - --storage.path=/data otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.15} container_name: otel-migrator command: - "sync" @@ -86,7 +86,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` otel-collector: container_name: signoz-otel-collector - image: signoz/signoz-otel-collector:0.111.14 + image: signoz/signoz-otel-collector:0.111.15 command: [ "--config=/etc/otel-collector-config.yaml", diff --git a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml index 37df9590d3..f26cde87c0 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml @@ -162,7 +162,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.61.0} + image: signoz/query-service:${DOCKER_TAG:-0.62.0} container_name: signoz-query-service command: [ @@ -202,7 +202,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.61.0} + image: signoz/frontend:${DOCKER_TAG:-0.62.0} container_name: signoz-frontend restart: on-failure depends_on: @@ -214,7 +214,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector-migrator-sync: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.15} container_name: otel-migrator-sync command: - "sync" @@ -229,7 +229,7 @@ services: # condition: service_healthy otel-collector-migrator-async: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.15} container_name: otel-migrator-async command: - "async" @@ -246,7 +246,7 @@ services: # condition: service_healthy otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.15} container_name: signoz-otel-collector command: [ diff --git a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml index bd00cf1702..bb3b29395c 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml @@ -167,7 +167,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.61.0} + image: signoz/query-service:${DOCKER_TAG:-0.62.0} container_name: signoz-query-service command: [ @@ -209,7 +209,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.61.0} + image: signoz/frontend:${DOCKER_TAG:-0.62.0} container_name: signoz-frontend restart: on-failure depends_on: @@ -221,7 +221,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.15} container_name: otel-migrator command: - "--dsn=tcp://clickhouse:9000" @@ -235,7 +235,7 @@ services: otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.15} container_name: signoz-otel-collector command: [ diff --git a/go.mod b/go.mod index 9be8cf0651..5b4afe0c97 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/ClickHouse/clickhouse-go/v2 v2.25.0 github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd - github.com/SigNoz/signoz-otel-collector v0.111.14 + github.com/SigNoz/signoz-otel-collector v0.111.15 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974 github.com/antonmedv/expr v1.15.3 diff --git a/go.sum b/go.sum index f55d5d9656..448fbf26d6 100644 --- a/go.sum +++ b/go.sum @@ -70,8 +70,8 @@ github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkb github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod 
h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc= github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PUmIE= github.com/SigNoz/prometheus v1.12.0/go.mod h1:EqNM27OwmPfqMUk+E+XG1L9rfDFcyXnzzDrg0EPOfxA= -github.com/SigNoz/signoz-otel-collector v0.111.14 h1:nvRucNK/TTtZKM3Dsr/UNx+LwkjaGwx0yPlMvGw/4j0= -github.com/SigNoz/signoz-otel-collector v0.111.14/go.mod h1:vRDT10om89DHybN7SRMlt8IN9+/pgh1D57pNHPr2LM4= +github.com/SigNoz/signoz-otel-collector v0.111.15 h1:X2aV/FpMZ9kdN10byKrc2ODF39fAcOo6D68rqUFhxSk= +github.com/SigNoz/signoz-otel-collector v0.111.15/go.mod h1:vRDT10om89DHybN7SRMlt8IN9+/pgh1D57pNHPr2LM4= github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc= github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo= github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY= From b90ed375c2932ed31985287c47709837ef46e68b Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Wed, 18 Dec 2024 15:25:20 +0530 Subject: [PATCH 08/10] chore(signoz): pin versions: SigNoz 0.63.0, SigNoz OtelCollector 0.111.16 Signed-off-by: Prashant Shahi --- .../docker-swarm/clickhouse-setup/docker-compose.yaml | 8 ++++---- .../docker/clickhouse-setup/docker-compose-core.yaml | 4 ++-- .../clickhouse-setup/docker-compose-minimal.yaml | 10 +++++----- .../clickhouse-setup/docker-compose.testing.yaml | 8 ++++---- go.mod | 2 +- go.sum | 4 ++-- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml index 9fa87c12ed..31aa9c5a93 100644 --- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml @@ -146,7 +146,7 @@ services: condition: on-failure query-service: - image: signoz/query-service:0.62.0 + image: signoz/query-service:0.63.0 command: [ "-config=/root/config/prometheus.yml", @@ -187,7 +187,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:0.62.0 + image: signoz/frontend:0.63.0 deploy: restart_policy: condition: on-failure @@ -200,7 +200,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector: - image: signoz/signoz-otel-collector:0.111.15 + image: signoz/signoz-otel-collector:0.111.16 command: [ "--config=/etc/otel-collector-config.yaml", @@ -238,7 +238,7 @@ services: - query-service otel-collector-migrator: - image: signoz/signoz-schema-migrator:0.111.15 + image: signoz/signoz-schema-migrator:0.111.16 deploy: restart_policy: condition: on-failure diff --git a/deploy/docker/clickhouse-setup/docker-compose-core.yaml b/deploy/docker/clickhouse-setup/docker-compose-core.yaml index 173d39e9fc..b6a6fdacad 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-core.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-core.yaml @@ -69,7 +69,7 @@ services: - --storage.path=/data otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.15} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16} container_name: otel-migrator command: - "sync" @@ -86,7 +86,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` otel-collector: container_name: signoz-otel-collector - image: signoz/signoz-otel-collector:0.111.15 + image: signoz/signoz-otel-collector:0.111.16 command: [ "--config=/etc/otel-collector-config.yaml", diff --git a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml index f26cde87c0..a062d8e1a2 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml @@ -162,7 +162,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.62.0} + image: signoz/query-service:${DOCKER_TAG:-0.63.0} container_name: signoz-query-service command: [ @@ -202,7 +202,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.62.0} + image: signoz/frontend:${DOCKER_TAG:-0.63.0} container_name: signoz-frontend restart: on-failure depends_on: @@ -214,7 +214,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector-migrator-sync: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.15} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16} container_name: otel-migrator-sync command: - "sync" @@ -229,7 +229,7 @@ services: # condition: service_healthy otel-collector-migrator-async: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.15} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16} container_name: otel-migrator-async command: - "async" @@ -246,7 +246,7 @@ services: # condition: service_healthy otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.15} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16} container_name: signoz-otel-collector command: [ diff --git a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml index bb3b29395c..d15ff52680 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml @@ -167,7 +167,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.62.0} + image: signoz/query-service:${DOCKER_TAG:-0.63.0} container_name: signoz-query-service command: [ @@ -209,7 +209,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.62.0} + image: signoz/frontend:${DOCKER_TAG:-0.63.0} container_name: signoz-frontend restart: on-failure depends_on: @@ -221,7 +221,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.15} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16} container_name: otel-migrator command: - "--dsn=tcp://clickhouse:9000" @@ -235,7 +235,7 @@ services: otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.15} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16} container_name: signoz-otel-collector command: [ diff --git a/go.mod b/go.mod index 5b4afe0c97..866498192d 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/ClickHouse/clickhouse-go/v2 v2.25.0 github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd - github.com/SigNoz/signoz-otel-collector v0.111.15 + github.com/SigNoz/signoz-otel-collector v0.111.16 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974 github.com/antonmedv/expr v1.15.3 diff --git a/go.sum b/go.sum index 448fbf26d6..1b0b407b45 100644 --- a/go.sum +++ b/go.sum @@ -70,8 +70,8 @@ github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkb github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc= github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PUmIE= github.com/SigNoz/prometheus v1.12.0/go.mod h1:EqNM27OwmPfqMUk+E+XG1L9rfDFcyXnzzDrg0EPOfxA= -github.com/SigNoz/signoz-otel-collector v0.111.15 h1:X2aV/FpMZ9kdN10byKrc2ODF39fAcOo6D68rqUFhxSk= -github.com/SigNoz/signoz-otel-collector v0.111.15/go.mod h1:vRDT10om89DHybN7SRMlt8IN9+/pgh1D57pNHPr2LM4= +github.com/SigNoz/signoz-otel-collector v0.111.16 h1:535uKH5Oux+35EsI+L3C6pnAP/Ye0PTCbVizXoL+VqE= +github.com/SigNoz/signoz-otel-collector v0.111.16/go.mod h1:HJ4m0LY1MPsuZmuRF7Ixb+bY8rxgRzI0VXzOedESsjg= github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc= github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo= github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY= From dcc659907abefe52b27196a994265dddb3dbc149 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Wed, 18 Dec 2024 17:57:42 +0530 Subject: [PATCH 09/10] chore(signoz): pin versions: SigNoz 0.64.0 Signed-off-by: Prashant Shahi --- deploy/docker-swarm/clickhouse-setup/docker-compose.yaml | 4 ++-- deploy/docker/clickhouse-setup/docker-compose-minimal.yaml | 4 ++-- deploy/docker/clickhouse-setup/docker-compose.testing.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml index 31aa9c5a93..1a047fec8e 100644 --- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml @@ -146,7 +146,7 @@ 
services: condition: on-failure query-service: - image: signoz/query-service:0.63.0 + image: signoz/query-service:0.64.0 command: [ "-config=/root/config/prometheus.yml", @@ -187,7 +187,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:0.63.0 + image: signoz/frontend:0.64.0 deploy: restart_policy: condition: on-failure diff --git a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml index a062d8e1a2..d3ceb7bf42 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml @@ -162,7 +162,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.63.0} + image: signoz/query-service:${DOCKER_TAG:-0.64.0} container_name: signoz-query-service command: [ @@ -202,7 +202,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.63.0} + image: signoz/frontend:${DOCKER_TAG:-0.64.0} container_name: signoz-frontend restart: on-failure depends_on: diff --git a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml index d15ff52680..d99af9bc06 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml @@ -167,7 +167,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.63.0} + image: signoz/query-service:${DOCKER_TAG:-0.64.0} container_name: signoz-query-service command: [ @@ -209,7 +209,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.63.0} + image: signoz/frontend:${DOCKER_TAG:-0.64.0} container_name: signoz-frontend restart: on-failure depends_on: From 42fefc65beeae403b0e2eafc095e4ef43207ef42 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Fri, 13 Dec 2024 18:08:36 +0530 Subject: [PATCH 10/10] chore: deprecate develop branch - use main Signed-off-by: Prashant Shahi --- .github/workflows/build.yaml | 1 - .github/workflows/docs.yml | 2 +- .github/workflows/e2e-k3s.yaml | 2 +- .github/workflows/jest-coverage-changes.yml | 5 +++-- .github/workflows/push.yaml | 1 - .github/workflows/sonar.yml | 1 - .github/workflows/staging-deployment.yaml | 6 +++--- .github/workflows/testing-deployment.yaml | 2 +- CONTRIBUTING.md | 4 ++-- deploy/README.md | 2 +- frontend/.husky/commit-msg | 5 ----- sample-apps/hotrod/README.md | 6 +++--- sample-apps/hotrod/hotrod-delete.sh | 2 +- sample-apps/hotrod/hotrod-install.sh | 2 +- 14 files changed, 17 insertions(+), 24 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8b3e35b53f..275f0e9599 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -3,7 +3,6 @@ name: build-pipeline on: pull_request: branches: - - develop - main - release/v* diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index de2de959da..d7ecd688c0 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -3,7 +3,7 @@ name: "Update PR labels and Block PR until related docs are shipped for the feat on: pull_request: branches: - - develop + - main types: [opened, 
edited, labeled, unlabeled] permissions: diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index ef5911f9b0..b09666fb80 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -42,7 +42,7 @@ jobs: kubectl create ns sample-application # apply hotrod k8s manifest file - kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml + kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml # wait for all deployments in sample-application namespace to be READY kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s diff --git a/.github/workflows/jest-coverage-changes.yml b/.github/workflows/jest-coverage-changes.yml index 56d4cf1b70..d5df9ce686 100644 --- a/.github/workflows/jest-coverage-changes.yml +++ b/.github/workflows/jest-coverage-changes.yml @@ -2,7 +2,8 @@ name: Jest Coverage - changed files on: pull_request: - branches: develop + branches: + - main jobs: build: @@ -11,7 +12,7 @@ jobs: - name: Checkout uses: actions/checkout@v4 with: - ref: "refs/heads/develop" + ref: "refs/heads/main" token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication - name: Fetch branch diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml index a440d2a5c7..e7123b8710 100644 --- a/.github/workflows/push.yaml +++ b/.github/workflows/push.yaml @@ -4,7 +4,6 @@ on: push: branches: - main - - develop tags: - v* diff --git a/.github/workflows/sonar.yml b/.github/workflows/sonar.yml index 8c62c12d1b..eff8de7c6b 100644 --- a/.github/workflows/sonar.yml +++ b/.github/workflows/sonar.yml @@ -3,7 +3,6 @@ on: pull_request: branches: - main - - develop paths: - 'frontend/**' defaults: diff --git a/.github/workflows/staging-deployment.yaml b/.github/workflows/staging-deployment.yaml index bbdbe32531..6125283a87 100644 --- a/.github/workflows/staging-deployment.yaml +++ b/.github/workflows/staging-deployment.yaml @@ -1,12 +1,12 @@ name: staging-deployment -# Trigger deployment only on push to develop branch +# Trigger deployment only on push to main branch on: push: branches: - - develop + - main jobs: deploy: - name: Deploy latest develop branch to staging + name: Deploy latest main branch to staging runs-on: ubuntu-latest environment: staging permissions: diff --git a/.github/workflows/testing-deployment.yaml b/.github/workflows/testing-deployment.yaml index 68a9b140cb..6362331e7b 100644 --- a/.github/workflows/testing-deployment.yaml +++ b/.github/workflows/testing-deployment.yaml @@ -44,7 +44,7 @@ jobs: git add . 
git stash push -m "stashed on $(date --iso-8601=seconds)" git fetch origin - git checkout develop + git checkout main git pull # This is added to include the scenerio when new commit in PR is force-pushed git branch -D ${GITHUB_BRANCH} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 613b225353..bed34bfd86 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -339,7 +339,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301) **5.1.1 To install the HotROD sample app:** ```bash -curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \ +curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \ | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash ``` @@ -362,7 +362,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \ **5.1.4 To delete the HotROD sample app:** ```bash -curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \ +curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \ | HOTROD_NAMESPACE=sample-application bash ``` diff --git a/deploy/README.md b/deploy/README.md index 5e6740e6a7..55c3b6e8d4 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -58,7 +58,7 @@ from the HotROD application, you should see the data generated from hotrod in Si ```sh kubectl create ns sample-application -kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml +kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml ``` To generate load: diff --git a/frontend/.husky/commit-msg b/frontend/.husky/commit-msg index cb50d87a2d..0afb1a6cb8 100755 --- a/frontend/.husky/commit-msg +++ b/frontend/.husky/commit-msg @@ -13,8 +13,3 @@ if [ "$branch" = "main" ]; then echo "${color_red}${bold}You can't commit directly to the main branch${reset}" exit 1 fi - -if [ "$branch" = "develop" ]; then - echo "${color_red}${bold}You can't commit directly to the develop branch${reset}" - exit 1 -fi \ No newline at end of file diff --git a/sample-apps/hotrod/README.md b/sample-apps/hotrod/README.md index b9638281a0..3498a06bd4 100644 --- a/sample-apps/hotrod/README.md +++ b/sample-apps/hotrod/README.md @@ -5,7 +5,7 @@ Follow the steps in this section to install a sample application named HotR.O.D, ```console kubectl create ns sample-application -kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod.yaml +kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod.yaml ``` In case, you have installed SigNoz in namespace other than `platform` or selected Helm release name other than `my-release`, follow the steps below: @@ -15,7 +15,7 @@ export HELM_RELEASE=my-release-2 export SIGNOZ_NAMESPACE=platform-2 export HOTROD_NAMESPACE=sample-application-2 -curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh | bash +curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh | bash ``` To delete sample application: @@ -23,7 +23,7 @@ To delete sample application: ```console export HOTROD_NAMESPACE=sample-application-2 -curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh | bash +curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh | bash ``` For testing with local scripts, you can use the 
following commands: diff --git a/sample-apps/hotrod/hotrod-delete.sh b/sample-apps/hotrod/hotrod-delete.sh index 9cc50b4d52..a7d88ebc07 100755 --- a/sample-apps/hotrod/hotrod-delete.sh +++ b/sample-apps/hotrod/hotrod-delete.sh @@ -7,7 +7,7 @@ HOTROD_NAMESPACE=${HOTROD_NAMESPACE:-"sample-application"} if [[ "${HOTROD_NAMESPACE}" == "default" || "${HOTROD_NAMESPACE}" == "kube-system" || "${HOTROD_NAMESPACE}" == "platform" ]]; then echo "Default k8s namespace and SigNoz namespace must not be deleted" echo "Deleting components only" - kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-template.yaml) + kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml) else echo "Delete HotROD sample app namespace ${HOTROD_NAMESPACE}" kubectl delete namespace "${HOTROD_NAMESPACE}" diff --git a/sample-apps/hotrod/hotrod-install.sh b/sample-apps/hotrod/hotrod-install.sh index 42f29ede5f..b7ba4f6caa 100755 --- a/sample-apps/hotrod/hotrod-install.sh +++ b/sample-apps/hotrod/hotrod-install.sh @@ -37,7 +37,7 @@ kubectl create namespace "$HOTROD_NAMESPACE" --save-config --dry-run -o yaml 2>/ # Setup sample apps into specified namespace kubectl apply --namespace="${HOTROD_NAMESPACE}" -f <( \ - (cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-template.yaml) | \ + (cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml) | \ HOTROD_NAMESPACE="${HOTROD_NAMESPACE}" \ HOTROD_IMAGE="${HOTROD_IMAGE}" \ LOCUST_IMAGE="${LOCUST_IMAGE}" \
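
For reference, a minimal sketch of the consumer-lag time-window logic introduced in patch 06 (assumptions: the input `timestamp` is a Unix time in seconds, and the helper/payload names below are illustrative, not the exact exports):

```ts
// Sketch only: mirrors the updated getStartAndEndTimesInMilliseconds behaviour.
const FIVE_MINUTES_IN_MILLISECONDS = 5 * 60 * 1000; // 300,000 ms

function getConsumerLagWindow(timestamp: number): { start: number; end: number } {
	// Convert seconds -> milliseconds, then take a ±5 minute window around that point.
	const pointInTime = Math.floor(timestamp * 1000);
	return {
		start: pointInTime - FIVE_MINUTES_IN_MILLISECONDS,
		end: pointInTime + FIVE_MINUTES_IN_MILLISECONDS,
	};
}

// The table API expects nanoseconds; since start/end are now in milliseconds,
// they are scaled by 1e6 (previously seconds were scaled by 1e9).
const { start, end } = getConsumerLagWindow(1_734_000_000); // hypothetical epoch seconds
const tableApiPayload = { start: start * 1e6, end: end * 1e6 };
```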