chore: some changes

srikanthccv 2025-05-30 22:13:58 +05:30
parent c073a47834
commit 1f04b77897
10 changed files with 210 additions and 70 deletions

View File

@@ -54,6 +54,8 @@ func (m *fieldMapper) getColumn(_ context.Context, key *telemetrytypes.Telemetry
 	case telemetrytypes.FieldContextUnspecified:
 		col, ok := timeSeriesV4Columns[key.Name]
 		if !ok {
+			// if nothing is found, return labels column
+			// as we keep all the labels in the labels column
 			return timeSeriesV4Columns["labels"], nil
 		}
 		return col, nil

View File

@@ -1 +0,0 @@
-package internal

View File

@@ -1 +0,0 @@
-package internal

View File

@@ -49,11 +49,13 @@ func getKeySelectors(query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation])
 		whereClauseSelectors := querybuilder.QueryStringToKeysSelectors(query.Filter.Expression)
 		keySelectors = append(keySelectors, whereClauseSelectors...)
 	}
+
 	for idx := range query.GroupBy {
 		groupBy := query.GroupBy[idx]
 		selectors := querybuilder.QueryStringToKeysSelectors(groupBy.TelemetryFieldKey.Name)
 		keySelectors = append(keySelectors, selectors...)
 	}
+
 	for idx := range query.Order {
 		keySelectors = append(keySelectors, &telemetrytypes.FieldKeySelector{
 			Name: query.Order[idx].Key.Name,
@@ -62,6 +64,7 @@ func getKeySelectors(query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation])
 			FieldDataType: query.Order[idx].Key.FieldDataType,
 		})
 	}
+
 	for idx := range keySelectors {
 		keySelectors[idx].Signal = telemetrytypes.SignalMetrics
 	}
@@ -85,6 +88,27 @@ func (b *metricQueryStatementBuilder) Build(
 }
 
 // Fastpath (no fingerprint grouping)
+
+// canShortCircuitDelta returns true if we can use the optimized query
+// for the given query.
+// This is used to avoid the group by fingerprint, thus improving the
+// performance for certain queries.
+// Cases where we can short circuit:
+// 1. time aggregation = (rate|increase) and space aggregation = sum
+//    - rate = sum(value)/step, increase = sum(value); a sum of sums is the same as the sum of all values
+//
+// 2. time aggregation = sum and space aggregation = sum
+//    - a sum of sums is the same as the sum of all values
+//
+// 3. time aggregation = min and space aggregation = min
+//    - a min of mins is the same as the min of all values
+//
+// 4. time aggregation = max and space aggregation = max
+//    - a max of maxes is the same as the max of all values
+//
+// 5. special case: exp histograms; there is no need for per-series/fingerprint
+//    aggregation, we can directly use the quantilesDDMerge function
+//
+// All of this is true only for delta metrics.
 func (b *metricQueryStatementBuilder) canShortCircuitDelta(q qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) bool {
 	if q.Aggregations[0].Temporality != metrictypes.Delta {
 		return false
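Only the temporality check is visible in this hunk. A minimal sketch of how the remaining checks from the doc comment could look — hypothetical, not the function body from this commit; the TimeAggregationIncrease/Min/Max and SpaceAggregationMin/Max constants are assumed by analogy with the ones that do appear in this diff:

// Sketch only: the five short-circuit cases listed above, expressed as code.
func canShortCircuitDeltaSketch(agg qbtypes.MetricAggregation) bool {
	if agg.Temporality != metrictypes.Delta {
		return false // every case below requires delta temporality
	}
	// case 5: exp histogram percentiles merge DD sketches directly
	if agg.Type == metrictypes.ExpHistogramType && agg.SpaceAggregation.IsPercentile() {
		return true
	}
	switch agg.SpaceAggregation {
	case metrictypes.SpaceAggregationSum:
		// cases 1 and 2: a sum of per-series sums equals the sum of all values
		return agg.TimeAggregation == metrictypes.TimeAggregationRate ||
			agg.TimeAggregation == metrictypes.TimeAggregationIncrease ||
			agg.TimeAggregation == metrictypes.TimeAggregationSum
	case metrictypes.SpaceAggregationMin:
		// case 3: a min of per-series mins equals the min of all values
		return agg.TimeAggregation == metrictypes.TimeAggregationMin
	case metrictypes.SpaceAggregationMax:
		// case 4: a max of per-series maxes equals the max of all values
		return agg.TimeAggregation == metrictypes.TimeAggregationMax
	}
	return false
}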
@@ -126,8 +150,9 @@ func (b *metricQueryStatementBuilder) buildPipelineStatement(
 	origTimeAgg := query.Aggregations[0].TimeAggregation
 	origGroupBy := query.GroupBy
 
-	if query.Aggregations[0].SpaceAggregation.IsPercentile() {
-		// 1. add le in the group by if doesn't exist
+	if query.Aggregations[0].SpaceAggregation.IsPercentile() &&
+		query.Aggregations[0].Type != metrictypes.ExpHistogramType {
+		// add le in the group by if it doesn't exist
 		leExists := false
 		for _, g := range query.GroupBy {
 			if g.TelemetryFieldKey.Name == "le" {
@@ -135,18 +160,21 @@ func (b *metricQueryStatementBuilder) buildPipelineStatement(
 				break
 			}
 		}
+		// we need to add le in the group by if it doesn't exist
 		if !leExists {
 			query.GroupBy = append(query.GroupBy, qbtypes.GroupByKey{
 				TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "le"},
 			})
 		}
-		// 2. make the time aggregation rate and space aggregation sum
+		// make the time aggregation and space aggregation sum
 		query.Aggregations[0].TimeAggregation = metrictypes.TimeAggregationSum
 		query.Aggregations[0].SpaceAggregation = metrictypes.SpaceAggregationSum
 	}
 
-	// 1. time_series_cte
+	// time_series_cte
+	// this is applicable for all the queries
 	if frag, args, err := b.buildTimeSeriesCTE(ctx, start, end, query, keys); err != nil {
 		return nil, err
 	} else if frag != "" {
@@ -155,7 +183,7 @@ func (b *metricQueryStatementBuilder) buildPipelineStatement(
 	}
 
 	if b.canShortCircuitDelta(query) {
-		// 2. spatial_aggregation_cte directly
+		// spatial_aggregation_cte directly for certain delta queries
 		if frag, args, err := b.buildTemporalAggDeltaFastPath(start, end, query); err != nil {
 			return nil, err
 		} else if frag != "" {
@@ -163,7 +191,7 @@ func (b *metricQueryStatementBuilder) buildPipelineStatement(
 			cteArgs = append(cteArgs, args)
 		}
 	} else {
-		// 2. temporal_aggregation_cte
+		// temporal_aggregation_cte
 		if frag, args, err := b.buildTemporalAggregationCTE(ctx, start, end, query, keys); err != nil {
 			return nil, err
 		} else if frag != "" {
@@ -171,7 +199,7 @@ func (b *metricQueryStatementBuilder) buildPipelineStatement(
 			cteArgs = append(cteArgs, args)
 		}
 
-		// 3. spatial_aggregation_cte
+		// spatial_aggregation_cte
 		if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
 			return nil, err
 		} else if frag != "" {
@@ -185,7 +213,7 @@ func (b *metricQueryStatementBuilder) buildPipelineStatement(
 	query.Aggregations[0].TimeAggregation = origTimeAgg
 	query.GroupBy = origGroupBy
 
-	// 4. final SELECT
+	// final SELECT
 	return b.buildFinalSelect(cteFragments, cteArgs, query)
 }
@@ -205,12 +233,16 @@ func (b *metricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
 		sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
 	}
 
-	aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
+	aggCol := AggregationColumnForSamplesTable(
+		start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
+		query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints,
+	)
+
 	if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
 		aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
 	}
-	if query.Aggregations[0].SpaceAggregation.IsPercentile() {
+	if query.Aggregations[0].SpaceAggregation.IsPercentile() &&
+		query.Aggregations[0].Type == metrictypes.ExpHistogramType {
 		aggCol = fmt.Sprintf("quantilesDDMerge(0.01, %f)(sketch)[1]", query.Aggregations[0].SpaceAggregation.Percentile())
 	}
@@ -264,9 +296,17 @@ func (b *metricQueryStatementBuilder) buildTimeSeriesCTE(
 		sb.In("metric_name", query.Aggregations[0].MetricName),
 		sb.GTE("unix_milli", start),
 		sb.LTE("unix_milli", end),
-		sb.ILike("temporality", query.Aggregations[0].Temporality.StringValue()),
-		sb.EQ("__normalized", false), // TODO configurable
 	)
+
+	if query.Aggregations[0].Temporality != metrictypes.Unspecified {
+		sb.Where(sb.ILike("temporality", query.Aggregations[0].Temporality.StringValue()))
+	}
+
+	// TODO configurable if we don't roll out the new un-normalized metrics
+	sb.Where(
+		sb.EQ("__normalized", false),
+	)
+
 	if filterWhere != nil {
 		sb.AddWhereClause(filterWhere)
 	}
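The point of this change: conditions passed to the initial Where(...) call are unconditional, so the temporality predicate moves into its own guarded Where call and is only added when the temporality is known. A runnable sketch of the pattern, assuming the builder is huandu/go-sqlbuilder (which matches the sb.In/sb.GTE/sb.ILike calls seen here, provided the version is recent enough to have Cond.ILike):

package main

import (
	"fmt"

	"github.com/huandu/go-sqlbuilder"
)

func main() {
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("fingerprint").From("time_series_v4")
	sb.Where(
		sb.GTE("unix_milli", 1000),
		sb.LTE("unix_milli", 2000),
	)

	temporality := "delta" // an empty string would stand for Unspecified here
	if temporality != "" {
		// a second Where call ANDs its condition onto the earlier ones
		sb.Where(sb.ILike("temporality", temporality))
	}

	sql, args := sb.Build()
	fmt.Println(sql)  // SELECT fingerprint FROM time_series_v4 WHERE unix_milli >= ? AND ... (ILIKE rendering varies by flavor)
	fmt.Println(args) // [1000 2000 delta]
}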
@@ -284,12 +324,13 @@ func (b *metricQueryStatementBuilder) buildTemporalAggregationCTE(
 	_ map[string][]*telemetrytypes.TelemetryFieldKey,
 ) (string, []any, error) {
 	if query.Aggregations[0].Temporality == metrictypes.Delta {
-		return b.buildTemporalAggDelta(start, end, query)
+		return b.buildTemporalAggDelta(ctx, start, end, query)
 	}
 	return b.buildTemporalAggCumulativeOrUnspecified(ctx, start, end, query)
 }
 
 func (b *metricQueryStatementBuilder) buildTemporalAggDelta(
+	_ context.Context,
 	start, end uint64,
 	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
 ) (string, []any, error) {
@@ -329,7 +370,7 @@ func (b *metricQueryStatementBuilder) buildTemporalAggDelta(
 }
 
 func (b *metricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
-	ctx context.Context,
+	_ context.Context,
 	start, end uint64,
 	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
 ) (string, []any, error) {

View File

@@ -2,7 +2,6 @@ package telemetrymetrics
 
 import (
 	"context"
-	"fmt"
 	"log/slog"
 	"testing"
 	"time"
@@ -24,7 +23,7 @@ func TestStatementBuilder(t *testing.T) {
 		expectedErr error
 	}{
 		{
-			name:        "test",
+			name:        "test_cumulative_rate_sum",
 			requestType: qbtypes.RequestTypeTimeSeries,
 			query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
 				Signal: telemetrytypes.SignalMetrics,
@@ -51,13 +50,13 @@ func TestStatementBuilder(t *testing.T) {
 				},
 			},
 			expected: qbtypes.Statement{
-				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp <= ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY ALL ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp <= ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) IN (SELECT `service.name` FROM __limit_cte) GROUP BY ALL",
-				Args:  []any{"cartservice", "%service.name%", "%service.name%cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)},
+				Query: "WITH __time_series_cte AS (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY ALL), __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947419000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN __time_series_cte ON points.fingerprint = __time_series_cte.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ALL ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ALL) SELECT * FROM __spatial_aggregation_cte",
+				Args:  []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983448000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947419000), uint64(1747983448000), 0},
 			},
 			expectedErr: nil,
 		},
 		{
-			name:        "test",
+			name:        "test_delta_rate_sum",
 			requestType: qbtypes.RequestTypeTimeSeries,
 			query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
 				Signal: telemetrytypes.SignalMetrics,
@@ -84,8 +83,73 @@ func TestStatementBuilder(t *testing.T) {
 				},
 			},
 			expected: qbtypes.Statement{
-				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp <= ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY ALL ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp <= ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) IN (SELECT `service.name` FROM __limit_cte) GROUP BY ALL",
-				Args:  []any{"cartservice", "%service.name%", "%service.name%cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)},
+				Query: "WITH __time_series_cte AS (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY ALL), __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN __time_series_cte ON points.fingerprint = __time_series_cte.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ALL) SELECT * FROM __spatial_aggregation_cte",
+				Args:  []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983448000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947419000), uint64(1747983448000)},
+			},
+			expectedErr: nil,
+		},
+		{
+			name:        "test_histogram_percentile",
+			requestType: qbtypes.RequestTypeTimeSeries,
+			query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
+				Signal:       telemetrytypes.SignalMetrics,
+				StepInterval: qbtypes.Step{Duration: 30 * time.Second},
+				Aggregations: []qbtypes.MetricAggregation{
+					{
+						MetricName:       "signoz_latency",
+						Type:             metrictypes.HistogramType,
+						Temporality:      metrictypes.Delta,
+						SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
+					},
+				},
+				Filter: &qbtypes.Filter{
+					Expression: "service.name = 'cartservice'",
+				},
+				Limit: 10,
+				GroupBy: []qbtypes.GroupByKey{
+					{
+						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
+							Name: "service.name",
+						},
+					},
+				},
+			},
+			expected: qbtypes.Statement{
+				Query: "WITH __time_series_cte AS (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY ALL), __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value) AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN __time_series_cte ON points.fingerprint = __time_series_cte.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ALL) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY ALL",
+				Args:  []any{"signoz_latency", uint64(1747936800000), uint64(1747983448000), "delta", false, "cartservice", "signoz_latency", uint64(1747947419000), uint64(1747983448000)},
+			},
+			expectedErr: nil,
+		},
+		{
+			name:        "test_gauge_avg_sum",
+			requestType: qbtypes.RequestTypeTimeSeries,
+			query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
+				Signal:       telemetrytypes.SignalMetrics,
+				StepInterval: qbtypes.Step{Duration: 30 * time.Second},
+				Aggregations: []qbtypes.MetricAggregation{
+					{
+						MetricName:       "system.memory.usage",
+						Type:             metrictypes.GaugeType,
+						Temporality:      metrictypes.Unspecified,
+						TimeAggregation:  metrictypes.TimeAggregationAvg,
+						SpaceAggregation: metrictypes.SpaceAggregationSum,
+					},
+				},
+				Filter: &qbtypes.Filter{
+					Expression: "host.name = 'big-data-node-1'",
+				},
+				Limit: 10,
+				GroupBy: []qbtypes.GroupByKey{
+					{
+						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
+							Name: "host.name",
+						},
+					},
+				},
+			},
+			expected: qbtypes.Statement{
+				Query: "WITH __time_series_cte AS (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY ALL), __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN __time_series_cte ON points.fingerprint = __time_series_cte.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ALL ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ALL) SELECT * FROM __spatial_aggregation_cte",
+				Args:  []any{"system.memory.usage", uint64(1747936800000), uint64(1747983448000), false, "big-data-node-1", "system.memory.usage", uint64(1747947419000), uint64(1747983448000), 0},
 			},
 			expectedErr: nil,
 		},
@@ -94,7 +158,11 @@ func TestStatementBuilder(t *testing.T) {
 	fm := NewFieldMapper()
 	cb := NewConditionBuilder(fm)
 	mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
-	mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
+	keys, err := telemetrytypestest.LoadFieldKeysFromJSON("testdata/keys_map.json")
+	if err != nil {
+		t.Fatalf("failed to load field keys: %v", err)
+	}
+	mockMetadataStore.KeysMap = keys
 
 	aggExprRewriter := querybuilder.NewAggExprRewriter(nil, fm, cb, "", nil)
@@ -115,10 +183,6 @@ func TestStatementBuilder(t *testing.T) {
 				require.Error(t, err)
 				require.Contains(t, err.Error(), c.expectedErr.Error())
 			} else {
-
-				fmt.Println(q.Query)
-				fmt.Println(q.Args)
-
 				require.NoError(t, err)
 				require.Equal(t, c.expected.Query, q.Query)
 				require.Equal(t, c.expected.Args, q.Args)

View File

@@ -1,30 +0,0 @@
-package telemetrymetrics
-
-import (
-	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
-)
-
-func buildCompleteFieldKeyMap() map[string][]*telemetrytypes.TelemetryFieldKey {
-	keysMap := map[string][]*telemetrytypes.TelemetryFieldKey{
-		"service.name": {
-			{
-				Name:          "service.name",
-				FieldContext:  telemetrytypes.FieldContextResource,
-				FieldDataType: telemetrytypes.FieldDataTypeString,
-			},
-		},
-		"http.request.method": {
-			{
-				Name:          "http.request.method",
-				FieldContext:  telemetrytypes.FieldContextAttribute,
-				FieldDataType: telemetrytypes.FieldDataTypeString,
-			},
-		},
-	}
-	for _, keys := range keysMap {
-		for _, key := range keys {
-			key.Signal = telemetrytypes.SignalMetrics
-		}
-	}
-	return keysMap
-}

View File

@@ -0,0 +1,34 @@
+{
+  "service.name": [
+    {
+      "name": "service.name",
+      "fieldContext": "resource",
+      "fieldDataType": "string",
+      "signal": "metrics"
+    }
+  ],
+  "http.request.method": [
+    {
+      "name": "http.request.method",
+      "fieldContext": "attribute",
+      "fieldDataType": "string",
+      "signal": "metrics"
+    }
+  ],
+  "http.response.status_code": [
+    {
+      "name": "http.response.status_code",
+      "fieldContext": "attribute",
+      "fieldDataType": "int",
+      "signal": "metrics"
+    }
+  ],
+  "host.name": [
+    {
+      "name": "host.name",
+      "fieldContext": "resource",
+      "fieldDataType": "string",
+      "signal": "metrics"
+    }
+  ]
+}

View File

@@ -92,22 +92,16 @@ func (s SpaceAggregation) Percentile() float64 {
 }
 
 // MetricTableHints is a struct that contains tables to use instead of the derived tables
-// from the start and end time
+// from the start and end time, for internal use only when we need to override the derived tables
 type MetricTableHints struct {
 	TimeSeriesTableName string
 	SamplesTableName    string
 }
 
-// Certain OTEL metrics encode the state in the value of the metric, which is in general
-// a bad modelling (presumably coming from some vendor) and makes it hard to write the aggregation queries
-//
-// (should have been a key:value with 0, 1 ), example: (pod_status: 0, 1, 2, 3, 4, 5)
-//
-// they are better modelled as pod_status: (state: running, pending, terminated, etc)
-// the value would be 0 or 1; if the value is 1 and the state is pending, then it indicates the pod is pending.
-//
-// however, there have been some metrics that do this, and we need to support them.
-// This is a workaround for those metrics.
+// Until recently, certain OTEL metrics encoded the state in the value of the metric, which is in general
+// bad modelling (presumably coming from some vendor) and makes it hard to write the aggregation queries.
+// While this is not the case anymore, there are some existing metrics that do this, and we need a way to support them.
+// This is a workaround for those metrics.
 type MetricValueFilter struct {
 	Value float64
 }
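For illustration, the pod_status example from the removed comment: instead of pod_status{state="pending"} = 1, the metric reports pod_status = 1 where the value 1 happens to mean "pending", so queries must first pin the value. A hedged sketch of how a MetricValueFilter could be applied to samples before any aggregation (applyValueFilter and the sample type are hypothetical; the commit only defines the struct):

package main

import "fmt"

// MetricValueFilter mirrors the struct above: the single value that encodes
// the state we want to keep.
type MetricValueFilter struct {
	Value float64
}

type sample struct {
	fingerprint string
	value       float64
}

// applyValueFilter is hypothetical: it drops samples whose value is not the
// requested state, so that downstream aggregations only see matching series.
func applyValueFilter(samples []sample, f *MetricValueFilter) []sample {
	if f == nil {
		return samples // no filter configured, keep everything
	}
	var out []sample
	for _, s := range samples {
		if s.value == f.Value {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	// pod_status encodes state in the value: 1 happens to mean "pending"
	samples := []sample{{"podA", 1}, {"podB", 3}, {"podC", 1}}
	fmt.Println(applyValueFilter(samples, &MetricValueFilter{Value: 1}))
	// [{podA 1} {podC 1}]
}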

View File

@@ -153,7 +153,7 @@ type MetricAggregation struct {
 	// metric to query
 	MetricName string `json:"metricName"`
 	// type of the metric
-	Type metrictypes.Type `json:"type"`
+	Type metrictypes.Type `json:"-"`
 	// temporality to apply to the query
 	Temporality metrictypes.Temporality `json:"temporality"`
 	// time aggregation to apply to the query

View File

@@ -0,0 +1,37 @@
+package telemetrytypestest
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+
+	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
+)
+
+// LoadFieldKeysFromJSON loads telemetry field keys from a JSON file
+func LoadFieldKeysFromJSON(filePath string) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
+	// Read the JSON file
+	jsonData, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read JSON file: %w", err)
+	}
+
+	// Parse JSON directly into the target format using built-in unmarshaling
+	var result map[string][]*telemetrytypes.TelemetryFieldKey
+	if err := json.Unmarshal(jsonData, &result); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal JSON: %w", err)
+	}
+
+	return result, nil
+}
+
+// LoadFieldKeysFromJSONString loads telemetry field keys from a JSON string
+func LoadFieldKeysFromJSONString(jsonStr string) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
+	// Parse JSON directly into the target format using built-in unmarshaling
+	var result map[string][]*telemetrytypes.TelemetryFieldKey
+	if err := json.Unmarshal([]byte(jsonStr), &result); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal JSON: %w", err)
+	}
+
+	return result, nil
+}