chore: enable caching for all panel types in metrics v4 (#5651)
This commit is contained in:
parent
bb84960442
commit
62be3e7c13
@@ -89,9 +89,11 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
  }
}

// execClickHouseQuery executes the clickhouse query and returns the series list
// if testing mode is enabled, it returns the mocked series list
func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3.Series, error) {
  q.queriesExecuted = append(q.queriesExecuted, query)
  if q.testingMode && q.reader == nil {
    q.queriesExecuted = append(q.queriesExecuted, query)
    return q.returnedSeries, q.returnedErr
  }
  result, err := q.reader.GetTimeSeriesResultV3(ctx, query)
@@ -116,9 +118,11 @@ func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3.
  return result, err
}

// execPromQuery executes the prom query and returns the series list
// if testing mode is enabled, it returns the mocked series list
func (q *querier) execPromQuery(ctx context.Context, params *model.QueryRangeParams) ([]*v3.Series, error) {
  q.queriesExecuted = append(q.queriesExecuted, params.Query)
  if q.testingMode && q.reader == nil {
    q.queriesExecuted = append(q.queriesExecuted, params.Query)
    q.timeRanges = append(q.timeRanges, []int{int(params.Start.UnixMilli()), int(params.End.UnixMilli())})
    return q.returnedSeries, q.returnedErr
  }
@@ -226,6 +230,9 @@ func (q *querier) findMissingTimeRanges(start, end, step int64, cachedData []byt
  return findMissingTimeRanges(start, end, step, cachedSeriesList, q.fluxInterval)
}
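Editor's sketch (not part of the diff): findMissingTimeRanges is the heart of the caching path above — given what the cache already covers and the requested window, it decides which sub-ranges still have to hit the database (the real implementation additionally keeps a flux interval of very recent data out of the cache). A minimal, self-contained illustration of that idea, assuming a single contiguous cached window:

package main

import "fmt"

type timeRange struct{ Start, End int64 } // unix ms

// missingRanges is a simplified stand-in for findMissingTimeRanges: whatever part of
// [start, end] is not covered by the cached window [cachedStart, cachedEnd] must be queried.
func missingRanges(start, end, cachedStart, cachedEnd int64) []timeRange {
  var missing []timeRange
  if cachedStart > start {
    hi := cachedStart
    if end < hi {
      hi = end
    }
    missing = append(missing, timeRange{start, hi})
  }
  if cachedEnd < end {
    lo := cachedEnd
    if start > lo {
      lo = start
    }
    missing = append(missing, timeRange{lo, end})
  }
  return missing
}

func main() {
  // cache covers 03:23-05:23; a new request for 04:23-06:23 only needs 05:23-06:23
  fmt.Println(missingRanges(1675119196722, 1675126396722, 1675115596722, 1675122796722))
}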
// labelsToString converts the labels map to a string
// sorted by key so that the string is consistent
// across different runs
func labelsToString(labels map[string]string) string {
  type label struct {
    Key string
@@ -245,6 +252,10 @@ func labelsToString(labels map[string]string) string {
  return fmt.Sprintf("{%s}", strings.Join(labelKVs, ","))
}
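Editor's sketch (not part of the diff): the hunk above elides the body of labelsToString between the struct declaration and the final Sprintf. The documented behaviour — serialize the label map sorted by key so equal label sets always produce the same string — can be pictured as follows; the exact key=value separator inside the braces is an assumption:

import (
  "fmt"
  "sort"
  "strings"
)

// labelsToStringSketch sorts the keys first so the output is stable across runs,
// then joins key=value pairs inside braces, mirroring the Sprintf shown above.
func labelsToStringSketch(labels map[string]string) string {
  keys := make([]string, 0, len(labels))
  for k := range labels {
    keys = append(keys, k)
  }
  sort.Strings(keys)
  labelKVs := make([]string, 0, len(keys))
  for _, k := range keys {
    labelKVs = append(labelKVs, fmt.Sprintf("%s=%s", k, labels[k]))
  }
  return fmt.Sprintf("{%s}", strings.Join(labelKVs, ","))
}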
// filterCachedPoints filters the points in the series list
// that are outside the start and end time range
// and returns the filtered series list
// TODO(srikanthccv): is this really needed?
func filterCachedPoints(cachedSeries []*v3.Series, start, end int64) {
  for _, c := range cachedSeries {
    points := []v3.Point{}
@@ -258,6 +269,8 @@ func filterCachedPoints(cachedSeries []*v3.Series, start, end int64) {
    }
}
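Editor's sketch (not part of the diff): filterCachedPoints drops cached points that fall outside the requested window, mutating the series in place (its signature above has no return value despite the comment). Using local stand-in types, the shape of that filter is roughly:

type point struct {
  Timestamp int64
  Value     float64
}

type series struct {
  Labels map[string]string
  Points []point
}

// filterPointsInPlace keeps only points inside [start, end]; whether the bounds are
// inclusive in the real filterCachedPoints is not visible in this hunk.
func filterPointsInPlace(cached []*series, start, end int64) {
  for _, c := range cached {
    points := []point{}
    for _, p := range c.Points {
      if p.Timestamp < start || p.Timestamp > end {
        continue
      }
      points = append(points, p)
    }
    c.Points = points
  }
}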
// mergeSerieses merges the cached series and the missed series
// and returns the merged series list
func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
  // Merge the missed series with the cached series by timestamp
  mergedSeries := make([]*v3.Series, 0)
@@ -275,7 +288,9 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
    }
    seriesesByLabels[labelsToString(series.Labels)].Points = append(seriesesByLabels[labelsToString(series.Labels)].Points, series.Points...)
  }

  // Sort the points in each series by timestamp
  // and remove duplicate points
  for idx := range seriesesByLabels {
    series := seriesesByLabels[idx]
    series.SortPoints()
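Editor's sketch (not part of the diff), reusing the local series/point types and labelsToStringSketch from the sketches above: mergeSerieses groups cached and freshly queried series by their label string, concatenates their points, then sorts and de-duplicates by timestamp — roughly:

// mergeSketch merges two series lists keyed by their serialized labels; duplicate
// timestamps (a point present in both the cache and the fresh result) are dropped.
func mergeSketch(cached, missed []*series) []*series {
  byLabels := map[string]*series{}
  var order []string
  for _, s := range append(append([]*series{}, cached...), missed...) {
    k := labelsToStringSketch(s.Labels)
    if _, ok := byLabels[k]; !ok {
      byLabels[k] = &series{Labels: s.Labels}
      order = append(order, k)
    }
    byLabels[k].Points = append(byLabels[k].Points, s.Points...)
  }
  merged := make([]*series, 0, len(order))
  for _, k := range order {
    s := byLabels[k]
    sort.Slice(s.Points, func(i, j int) bool { return s.Points[i].Timestamp < s.Points[j].Timestamp })
    deduped := s.Points[:0]
    var lastTs int64 = -1
    for _, p := range s.Points {
      if p.Timestamp != lastTs {
        deduped = append(deduped, p)
        lastTs = p.Timestamp
      }
    }
    s.Points = deduped
    merged = append(merged, s)
  }
  return merged
}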
@@ -499,6 +514,8 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
  return res, nil, nil
}

// QueryRange is the main function that runs the queries
// and returns the results
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
  var results []*v3.Result
  var err error
@@ -539,10 +556,16 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3,
  return results, errQueriesByName, err
}

// QueriesExecuted returns the list of queries executed
// in the last query range call
// used for testing
func (q *querier) QueriesExecuted() []string {
  return q.queriesExecuted
}

// TimeRanges returns the list of time ranges
// that were used to fetch the data
// used for testing
func (q *querier) TimeRanges() [][]int {
  return q.timeRanges
}
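Editor's note: QueriesExecuted and TimeRanges exist purely so the tests in the next file can observe what the querier did in testing mode. A hedged usage fragment — opts and params stand for the fixtures those tests build and are not defined here:

func TestQuerierRecordsWhatItRan(t *testing.T) {
  q := NewQuerier(opts) // opts enables testing mode with a mocked reader, as in the tests below
  _, _, err := q.QueryRange(context.Background(), params[0], nil)
  require.NoError(t, err)
  // the recorded SQL strings (and, for PromQL, time ranges) are what the assertions below compare against
  require.NotEmpty(t, q.QueriesExecuted())
  t.Log(q.TimeRanges())
}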
@@ -411,12 +411,13 @@ func TestV2FindMissingTimeRangesWithFluxInterval(t *testing.T) {
  }
}

func TestV2QueryRange(t *testing.T) {
func TestV2QueryRangePanelGraph(t *testing.T) {
  params := []*v3.QueryRangeParamsV3{
    {
      Start: 1675115596722,
      End: 1675115596722 + 120*60*1000,
      Start: 1675115596722, // 31st Jan, 03:23:16
      End: 1675115596722 + 120*60*1000, // 31st Jan, 05:23:16
      Step: 60,
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypeBuilder,
        PanelType: v3.PanelTypeGraph,
@@ -450,8 +451,8 @@ func TestV2QueryRange(t *testing.T) {
      },
    },
    {
      Start: 1675115596722 + 60*60*1000,
      End: 1675115596722 + 180*60*1000,
      Start: 1675115596722 + 60*60*1000, // 31st Jan, 04:23:16
      End: 1675115596722 + 180*60*1000, // 31st Jan, 06:23:16
      Step: 60,
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypeBuilder,
@@ -569,19 +570,21 @@ func TestV2QueryRange(t *testing.T) {
          "__name__": "http_server_requests_seconds_count",
        },
        Points: []v3.Point{
          {Timestamp: 1675115596722, Value: 1},
          {Timestamp: 1675115596722 + 60*60*1000, Value: 2},
          {Timestamp: 1675115596722 + 120*60*1000, Value: 3},
          {Timestamp: 1675115596722, Value: 1}, // 31st Jan, 03:23:16
          {Timestamp: 1675115596722 + 60*60*1000, Value: 2}, // 31st Jan, 04:23:16
          {Timestamp: 1675115596722 + 120*60*1000, Value: 3}, // 31st Jan, 05:23:16
        },
      },
    },
  }
  q := NewQuerier(opts)
  expectedTimeRangeInQueryString := []string{
    fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000, 1675115580000+120*60*1000),
    fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000+120*60*1000, 1675115580000+180*60*1000),
    fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", 1675115580000*1000000, (1675115580000+120*60*1000)*int64(1000000)),
    fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)),
    fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000, 1675115580000+120*60*1000), // 31st Jan, 03:23:00 to 31st Jan, 05:23:00
    // second query uses the cached data from the first query
    fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000+120*60*1000, 1675115580000+180*60*1000), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00
    // No caching for traces yet
    fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", 1675115580000*1000000, (1675115580000+120*60*1000)*int64(1000000)), // 31st Jan, 03:23:00 to 31st Jan, 05:23:00
    fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)), // 31st Jan, 04:23:00 to 31st Jan, 06:23:00
  }

  for i, param := range params {
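Editor's note: the expected clauses start at 1675115580000 rather than at the request start 1675115596722 because the window is snapped to the 60 s step before it goes into the generated query. The rounding rule is inferred from the expected values; the arithmetic itself checks out:

package main

import "fmt"

func main() {
  start := int64(1675115596722)   // 31st Jan, 03:23:16.722 in ms
  step := int64(60 * 1000)        // 60 s step in ms
  fmt.Println(start - start%step) // 1675115580000 -> 31st Jan, 03:23:00
}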
@@ -600,12 +603,12 @@ func TestV2QueryRange(t *testing.T) {
  }
}

func TestV2QueryRangeValueType(t *testing.T) {
  // There shouldn't be any caching for value panel type
  params := []*v3.QueryRangeParamsV3{
    {
      Start: 1675115596722,
      End: 1675115596722 + 120*60*1000,
      Start: 1675115596722, // 31st Jan, 03:23:16
      End: 1675115596722 + 120*60*1000, // 31st Jan, 05:23:16
      Step: 5 * time.Minute.Milliseconds(),
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypeBuilder,
        PanelType: v3.PanelTypeValue,
@@ -635,9 +638,43 @@ func TestV2QueryRangeValueType(t *testing.T) {
      },
    },
    {
      Start: 1675115596722 + 60*60*1000,
      End: 1675115596722 + 180*60*1000,
      Start: 1675115596722 + 60*60*1000, // 31st Jan, 04:23:16
      End: 1675115596722 + 180*60*1000, // 31st Jan, 06:23:16
      Step: 60,
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypeBuilder,
        PanelType: v3.PanelTypeValue,
        BuilderQueries: map[string]*v3.BuilderQuery{
          "A": {
            QueryName: "A",
            Temporality: v3.Delta,
            StepInterval: 60,
            AggregateAttribute: v3.AttributeKey{Key: "http_server_requests_seconds_count", Type: v3.AttributeKeyTypeUnspecified, DataType: "float64", IsColumn: true},
            DataSource: v3.DataSourceMetrics,
            Filters: &v3.FilterSet{
              Operator: "AND",
              Items: []v3.FilterItem{
                {
                  Key: v3.AttributeKey{Key: "method", IsColumn: false},
                  Operator: "=",
                  Value: "GET",
                },
              },
            },
            AggregateOperator: v3.AggregateOperatorSumRate,
            TimeAggregation: v3.TimeAggregationRate,
            SpaceAggregation: v3.SpaceAggregationSum,
            Expression: "A",
          },
        },
      },
    },
    {
      Start: 1675115596722 + 60*60*1000, // 31st Jan, 04:23:16
      End: 1675115596722 + 180*60*1000, // 31st Jan, 06:23:16
      Step: 5 * time.Minute.Milliseconds(),
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypeBuilder,
        PanelType: v3.PanelTypeValue,
@@ -681,18 +718,18 @@ func TestV2QueryRangeValueType(t *testing.T) {
          "__name__": "http_server_requests_seconds_count",
        },
        Points: []v3.Point{
          {Timestamp: 1675115596722, Value: 1},
          {Timestamp: 1675115596722 + 60*60*1000, Value: 2},
          {Timestamp: 1675115596722 + 120*60*1000, Value: 3},
          {Timestamp: 1675115596722, Value: 1}, // 31st Jan, 03:23:16
          {Timestamp: 1675115596722 + 60*60*1000, Value: 2}, // 31st Jan, 04:23:16
          {Timestamp: 1675115596722 + 120*60*1000, Value: 3}, // 31st Jan, 05:23:16
        },
      },
    },
  }
  q := NewQuerier(opts)
  // No caching
  expectedTimeRangeInQueryString := []string{
    fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000),
    fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)),
    fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000), // 31st Jan, 03:23:00 to 31st Jan, 05:23:00
    fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000+120*60*1000, 1675115580000+180*60*1000), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00
    fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00
  }

  for i, param := range params {
@@ -717,6 +754,7 @@ func TestV2QueryRangeTimeShift(t *testing.T) {
      Start: 1675115596722, //31, 3:23
      End: 1675115596722 + 120*60*1000, //31, 5:23
      Step: 5 * time.Minute.Milliseconds(),
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypeBuilder,
        PanelType: v3.PanelTypeGraph,
@@ -767,8 +805,9 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
  params := []*v3.QueryRangeParamsV3{
    {
      Start: 1675115596722 + 60*60*1000 - 86400*1000, //30th Jan, 4:23
      End: 1675115596722 + 120*60*1000 - 86400*1000, //30, 5:23
      End: 1675115596722 + 120*60*1000 - 86400*1000, //30th Jan, 5:23
      Step: 5 * time.Minute.Milliseconds(),
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypeBuilder,
        PanelType: v3.PanelTypeGraph,
@@ -796,6 +835,7 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
      Start: 1675115596722, //31st Jan, 3:23
      End: 1675115596722 + 120*60*1000, //31st Jan, 5:23
      Step: 5 * time.Minute.Milliseconds(),
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypeBuilder,
        PanelType: v3.PanelTypeGraph,
@@ -833,7 +873,7 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
      Labels: map[string]string{},
      Points: []v3.Point{
        {Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1}, // 30th Jan, 4:23
        {Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2}, // 30th Jan, 6:23
        {Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2}, // 30th Jan, 5:23
      },
    },
  },
@@ -842,8 +882,8 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {

  // logs queries are generates in ns
  expectedTimeRangeInQueryString := []string{
    fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000),
    fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000),
    fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000), // 30th Jan, 4:23 to 30th Jan, 5:23
    fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000), // 30th Jan, 3:23 to 30th Jan, 5:23
  }

  for i, param := range params {
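Editor's note: the log-query clauses above are the same instants converted from milliseconds to nanoseconds (multiplied by 1000000), matching the expected timestamp clauses in the test:

package main

import "fmt"

func main() {
  startMs := int64(1675115596722) + 60*60*1000 - 86400*1000 // 30th Jan, 4:23 in ms
  fmt.Println(startMs * 1000000)                            // the value used in the "timestamp >= ..." clause
}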
@@ -865,8 +905,9 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
  params := []*v3.QueryRangeParamsV3{
    {
      Start: 1675115596722 + 60*60*1000 - 86400*1000, //30th Jan, 4:23
      End: 1675115596722 + 120*60*1000 - 86400*1000, //30, 5:23
      End: 1675115596722 + 120*60*1000 - 86400*1000, //30th, 5:23
      Step: 5 * time.Minute.Milliseconds(),
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypeBuilder,
        PanelType: v3.PanelTypeGraph,
@@ -895,6 +936,7 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
      Start: 1675115596722, //31st Jan, 3:23
      End: 1675115596722 + 120*60*1000, //31st Jan, 5:23
      Step: 5 * time.Minute.Milliseconds(),
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypeBuilder,
        PanelType: v3.PanelTypeGraph,
@@ -942,7 +984,7 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {

  // logs queries are generates in ns
  expectedTimeRangeInQueryString := []string{
    fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000),
    fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000), // 30th Jan, 4:23 to 30th Jan, 5:23
    fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000),
  }

@@ -967,6 +1009,7 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) {
      Start: 1675115596722,
      End: 1675115596722 + 120*60*1000,
      Step: 5 * time.Minute.Milliseconds(),
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypePromQL,
        PanelType: v3.PanelTypeValue,
@@ -981,6 +1024,7 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) {
      Start: 1675115596722 + 60*60*1000,
      End: 1675115596722 + 180*60*1000,
      Step: 5 * time.Minute.Milliseconds(),
      Version: "v4",
      CompositeQuery: &v3.CompositeQuery{
        QueryType: v3.QueryTypePromQL,
        PanelType: v3.PanelTypeValue,
@@ -322,13 +322,12 @@ func isLogExpression(expression *govaluate.EvaluableExpression, params *v3.Query
func (c *cacheKeyGenerator) GenerateKeys(params *v3.QueryRangeParamsV3) map[string]string {
  keys := make(map[string]string)

  // For non-graph panels, we don't support caching
  // Use query as the cache key for PromQL queries
  if params.CompositeQuery.QueryType == v3.QueryTypePromQL {
  if params.CompositeQuery.PanelType != v3.PanelTypeGraph {
    return keys
  }

  // Use query as the cache key for PromQL queries
  if params.CompositeQuery.QueryType == v3.QueryTypePromQL {
    for name, query := range params.CompositeQuery.PromQueries {
      keys[name] = query.Query
    }
@@ -338,6 +337,11 @@ func (c *cacheKeyGenerator) GenerateKeys(params *v3.QueryRangeParamsV3) map[stri
  // Build keys for each builder query
  for queryName, query := range params.CompositeQuery.BuilderQueries {
    if query.Expression == queryName && query.DataSource == v3.DataSourceLogs {

      if params.CompositeQuery.PanelType != v3.PanelTypeGraph {
        continue
      }

      var parts []string

      // We need to build a unique cache query for BuilderQuery
@@ -383,6 +387,22 @@ func (c *cacheKeyGenerator) GenerateKeys(params *v3.QueryRangeParamsV3) map[stri
    } else if query.Expression == queryName && query.DataSource == v3.DataSourceMetrics {
      var parts []string

      // what is this condition checking?
      // there are two versions of the metric query builder, v3 and v4,
      // and the query is built differently in each version
      // in v3, only the time series (graph) panel type returns "time series" data;
      // every other panel type returns just a single value,
      // so we can't reuse the previous results for caching
      // in v4, however, every panel type produces time series data
      // that gets aggregated in the query service and then converted to a single value,
      // so we can reuse the previous results for caching

      // if the version is not v4 (it can be empty or v3) and the panel type is not graph,
      // then we can't use the previous results for caching
      if params.Version != "v4" && params.CompositeQuery.PanelType != v3.PanelTypeGraph {
        continue
      }

      // We need to build a unique cache query for BuilderQuery

      parts = append(parts, fmt.Sprintf("source=%s", query.DataSource))
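Editor's sketch (not part of the diff): the condition added above is the behaviour change of this commit for metrics builder queries — before v4 only graph panels produced cacheable time series, while with v4 every panel type does. Distilled into a standalone helper, assuming the v3 model types imported above:

// canCacheMetricsQuery mirrors the gate in GenerateKeys: skip cache-key generation
// only when the request is pre-v4 (empty or "v3") and the panel is not a graph.
func canCacheMetricsQuery(version string, panelType v3.PanelType) bool {
  return version == "v4" || panelType == v3.PanelTypeGraph
}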
@@ -7,6 +7,7 @@ import (
  "github.com/stretchr/testify/require"
  logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
  metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
  "go.signoz.io/signoz/pkg/query-service/constants"
  "go.signoz.io/signoz/pkg/query-service/featureManager"
  v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
@@ -583,3 +584,662 @@ func TestLogsQueryWithFormula(t *testing.T) {
  }

}

func TestGenerateCacheKeysMetricsBuilder(t *testing.T) {
  testCases := []struct {
    name string
    query *v3.QueryRangeParamsV3
    expectedCacheKeys map[string]string
  }{
    // v3 - only the graph builder queries can be cached
    {
      name: "version=v3;panelType=graph;dataSource=metrics;queryType=builder",
      query: &v3.QueryRangeParamsV3{
        Version: "v3",
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeGraph,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceMetrics,
              AggregateOperator: v3.AggregateOperatorSumRate,
              AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
              Temporality: v3.Delta,
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "le"},
              },
              Expression: "A",
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
      },
    },
    {
      name: "version=v3;panelType=graph;dataSource=metrics;queryType=builder with limit", // limit should not be part of the cache key
      query: &v3.QueryRangeParamsV3{
        Version: "v3",
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeGraph,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceMetrics,
              AggregateOperator: v3.AggregateOperatorSumRate,
              AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
              Temporality: v3.Delta,
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "le"},
              },
              Expression: "A",
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
              Limit: 10,
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
      },
    },
    {
      name: "version=v3;panelType=graph;dataSource=metrics;queryType=builder with shiftBy", // shiftBy should be part of the cache key
      query: &v3.QueryRangeParamsV3{
        Version: "v3",
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeGraph,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceMetrics,
              AggregateOperator: v3.AggregateOperatorSumRate,
              AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
              Temporality: v3.Delta,
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "le"},
              },
              Expression: "A",
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
              Limit: 10,
              ShiftBy: 86400,
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&shiftBy=86400&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
      },
    },
    {
      name: "version=v3;panelType=value;dataSource=metrics;queryType=builder",
      query: &v3.QueryRangeParamsV3{
        Version: "v3",
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeValue,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceMetrics,
              AggregateOperator: v3.AggregateOperatorSumRate,
              Expression: "A",
              AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
              Temporality: v3.Delta,
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "le"},
              },
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
              ReduceTo: v3.ReduceToOperatorAvg,
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{},
    },
    {
      name: "version=v3;panelType=table;dataSource=metrics;queryType=builder",
      query: &v3.QueryRangeParamsV3{
        Version: "v3",
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeTable,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceMetrics,
              AggregateOperator: v3.AggregateOperatorSumRate,
              Expression: "A",
              AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
              Temporality: v3.Delta,
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "le"},
              },
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{},
    },

    // v4 - everything can be cached
    {
      name: "version=v4;panelType=graph;dataSource=metrics;queryType=builder",
      query: &v3.QueryRangeParamsV3{
        Version: "v4",
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeGraph,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceMetrics,
              AggregateOperator: v3.AggregateOperatorSumRate,
              AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
              Temporality: v3.Delta,
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "le"},
              },
              Expression: "A",
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
      },
    },
    {
      name: "version=v4;panelType=graph;dataSource=metrics;queryType=builder with limit", // limit should not be part of the cache key
      query: &v3.QueryRangeParamsV3{
        Version: "v4",
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeGraph,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceMetrics,
              AggregateOperator: v3.AggregateOperatorSumRate,
              AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
              Temporality: v3.Delta,
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "le"},
              },
              Expression: "A",
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
              Limit: 10,
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
      },
    },
    {
      name: "version=v4;panelType=graph;dataSource=metrics;queryType=builder with shiftBy", // shiftBy should be part of the cache key
      query: &v3.QueryRangeParamsV3{
        Version: "v4",
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeGraph,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceMetrics,
              AggregateOperator: v3.AggregateOperatorSumRate,
              AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
              Temporality: v3.Delta,
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "le"},
              },
              Expression: "A",
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
              Limit: 10,
              ShiftBy: 86400,
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&shiftBy=86400&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
      },
    },
    {
      name: "version=v4;panelType=value;dataSource=metrics;queryType=builder",
      query: &v3.QueryRangeParamsV3{
        Version: "v4",
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeValue,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceMetrics,
              AggregateOperator: v3.AggregateOperatorSumRate,
              Expression: "A",
              AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
              Temporality: v3.Delta,
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "le"},
              },
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
              ReduceTo: v3.ReduceToOperatorAvg,
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
      },
    },
    {
      name: "version=v4;panelType=table;dataSource=metrics;queryType=builder",
      query: &v3.QueryRangeParamsV3{
        Version: "v4",
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeTable,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceMetrics,
              AggregateOperator: v3.AggregateOperatorSumRate,
              Expression: "A",
              AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
              Temporality: v3.Delta,
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "le"},
              },
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
      },
    },
  }

  keyGen := NewKeyGenerator()

  for _, test := range testCases {
    t.Run(test.name, func(t *testing.T) {
      cacheKeys := keyGen.GenerateKeys(test.query)
      require.Equal(t, test.expectedCacheKeys, cacheKeys)
    })
  }
}
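Editor's note: the expected strings in the cases above are just the per-query parts joined with "&" (the same parts that keys.go builds up with its append calls). A quick illustration that reproduces the cache key asserted for query "A":

package main

import (
  "fmt"
  "strings"
)

func main() {
  parts := []string{
    "source=metrics",
    "step=60",
    "aggregate=sum_rate",
    "timeAggregation=",
    "spaceAggregation=",
    "aggregateAttribute=signoz_latency_bucket---false",
    "filter-0=key:service_name---false,op:=,value:A",
    "groupBy-0=service_name---false",
    "groupBy-1=le---false",
    "having-0=column:value,op:>,value:100",
  }
  fmt.Println(strings.Join(parts, "&")) // matches the expected cache key above
}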
func TestGenerateCacheKeysLogs(t *testing.T) {
  testCases := []struct {
    name string
    query *v3.QueryRangeParamsV3
    expectedCacheKeys map[string]string
  }{
    {
      name: "panelType=graph;dataSource=logs;queryType=builder",
      query: &v3.QueryRangeParamsV3{
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeGraph,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceLogs,
              AggregateOperator: v3.AggregateOperatorCount,
              AggregateAttribute: v3.AttributeKey{Key: "log_level"},
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "log_level"},
              },
              Expression: "A",
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "source=logs&step=60&aggregate=count&limit=0&aggregateAttribute=log_level---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=log_level---false&orderBy-0=#SIGNOZ_VALUE-desc&having-0=column:value,op:>,value:100",
      },
    },
    {
      name: "panelType=table;dataSource=logs;queryType=builder",
      query: &v3.QueryRangeParamsV3{
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeTable,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceLogs,
              AggregateOperator: v3.AggregateOperatorCount,
              AggregateAttribute: v3.AttributeKey{Key: "log_level"},
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              GroupBy: []v3.AttributeKey{
                {Key: "service_name"},
                {Key: "log_level"},
              },
              Expression: "A",
              Having: []v3.Having{
                {
                  ColumnName: "value",
                  Operator: v3.HavingOperatorGreaterThan,
                  Value: 100,
                },
              },
              OrderBy: []v3.OrderBy{
                {ColumnName: constants.SigNozOrderByValue, Order: "desc"},
              },
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{},
    },
    {
      name: "panelType=value;dataSource=logs;queryType=builder",
      query: &v3.QueryRangeParamsV3{
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeValue,
          QueryType: v3.QueryTypeBuilder,
          BuilderQueries: map[string]*v3.BuilderQuery{
            "A": {
              QueryName: "A",
              StepInterval: 60,
              DataSource: v3.DataSourceLogs,
              AggregateOperator: v3.AggregateOperatorCount,
              AggregateAttribute: v3.AttributeKey{Key: "log_level"},
              Filters: &v3.FilterSet{
                Operator: "AND",
                Items: []v3.FilterItem{
                  {Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
                },
              },
              Expression: "A",
              Limit: 10,
              ReduceTo: v3.ReduceToOperatorAvg,
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{},
    },
  }

  keyGen := NewKeyGenerator()
  for _, test := range testCases {
    t.Run(test.name, func(t *testing.T) {
      cacheKeys := keyGen.GenerateKeys(test.query)
      require.Equal(t, test.expectedCacheKeys, cacheKeys)
    })
  }
}

func TestGenerateCacheKeysMetricsPromQL(t *testing.T) {
  // there is no version difference between v3 and v4 for promql
  testCases := []struct {
    name string
    query *v3.QueryRangeParamsV3
    expectedCacheKeys map[string]string
  }{
    {
      name: "panelType=graph;dataSource=metrics;queryType=promql",
      query: &v3.QueryRangeParamsV3{
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeGraph,
          QueryType: v3.QueryTypePromQL,
          PromQueries: map[string]*v3.PromQuery{
            "A": {
              Query: "signoz_latency_bucket",
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "signoz_latency_bucket",
      },
    },
    {
      name: "panelType=graph;dataSource=metrics;queryType=promql",
      query: &v3.QueryRangeParamsV3{
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeGraph,
          QueryType: v3.QueryTypePromQL,
          PromQueries: map[string]*v3.PromQuery{
            "A": {
              Query: "histogram_quantile(0.9, sum(rate(signoz_latency_bucket[1m])) by (le))",
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{
        "A": "histogram_quantile(0.9, sum(rate(signoz_latency_bucket[1m])) by (le))",
      },
    },
    {
      name: "panelType=value;dataSource=metrics;queryType=promql",
      query: &v3.QueryRangeParamsV3{
        CompositeQuery: &v3.CompositeQuery{
          PanelType: v3.PanelTypeValue,
          QueryType: v3.QueryTypePromQL,
          PromQueries: map[string]*v3.PromQuery{
            "A": {
              Query: "histogram_quantile(0.9, sum(rate(signoz_latency_bucket[1m])) by (le))",
            },
          },
        },
      },
      expectedCacheKeys: map[string]string{},
    },
  }

  keyGen := NewKeyGenerator()
  for _, test := range testCases {
    t.Run(test.name, func(t *testing.T) {
      cacheKeys := keyGen.GenerateKeys(test.query)
      require.Equal(t, test.expectedCacheKeys, cacheKeys)
    })
  }
}