fix: dot support in attribute name (#4121)

* fix: dot support

* fix: column name updated for materialized columns

* fix: tests updated

* fix: comments updated

* fix: enrichment test updated

* fix: don't return underscore fields in the fields API

* fix: update fields function changed to support default instead of materialized

* fix: updated how formulas are built and test added

* fix: don't create index for bool attributes

* fix: support for limit queries updated
This commit is contained in:
Nityananda Gohain 2023-12-21 12:11:35 +05:30 committed by GitHub
parent 1be4731710
commit f8bb42a13c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 505 additions and 131 deletions

View File

@ -3423,6 +3423,26 @@ func (r *ClickHouseReader) GetTagsInfoInLastHeartBeatInterval(ctx context.Contex
return &tagsInfo, nil return &tagsInfo, nil
} }
// removeUnderscoreDuplicateFields drops log fields whose name is the
// underscore-normalized duplicate of a dotted field with the same data type
// (e.g. "service_name" is removed when "service.name" is present).
// Deprecated: temporary migration helper — remove once underscore duplicates
// are no longer emitted.
func removeUnderscoreDuplicateFields(fields []model.LogField) []model.LogField {
	// Index by name+datatype so same-named fields with different data types
	// are kept apart. Pre-sized to avoid rehashing.
	lookup := make(map[string]model.LogField, len(fields))
	for _, v := range fields {
		lookup[v.Name+v.DataType] = v
	}
	// For every dotted key, delete its underscore twin. Deleting entries
	// while ranging over a map is safe in Go. Note: ReplaceAll also touches
	// the DataType suffix of the key, but data type names contain no dots.
	for k := range lookup {
		if strings.Contains(k, ".") {
			delete(lookup, strings.ReplaceAll(k, ".", "_"))
		}
	}
	updatedFields := make([]model.LogField, 0, len(lookup))
	for _, v := range lookup {
		updatedFields = append(updatedFields, v)
	}
	return updatedFields
}
// GetDashboardsInfo returns analytics data for dashboards // GetDashboardsInfo returns analytics data for dashboards
func (r *ClickHouseReader) GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) { func (r *ClickHouseReader) GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) {
dashboardsInfo := model.DashboardsInfo{} dashboardsInfo := model.DashboardsInfo{}
@ -3540,6 +3560,10 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe
return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal} return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
} }
//remove this code after sometime
attributes = removeUnderscoreDuplicateFields(attributes)
resources = removeUnderscoreDuplicateFields(resources)
statements := []model.ShowCreateTableStatement{} statements := []model.ShowCreateTableStatement{}
query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable) query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable)
err = r.db.Select(ctx, &statements, query) err = r.db.Select(ctx, &statements, query)
@ -3587,8 +3611,11 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
valueColName := fmt.Sprintf("%s_%s_value", field.Type, strings.ToLower(field.DataType)) valueColName := fmt.Sprintf("%s_%s_value", field.Type, strings.ToLower(field.DataType))
// create materialized column // create materialized column
query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s %s MATERIALIZED %s[indexOf(%s, '%s')] CODEC(ZSTD(1))",
r.logsDB, r.logsLocalTable, for _, table := range []string{r.logsLocalTable, r.logsTable} {
q := "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s %s DEFAULT %s[indexOf(%s, '%s')] CODEC(ZSTD(1))"
query := fmt.Sprintf(q,
r.logsDB, table,
r.cluster, r.cluster,
colname, field.DataType, colname, field.DataType,
valueColName, valueColName,
@ -3600,25 +3627,8 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
return &model.ApiError{Err: err, Typ: model.ErrorInternal} return &model.ApiError{Err: err, Typ: model.ErrorInternal}
} }
defaultValueDistributed := "-1" query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s_exists bool DEFAULT if(indexOf(%s, '%s') != 0, true, false) CODEC(ZSTD(1))",
if strings.ToLower(field.DataType) == "bool" { r.logsDB, table,
defaultValueDistributed = "false"
field.IndexType = "set(2)"
}
query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s %s MATERIALIZED %s",
r.logsDB, r.logsTable,
r.cluster,
colname, field.DataType,
defaultValueDistributed,
)
err = r.db.Exec(ctx, query)
if err != nil {
return &model.ApiError{Err: err, Typ: model.ErrorInternal}
}
// create exists column
query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s_exists bool MATERIALIZED if(indexOf(%s, '%s') != 0, true, false) CODEC(ZSTD(1))",
r.logsDB, r.logsLocalTable,
r.cluster, r.cluster,
colname, colname,
keyColName, keyColName,
@ -3628,25 +3638,21 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
if err != nil { if err != nil {
return &model.ApiError{Err: err, Typ: model.ErrorInternal} return &model.ApiError{Err: err, Typ: model.ErrorInternal}
} }
query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s_exists bool MATERIALIZED false",
r.logsDB, r.logsTable,
r.cluster,
colname,
)
err = r.db.Exec(ctx, query)
if err != nil {
return &model.ApiError{Err: err, Typ: model.ErrorInternal}
} }
// create the index // create the index
if strings.ToLower(field.DataType) == "bool" {
// there is no point in creating index for bool attributes as the cardinality is just 2
return nil
}
if field.IndexType == "" { if field.IndexType == "" {
field.IndexType = constants.DefaultLogSkipIndexType field.IndexType = constants.DefaultLogSkipIndexType
} }
if field.IndexGranularity == 0 { if field.IndexGranularity == 0 {
field.IndexGranularity = constants.DefaultLogSkipIndexGranularity field.IndexGranularity = constants.DefaultLogSkipIndexGranularity
} }
query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS %s_idx (%s) TYPE %s GRANULARITY %d", query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS %s_idx (%s) TYPE %s GRANULARITY %d",
r.logsDB, r.logsLocalTable, r.logsDB, r.logsLocalTable,
r.cluster, r.cluster,
colname, colname,
@ -3654,7 +3660,7 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
field.IndexType, field.IndexType,
field.IndexGranularity, field.IndexGranularity,
) )
err = r.db.Exec(ctx, query) err := r.db.Exec(ctx, query)
if err != nil { if err != nil {
return &model.ApiError{Err: err, Typ: model.ErrorInternal} return &model.ApiError{Err: err, Typ: model.ErrorInternal}
} }
@ -4571,12 +4577,64 @@ func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([
row[columnNames[idx]] = v row[columnNames[idx]] = v
} }
} }
// remove duplicate _ attributes for logs.
// remove this function after a month
removeDuplicateUnderscoreAttributes(row)
rowList = append(rowList, &v3.Row{Timestamp: t, Data: row}) rowList = append(rowList, &v3.Row{Timestamp: t, Data: row})
} }
return rowList, nil return rowList, nil
} }
// removeDuplicateUnderscoreAttributes deletes, from each attribute/resource
// map in a log row, any underscore-named key that duplicates a dotted key
// (e.g. "host_name" is removed when "host.name" is present). The row values
// are pointers to maps, so the deletion mutates the row in place.
// Deprecated: temporary migration helper — remove after a month.
func removeDuplicateUnderscoreAttributes(row map[string]interface{}) {
	// dropUnderscoreTwin removes key's "_"-normalized twin; deleting while
	// ranging over a map is safe in Go. Comma-ok assertions are used below so
	// an unexpected value type is skipped instead of panicking.
	if attributes, ok := row["attributes_int64"].(*map[string]int64); ok && attributes != nil {
		for key := range *attributes {
			if strings.Contains(key, ".") {
				delete(*attributes, strings.ReplaceAll(key, ".", "_"))
			}
		}
	}

	if attributes, ok := row["attributes_float64"].(*map[string]float64); ok && attributes != nil {
		for key := range *attributes {
			if strings.Contains(key, ".") {
				delete(*attributes, strings.ReplaceAll(key, ".", "_"))
			}
		}
	}

	if attributes, ok := row["attributes_bool"].(*map[string]bool); ok && attributes != nil {
		for key := range *attributes {
			if strings.Contains(key, ".") {
				delete(*attributes, strings.ReplaceAll(key, ".", "_"))
			}
		}
	}

	// String-typed attributes and resources share the same map shape.
	for _, k := range []string{"attributes_string", "resources_string"} {
		if attributes, ok := row[k].(*map[string]string); ok && attributes != nil {
			for key := range *attributes {
				if strings.Contains(key, ".") {
					delete(*attributes, strings.ReplaceAll(key, ".", "_"))
				}
			}
		}
	}
}
func (r *ClickHouseReader) CheckClickHouse(ctx context.Context) error { func (r *ClickHouseReader) CheckClickHouse(ctx context.Context) error {
rows, err := r.db.Query(ctx, "SELECT 1") rows, err := r.db.Query(ctx, "SELECT 1")
if err != nil { if err != nil {

View File

@ -279,6 +279,68 @@ var testEnrichParamsData = []struct {
}, },
}, },
}, },
{
Name: "Enriching query range v3 params with dot support",
Params: v3.QueryRangeParamsV3{
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{
"test": {
QueryName: "test",
Expression: "test",
DataSource: v3.DataSourceLogs,
AggregateAttribute: v3.AttributeKey{
Key: "method.name",
},
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "service.name"}, Value: "test", Operator: "="},
}},
GroupBy: []v3.AttributeKey{{Key: "host.name"}},
OrderBy: []v3.OrderBy{{ColumnName: "host.name"}},
},
},
},
},
Fields: map[string]v3.AttributeKey{
"method.name": {
Key: "method.name",
Type: v3.AttributeKeyTypeTag,
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"service.name": {
Key: "service.name",
Type: v3.AttributeKeyTypeTag,
DataType: v3.AttributeKeyDataTypeString,
},
"host.name": {
Key: "host.name",
Type: v3.AttributeKeyTypeTag,
DataType: v3.AttributeKeyDataTypeString,
},
},
Result: v3.QueryRangeParamsV3{
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{
"test": {
QueryName: "test",
Expression: "test",
DataSource: v3.DataSourceLogs,
AggregateAttribute: v3.AttributeKey{
Key: "method.name",
Type: v3.AttributeKeyTypeTag,
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "service.name", Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString}, Value: "test", Operator: "="},
}},
GroupBy: []v3.AttributeKey{{Key: "host.name", Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString}},
OrderBy: []v3.OrderBy{{ColumnName: "host.name", Key: "host.name", Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString}},
},
},
},
},
},
} }
func TestEnrichParams(t *testing.T) { func TestEnrichParams(t *testing.T) {

View File

@ -106,7 +106,7 @@ func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attri
} else { } else {
for _, tag := range groupBy { for _, tag := range groupBy {
columnName := getClickhouseColumnName(tag) columnName := getClickhouseColumnName(tag)
selectLabels += fmt.Sprintf(" %s as %s,", columnName, tag.Key) selectLabels += fmt.Sprintf(" %s as `%s`,", columnName, tag.Key)
} }
} }
return selectLabels return selectLabels
@ -118,7 +118,7 @@ func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attribu
return "" return ""
} else { } else {
for _, tag := range groupBy { for _, tag := range groupBy {
selectLabels = append(selectLabels, tag.Key) selectLabels = append(selectLabels, "`"+tag.Key+"`")
} }
} }
return strings.Join(selectLabels, ",") return strings.Join(selectLabels, ",")
@ -209,7 +209,7 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey,
if !attr.IsColumn { if !attr.IsColumn {
columnType := getClickhouseLogsColumnType(attr.Type) columnType := getClickhouseLogsColumnType(attr.Type)
columnDataType := getClickhouseLogsColumnDataType(attr.DataType) columnDataType := getClickhouseLogsColumnDataType(attr.DataType)
conditions = append(conditions, fmt.Sprintf("indexOf(%s_%s_key, '%s') > 0", columnType, columnDataType, attr.Key)) conditions = append(conditions, fmt.Sprintf("has(%s_%s_key, '%s')", columnType, columnDataType, attr.Key))
} else if attr.Type != v3.AttributeKeyTypeUnspecified { } else if attr.Type != v3.AttributeKeyTypeUnspecified {
// for materialized columns // for materialized columns
conditions = append(conditions, fmt.Sprintf("%s_exists=true", getClickhouseColumnName(attr))) conditions = append(conditions, fmt.Sprintf("%s_exists=true", getClickhouseColumnName(attr)))
@ -378,7 +378,7 @@ func groupBy(panelType v3.PanelType, graphLimitQtype string, tags ...string) str
func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string { func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
groupTags := []string{} groupTags := []string{}
for _, tag := range tags { for _, tag := range tags {
groupTags = append(groupTags, tag.Key) groupTags = append(groupTags, "`"+tag.Key+"`")
} }
return groupBy(panelType, graphLimitQtype, groupTags...) return groupBy(panelType, graphLimitQtype, groupTags...)
} }
@ -393,11 +393,11 @@ func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]st
if item.ColumnName == constants.SigNozOrderByValue { if item.ColumnName == constants.SigNozOrderByValue {
orderBy = append(orderBy, fmt.Sprintf("value %s", item.Order)) orderBy = append(orderBy, fmt.Sprintf("value %s", item.Order))
} else if _, ok := tagLookup[item.ColumnName]; ok { } else if _, ok := tagLookup[item.ColumnName]; ok {
orderBy = append(orderBy, fmt.Sprintf("%s %s", item.ColumnName, item.Order)) orderBy = append(orderBy, fmt.Sprintf("`%s` %s", item.ColumnName, item.Order))
} else if panelType == v3.PanelTypeList { } else if panelType == v3.PanelTypeList {
attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn} attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
name := getClickhouseColumnName(attr) name := getClickhouseColumnName(attr)
orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order)) orderBy = append(orderBy, fmt.Sprintf("`%s` %s", name, item.Order))
} }
} }
return orderBy return orderBy

View File

@ -69,13 +69,13 @@ var testGetSelectLabelsData = []struct {
Name: "select fields for groupBy attribute", Name: "select fields for groupBy attribute",
AggregateOperator: v3.AggregateOperatorCount, AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, GroupByTags: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
SelectLabels: " attributes_string_value[indexOf(attributes_string_key, 'user_name')] as user_name,", SelectLabels: " attributes_string_value[indexOf(attributes_string_key, 'user_name')] as `user_name`,",
}, },
{ {
Name: "select fields for groupBy resource", Name: "select fields for groupBy resource",
AggregateOperator: v3.AggregateOperatorCount, AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}}, GroupByTags: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}},
SelectLabels: " resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name,", SelectLabels: " resources_string_value[indexOf(resources_string_key, 'user_name')] as `user_name`,",
}, },
{ {
Name: "select fields for groupBy attribute and resource", Name: "select fields for groupBy attribute and resource",
@ -84,19 +84,19 @@ var testGetSelectLabelsData = []struct {
{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, {Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, {Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
}, },
SelectLabels: " resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name, attributes_string_value[indexOf(attributes_string_key, 'host')] as host,", SelectLabels: " resources_string_value[indexOf(resources_string_key, 'user_name')] as `user_name`, attributes_string_value[indexOf(attributes_string_key, 'host')] as `host`,",
}, },
{ {
Name: "select fields for groupBy materialized columns", Name: "select fields for groupBy materialized columns",
AggregateOperator: v3.AggregateOperatorCount, AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "host", IsColumn: true}}, GroupByTags: []v3.AttributeKey{{Key: "host", IsColumn: true}},
SelectLabels: " host as host,", SelectLabels: " host as `host`,",
}, },
{ {
Name: "trace_id field as an attribute", Name: "trace_id field as an attribute",
AggregateOperator: v3.AggregateOperatorCount, AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "trace_id", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, GroupByTags: []v3.AttributeKey{{Key: "trace_id", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
SelectLabels: " attributes_string_value[indexOf(attributes_string_key, 'trace_id')] as trace_id,", SelectLabels: " attributes_string_value[indexOf(attributes_string_key, 'trace_id')] as `trace_id`,",
}, },
} }
@ -209,7 +209,7 @@ var timeSeriesFilterQueryData = []struct {
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102.", Operator: "ncontains"}, {Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102.", Operator: "ncontains"},
}}, }},
GroupBy: []v3.AttributeKey{{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, GroupBy: []v3.AttributeKey{{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
ExpectedFilter: "attributes_string_value[indexOf(attributes_string_key, 'host')] NOT ILIKE '%102.%' AND indexOf(attributes_string_key, 'host') > 0", ExpectedFilter: "attributes_string_value[indexOf(attributes_string_key, 'host')] NOT ILIKE '%102.%' AND has(attributes_string_key, 'host')",
}, },
{ {
Name: "Test groupBy isColumn", Name: "Test groupBy isColumn",
@ -385,6 +385,48 @@ var testBuildLogsQueryData = []struct {
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'name') group by ts order by value DESC", ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'name') group by ts order by value DESC",
}, },
{
Name: "Test aggregate count distinct on non selected field containing dot",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
StepInterval: 60,
AggregateAttribute: v3.AttributeKey{Key: "method.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
GroupBy: []v3.AttributeKey{{Key: "host.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{{ColumnName: "host.name", Order: "ASC"}, {ColumnName: "ts", Order: "ASC", Key: "ts"}},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'host.name')] as `host.name`, " +
"toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'method.name')]))) as value from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'host.name') AND has(attributes_string_key, 'method.name') " +
"group by `host.name`,ts order by `host.name` ASC",
},
{
Name: "Test aggregate count distinct on selected field containing dot",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
StepInterval: 60,
AggregateAttribute: v3.AttributeKey{Key: "method.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
GroupBy: []v3.AttributeKey{{Key: "host.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
OrderBy: []v3.OrderBy{{ColumnName: "host.name", Order: "ASC"}, {ColumnName: "ts", Order: "ASC", Key: "ts", IsColumn: true}},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attribute_string_host$$name as `host.name`, toFloat64(count(distinct(attribute_string_method$$name))) as value" +
" from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND attribute_string_host$$name_exists=true AND attribute_string_method$$name_exists=true " +
"group by `host.name`,ts " +
"order by `host.name` ASC",
},
{ {
Name: "Test aggregate count distinct with filter and groupBy", Name: "Test aggregate count distinct with filter and groupBy",
PanelType: v3.PanelTypeGraph, PanelType: v3.PanelTypeGraph,
@ -406,14 +448,14 @@ var testBuildLogsQueryData = []struct {
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," + ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
" attributes_string_value[indexOf(attributes_string_key, 'method')] as method, " + " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
"toFloat64(count(distinct(attribute_string_name))) as value from signoz_logs.distributed_logs " + "toFloat64(count(distinct(attribute_string_name))) as value from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " + "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND resources_string_value[indexOf(resources_string_key, 'x')] != 'abc' " + "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND resources_string_value[indexOf(resources_string_key, 'x')] != 'abc' " +
"AND indexOf(attributes_string_key, 'method') > 0 " + "AND has(attributes_string_key, 'method') " +
"AND attribute_string_name_exists=true " + "AND attribute_string_name_exists=true " +
"group by method,ts " + "group by `method`,ts " +
"order by method ASC", "order by `method` ASC",
}, },
{ {
Name: "Test aggregate count with multiple filter,groupBy and orderBy", Name: "Test aggregate count with multiple filter,groupBy and orderBy",
@ -436,16 +478,16 @@ var testBuildLogsQueryData = []struct {
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," + ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
" attributes_string_value[indexOf(attributes_string_key, 'method')] as method, " + " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
"resources_string_value[indexOf(resources_string_key, 'x')] as x, " + "resources_string_value[indexOf(resources_string_key, 'x')] as `x`, " +
"toFloat64(count(distinct(attribute_string_name))) as value from signoz_logs.distributed_logs " + "toFloat64(count(distinct(attribute_string_name))) as value from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " + "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND resources_string_value[indexOf(resources_string_key, 'x')] != 'abc' " + "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND resources_string_value[indexOf(resources_string_key, 'x')] != 'abc' " +
"AND indexOf(attributes_string_key, 'method') > 0 " + "AND has(attributes_string_key, 'method') " +
"AND indexOf(resources_string_key, 'x') > 0 " + "AND has(resources_string_key, 'x') " +
"AND attribute_string_name_exists=true " + "AND attribute_string_name_exists=true " +
"group by method,x,ts " + "group by `method`,`x`,ts " +
"order by method ASC,x ASC", "order by `method` ASC,`x` ASC",
}, },
{ {
Name: "Test aggregate avg", Name: "Test aggregate avg",
@ -467,15 +509,15 @@ var testBuildLogsQueryData = []struct {
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," + ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
" attributes_string_value[indexOf(attributes_string_key, 'method')] as method, " + " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
"avg(attributes_float64_value[indexOf(attributes_float64_key, 'bytes')]) as value " + "avg(attributes_float64_value[indexOf(attributes_float64_key, 'bytes')]) as value " +
"from signoz_logs.distributed_logs " + "from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " + "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " + "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " + "AND has(attributes_string_key, 'method') " +
"AND has(attributes_float64_key, 'bytes') " + "AND has(attributes_float64_key, 'bytes') " +
"group by method,ts " + "group by `method`,ts " +
"order by method ASC", "order by `method` ASC",
}, },
{ {
Name: "Test aggregate sum", Name: "Test aggregate sum",
@ -497,15 +539,15 @@ var testBuildLogsQueryData = []struct {
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," + ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
" attributes_string_value[indexOf(attributes_string_key, 'method')] as method, " + " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
"sum(attribute_float64_bytes) as value " + "sum(attribute_float64_bytes) as value " +
"from signoz_logs.distributed_logs " + "from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " + "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " + "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " + "AND has(attributes_string_key, 'method') " +
"AND attribute_float64_bytes_exists=true " + "AND attribute_float64_bytes_exists=true " +
"group by method,ts " + "group by `method`,ts " +
"order by method ASC", "order by `method` ASC",
}, },
{ {
Name: "Test aggregate min", Name: "Test aggregate min",
@ -527,15 +569,15 @@ var testBuildLogsQueryData = []struct {
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," + ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
" attributes_string_value[indexOf(attributes_string_key, 'method')] as method, " + " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
"min(attribute_float64_bytes) as value " + "min(attribute_float64_bytes) as value " +
"from signoz_logs.distributed_logs " + "from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " + "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " + "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " + "AND has(attributes_string_key, 'method') " +
"AND attribute_float64_bytes_exists=true " + "AND attribute_float64_bytes_exists=true " +
"group by method,ts " + "group by `method`,ts " +
"order by method ASC", "order by `method` ASC",
}, },
{ {
Name: "Test aggregate max", Name: "Test aggregate max",
@ -557,15 +599,15 @@ var testBuildLogsQueryData = []struct {
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," + ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
" attributes_string_value[indexOf(attributes_string_key, 'method')] as method, " + " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
"max(attribute_float64_bytes) as value " + "max(attribute_float64_bytes) as value " +
"from signoz_logs.distributed_logs " + "from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " + "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " + "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " + "AND has(attributes_string_key, 'method') " +
"AND attribute_float64_bytes_exists=true " + "AND attribute_float64_bytes_exists=true " +
"group by method,ts " + "group by `method`,ts " +
"order by method ASC", "order by `method` ASC",
}, },
{ {
Name: "Test aggregate PXX", Name: "Test aggregate PXX",
@ -584,14 +626,14 @@ var testBuildLogsQueryData = []struct {
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," + ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
" attributes_string_value[indexOf(attributes_string_key, 'method')] as method, " + " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
"quantile(0.05)(attribute_float64_bytes) as value " + "quantile(0.05)(attribute_float64_bytes) as value " +
"from signoz_logs.distributed_logs " + "from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " + "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " + "AND has(attributes_string_key, 'method') " +
"AND attribute_float64_bytes_exists=true " + "AND attribute_float64_bytes_exists=true " +
"group by method,ts " + "group by `method`,ts " +
"order by method ASC", "order by `method` ASC",
}, },
{ {
Name: "Test aggregate RateSum", Name: "Test aggregate RateSum",
@ -610,12 +652,12 @@ var testBuildLogsQueryData = []struct {
}, },
TableName: "logs", TableName: "logs",
PreferRPM: true, PreferRPM: true,
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as method" + ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`" +
", sum(attribute_float64_bytes)/1.000000 as value from signoz_logs.distributed_logs " + ", sum(attribute_float64_bytes)/1.000000 as value from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " + "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " + "AND has(attributes_string_key, 'method') " +
"AND attribute_float64_bytes_exists=true " + "AND attribute_float64_bytes_exists=true " +
"group by method,ts order by method ASC", "group by `method`,ts order by `method` ASC",
}, },
{ {
Name: "Test aggregate rate", Name: "Test aggregate rate",
@ -634,13 +676,13 @@ var testBuildLogsQueryData = []struct {
}, },
TableName: "logs", TableName: "logs",
PreferRPM: false, PreferRPM: false,
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as method" + ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`" +
", count(attributes_float64_value[indexOf(attributes_float64_key, 'bytes')])/60.000000 as value " + ", count(attributes_float64_value[indexOf(attributes_float64_key, 'bytes')])/60.000000 as value " +
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " + "from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " + "AND has(attributes_string_key, 'method') " +
"AND has(attributes_float64_key, 'bytes') " + "AND has(attributes_float64_key, 'bytes') " +
"group by method,ts " + "group by `method`,ts " +
"order by method ASC", "order by `method` ASC",
}, },
{ {
Name: "Test aggregate RateSum without materialized column", Name: "Test aggregate RateSum without materialized column",
@ -660,13 +702,13 @@ var testBuildLogsQueryData = []struct {
TableName: "logs", TableName: "logs",
PreferRPM: true, PreferRPM: true,
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, " + ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, " +
"attributes_string_value[indexOf(attributes_string_key, 'method')] as method, " + "attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
"sum(attributes_float64_value[indexOf(attributes_float64_key, 'bytes')])/1.000000 as value " + "sum(attributes_float64_value[indexOf(attributes_float64_key, 'bytes')])/1.000000 as value " +
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " + "from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " + "AND has(attributes_string_key, 'method') " +
"AND has(attributes_float64_key, 'bytes') " + "AND has(attributes_float64_key, 'bytes') " +
"group by method,ts " + "group by `method`,ts " +
"order by method ASC", "order by `method` ASC",
}, },
{ {
Name: "Test Noop", Name: "Test Noop",
@ -704,7 +746,7 @@ var testBuildLogsQueryData = []struct {
"CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," + "CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," +
"CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool," + "CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool," +
"CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string " + "CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string " +
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by method ASC", "from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by `method` ASC",
}, },
{ {
Name: "Test Noop with filter", Name: "Test Noop with filter",
@ -827,7 +869,7 @@ var testBuildLogsQueryData = []struct {
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'body')] ILIKE '%test%' AND has(attributes_string_key, 'name') group by ts having value > 10 order by value DESC", ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'body')] ILIKE '%test%' AND has(attributes_string_key, 'name') group by ts having value > 10 order by value DESC",
}, },
// // Tests for table panel type // Tests for table panel type
{ {
Name: "TABLE: Test count", Name: "TABLE: Test count",
PanelType: v3.PanelTypeTable, PanelType: v3.PanelTypeTable,
@ -857,7 +899,7 @@ var testBuildLogsQueryData = []struct {
}, },
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as name, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND indexOf(attributes_string_key, 'name') > 0 group by name order by value DESC", ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as `name`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'name') group by `name` order by value DESC",
}, },
{ {
Name: "TABLE: Test count with groupBy, orderBy", Name: "TABLE: Test count with groupBy, orderBy",
@ -877,7 +919,7 @@ var testBuildLogsQueryData = []struct {
}, },
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as name, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND indexOf(attributes_string_key, 'name') > 0 group by name order by name DESC", ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as `name`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'name') group by `name` order by `name` DESC",
}, },
{ {
Name: "TABLE: Test count with JSON Filter, groupBy, orderBy", Name: "TABLE: Test count with JSON Filter, groupBy, orderBy",
@ -911,7 +953,7 @@ var testBuildLogsQueryData = []struct {
}, },
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as name, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND JSON_EXISTS(body, '$.\"message\"') AND JSON_VALUE(body, '$.\"message\"') ILIKE '%a%' AND indexOf(attributes_string_key, 'name') > 0 group by name order by name DESC", ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as `name`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND JSON_EXISTS(body, '$.\"message\"') AND JSON_VALUE(body, '$.\"message\"') ILIKE '%a%' AND has(attributes_string_key, 'name') group by `name` order by `name` DESC",
}, },
{ {
Name: "TABLE: Test count with JSON Filter Array, groupBy, orderBy", Name: "TABLE: Test count with JSON Filter Array, groupBy, orderBy",
@ -945,7 +987,7 @@ var testBuildLogsQueryData = []struct {
}, },
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as name, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(JSONExtract(JSON_QUERY(body, '$.\"requestor_list\"[*]'), 'Array(String)'), 'index_service') AND indexOf(attributes_string_key, 'name') > 0 group by name order by name DESC", ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as `name`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(JSONExtract(JSON_QUERY(body, '$.\"requestor_list\"[*]'), 'Array(String)'), 'index_service') AND has(attributes_string_key, 'name') group by `name` order by `name` DESC",
}, },
} }
@ -983,7 +1025,7 @@ var testOrderBy = []struct {
Tags: []v3.AttributeKey{ Tags: []v3.AttributeKey{
{Key: "name"}, {Key: "name"},
}, },
Result: "name asc,value desc", Result: "`name` asc,value desc",
}, },
{ {
Name: "Test 2", Name: "Test 2",
@ -1002,7 +1044,7 @@ var testOrderBy = []struct {
{Key: "name"}, {Key: "name"},
{Key: "bytes"}, {Key: "bytes"},
}, },
Result: "name asc,bytes asc", Result: "`name` asc,`bytes` asc",
}, },
{ {
Name: "Test Graph item not present in tag", Name: "Test Graph item not present in tag",
@ -1025,7 +1067,7 @@ var testOrderBy = []struct {
{Key: "name"}, {Key: "name"},
{Key: "bytes"}, {Key: "bytes"},
}, },
Result: "name asc,bytes asc", Result: "`name` asc,`bytes` asc",
}, },
{ {
Name: "Test 3", Name: "Test 3",
@ -1048,7 +1090,7 @@ var testOrderBy = []struct {
{Key: "name"}, {Key: "name"},
{Key: "bytes"}, {Key: "bytes"},
}, },
Result: "name asc,value asc,bytes asc", Result: "`name` asc,value asc,`bytes` asc",
}, },
{ {
Name: "Test 4", Name: "Test 4",
@ -1078,7 +1120,7 @@ var testOrderBy = []struct {
{Key: "name"}, {Key: "name"},
{Key: "bytes"}, {Key: "bytes"},
}, },
Result: "name asc,value asc,bytes asc,attributes_string_value[indexOf(attributes_string_key, 'response_time')] desc", Result: "`name` asc,value asc,`bytes` asc,`attributes_string_value[indexOf(attributes_string_key, 'response_time')]` desc",
}, },
} }
@ -1128,7 +1170,7 @@ var testPrepLogsQueryData = []struct {
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND has(attributes_string_key, 'name') group by method order by value DESC) LIMIT 10", ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by value DESC) LIMIT 10",
Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
}, },
{ {
@ -1151,7 +1193,7 @@ var testPrepLogsQueryData = []struct {
OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "ASC"}}, OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "ASC"}},
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND has(attributes_string_key, 'name') group by method order by value ASC) LIMIT 10", ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by value ASC) LIMIT 10",
Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
}, },
{ {
@ -1174,7 +1216,7 @@ var testPrepLogsQueryData = []struct {
OrderBy: []v3.OrderBy{{ColumnName: "method", Order: "ASC"}}, OrderBy: []v3.OrderBy{{ColumnName: "method", Order: "ASC"}},
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND has(attributes_string_key, 'name') group by method order by method ASC) LIMIT 10", ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by `method` ASC) LIMIT 10",
Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
}, },
{ {
@ -1196,7 +1238,7 @@ var testPrepLogsQueryData = []struct {
Limit: 2, Limit: 2,
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND has(attributes_string_key, 'name') AND (method) GLOBAL IN (#LIMIT_PLACEHOLDER) group by method,ts order by value DESC", ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') AND (`method`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `method`,ts order by value DESC",
Options: Options{GraphLimitQtype: constants.SecondQueryGraphLimit}, Options: Options{GraphLimitQtype: constants.SecondQueryGraphLimit},
}, },
{ {
@ -1219,7 +1261,7 @@ var testPrepLogsQueryData = []struct {
Limit: 2, Limit: 2,
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND has(attributes_string_key, 'name') AND (method) GLOBAL IN (#LIMIT_PLACEHOLDER) group by method,ts order by method ASC", ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') AND (`method`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `method`,ts order by `method` ASC",
Options: Options{GraphLimitQtype: constants.SecondQueryGraphLimit}, Options: Options{GraphLimitQtype: constants.SecondQueryGraphLimit},
}, },
// Live tail // Live tail
@ -1353,7 +1395,7 @@ var testPrepLogsQueryLimitOffsetData = []struct {
PageSize: 5, PageSize: 5,
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by timestamp desc LIMIT 1", ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by `timestamp` desc LIMIT 1",
}, },
{ {
Name: "Test limit greater than pageSize - order by ts", Name: "Test limit greater than pageSize - order by ts",
@ -1374,7 +1416,7 @@ var testPrepLogsQueryLimitOffsetData = []struct {
PageSize: 10, PageSize: 10,
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND id < '2TNh4vp2TpiWyLt3SzuadLJF2s4' order by timestamp desc LIMIT 10", ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND id < '2TNh4vp2TpiWyLt3SzuadLJF2s4' order by `timestamp` desc LIMIT 10",
}, },
{ {
Name: "Test limit less than pageSize - order by custom", Name: "Test limit less than pageSize - order by custom",
@ -1393,7 +1435,7 @@ var testPrepLogsQueryLimitOffsetData = []struct {
PageSize: 5, PageSize: 5,
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by attributes_string_value[indexOf(attributes_string_key, 'method')] desc LIMIT 1 OFFSET 0", ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by `attributes_string_value[indexOf(attributes_string_key, 'method')]` desc LIMIT 1 OFFSET 0",
}, },
{ {
Name: "Test limit greater than pageSize - order by custom", Name: "Test limit greater than pageSize - order by custom",
@ -1414,7 +1456,7 @@ var testPrepLogsQueryLimitOffsetData = []struct {
PageSize: 50, PageSize: 50,
}, },
TableName: "logs", TableName: "logs",
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND id < '2TNh4vp2TpiWyLt3SzuadLJF2s4' order by attributes_string_value[indexOf(attributes_string_key, 'method')] desc LIMIT 50 OFFSET 50", ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND id < '2TNh4vp2TpiWyLt3SzuadLJF2s4' order by `attributes_string_value[indexOf(attributes_string_key, 'method')]` desc LIMIT 50 OFFSET 50",
}, },
} }

View File

@ -121,7 +121,7 @@ func expressionToQuery(
groupTags = append(groupTags, "ts") groupTags = append(groupTags, "ts")
if joinUsing == "" { if joinUsing == "" {
for _, tag := range groupTags { for _, tag := range groupTags {
joinUsing += fmt.Sprintf("%s.%s as %s, ", variable, tag, tag) joinUsing += fmt.Sprintf("%s.`%s` as `%s`, ", variable, tag, tag)
} }
joinUsing = strings.TrimSuffix(joinUsing, ", ") joinUsing = strings.TrimSuffix(joinUsing, ", ")
} }
@ -129,7 +129,7 @@ func expressionToQuery(
if idx > 0 { if idx > 0 {
formulaSubQuery += " ON " formulaSubQuery += " ON "
for _, tag := range groupTags { for _, tag := range groupTags {
formulaSubQuery += fmt.Sprintf("%s.%s = %s.%s AND ", prevVar, tag, variable, tag) formulaSubQuery += fmt.Sprintf("%s.`%s` = %s.`%s` AND ", prevVar, tag, variable, tag)
} }
formulaSubQuery = strings.TrimSuffix(formulaSubQuery, " AND ") formulaSubQuery = strings.TrimSuffix(formulaSubQuery, " AND ")
} }

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
"go.signoz.io/signoz/pkg/query-service/featureManager" "go.signoz.io/signoz/pkg/query-service/featureManager"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3" v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
@ -53,7 +54,7 @@ func TestBuildQueryWithMultipleQueriesAndFormula(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Contains(t, queries["C"], "SELECT A.ts as ts, A.value / B.value") require.Contains(t, queries["C"], "SELECT A.`ts` as `ts`, A.value / B.value")
require.Contains(t, queries["C"], "WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'in') IN ['a','b','c']") require.Contains(t, queries["C"], "WHERE metric_name = 'name' AND temporality IN ['Cumulative', 'Unspecified'] AND JSONExtractString(labels, 'in') IN ['a','b','c']")
require.Contains(t, queries["C"], "(value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window)))") require.Contains(t, queries["C"], "(value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window)))")
}) })
@ -170,19 +171,19 @@ func TestBuildQueryWithThreeOrMoreQueriesRefAndFormula(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Contains(t, queries["F1"], "SELECT A.ts as ts, A.value / B.value") require.Contains(t, queries["F1"], "SELECT A.`ts` as `ts`, A.value / B.value")
require.Equal(t, 1, strings.Count(queries["F1"], " ON ")) require.Equal(t, 1, strings.Count(queries["F1"], " ON "))
require.Contains(t, queries["F2"], "SELECT A.ts as ts, A.value / (B.value + C.value)") require.Contains(t, queries["F2"], "SELECT A.`ts` as `ts`, A.value / (B.value + C.value)")
require.Equal(t, 2, strings.Count(queries["F2"], " ON ")) require.Equal(t, 2, strings.Count(queries["F2"], " ON "))
// Working with same query multiple times should not join on itself // Working with same query multiple times should not join on itself
require.NotContains(t, queries["F3"], " ON ") require.NotContains(t, queries["F3"], " ON ")
require.Contains(t, queries["F4"], "SELECT A.ts as ts, A.value * B.value * C.value") require.Contains(t, queries["F4"], "SELECT A.`ts` as `ts`, A.value * B.value * C.value")
require.Equal(t, 2, strings.Count(queries["F4"], " ON ")) require.Equal(t, 2, strings.Count(queries["F4"], " ON "))
require.Contains(t, queries["F5"], "SELECT A.ts as ts, ((A.value - B.value) / B.value) * 100") require.Contains(t, queries["F5"], "SELECT A.`ts` as `ts`, ((A.value - B.value) / B.value) * 100")
require.Equal(t, 1, strings.Count(queries["F5"], " ON ")) require.Equal(t, 1, strings.Count(queries["F5"], " ON "))
for _, query := range q.CompositeQuery.BuilderQueries { for _, query := range q.CompositeQuery.BuilderQueries {
@ -326,7 +327,7 @@ func TestDeltaQueryBuilder(t *testing.T) {
}, },
}, },
queryToTest: "C", queryToTest: "C",
expected: "SELECT A.ts as ts, A.value * 100 / B.value as value FROM (SELECT toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, 'status_code') IN ['STATUS_CODE_ERROR'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY ts ORDER BY ts) as A INNER JOIN (SELECT toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY ts ORDER BY ts) as B ON A.ts = B.ts", expected: "SELECT A.`ts` as `ts`, A.value * 100 / B.value as value FROM (SELECT toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] 
AND JSONExtractString(labels, 'status_code') IN ['STATUS_CODE_ERROR'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY ts ORDER BY ts) as A INNER JOIN (SELECT toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v2 INNER JOIN (SELECT fingerprint FROM signoz_metrics.time_series_v2 WHERE metric_name = 'signoz_latency_count' AND temporality = 'Delta' AND JSONExtractString(labels, 'service_name') IN ['frontend'] AND JSONExtractString(labels, 'operation') IN ['HTTP GET /dispatch'] AND JSONExtractString(labels, '__temporality__') = 'Delta') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency_count' AND timestamp_ms >= 1650991980000 AND timestamp_ms <= 1651078380000 GROUP BY ts ORDER BY ts) as B ON A.`ts` = B.`ts`",
}, },
{ {
name: "TestQuery - Quantile", name: "TestQuery - Quantile",
@ -371,3 +372,211 @@ func TestDeltaQueryBuilder(t *testing.T) {
}) })
} }
} }
// testLogsWithFormula is the table for TestLogsQueryWithFormula. Each case
// wires two logs builder queries (A and B) into a formula query C and pins
// the exact ClickHouse SQL the query builder must emit for C. The cases
// cover: plain attribute keys, dotted attribute keys (e.g. "key1.1") which
// must keep the dot in SELECT/GROUP BY aliases and in the JOIN ON clause,
// and materialized (IsColumn) dotted keys whose physical column name
// replaces "." with "$$" (attribute_bool_key1$$1) while the alias keeps
// the dot.
var testLogsWithFormula = []struct {
	Name          string
	Query         *v3.QueryRangeParamsV3
	ExpectedQuery string
}{
	{
		// Graph panel, no dots: both sub-queries group by "key_1", so the
		// outer query joins A and B on `key_1` and the time bucket `ts`.
		Name: "test formula without dot in filter and group by attribute",
		Query: &v3.QueryRangeParamsV3{
			Start: 1702979275000000000,
			End:   1702981075000000000,
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeGraph,
				BuilderQueries: map[string]*v3.BuilderQuery{
					"A": {
						QueryName:    "A",
						StepInterval: 60,
						DataSource:   v3.DataSourceLogs,
						Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
							{Key: v3.AttributeKey{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
						}},
						AggregateOperator: v3.AggregateOperatorCount,
						Expression:        "A",
						OrderBy: []v3.OrderBy{
							{
								ColumnName: "timestamp",
								Order:      "desc",
							},
						},
						GroupBy: []v3.AttributeKey{
							{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
						},
					},
					"B": {
						QueryName:    "B",
						StepInterval: 60,
						DataSource:   v3.DataSourceLogs,
						Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
							{Key: v3.AttributeKey{Key: "key_2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
						}},
						AggregateOperator: v3.AggregateOperatorCount,
						Expression:        "B",
						OrderBy: []v3.OrderBy{
							{
								ColumnName: "timestamp",
								Order:      "desc",
							},
						},
						GroupBy: []v3.AttributeKey{
							{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
						},
					},
					// C combines the two sub-query values with a formula.
					"C": {
						QueryName:  "C",
						Expression: "A + B",
					},
				},
			},
		},
		ExpectedQuery: "SELECT A.`key_1` as `key_1`, A.`ts` as `ts`, A.value + B.value as value FROM " +
			"(SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_bool_value[indexOf(attributes_bool_key, 'key_1')] as `key_1`, toFloat64(count(*)) as value from " +
			"signoz_logs.distributed_logs where (timestamp >= 1702979275000000000 AND timestamp <= 1702981075000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key_1')] = true AND " +
			"has(attributes_bool_key, 'key_1') group by `key_1`,ts order by value DESC) as A INNER JOIN (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, " +
			"attributes_bool_value[indexOf(attributes_bool_key, 'key_1')] as `key_1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702979275000000000 AND timestamp <= 1702981075000000000) " +
			"AND attributes_bool_value[indexOf(attributes_bool_key, 'key_2')] = true AND has(attributes_bool_key, 'key_1') group by `key_1`,ts order by value DESC) as B ON A.`key_1` = B.`key_1` AND A.`ts` = B.`ts`",
	},
	{
		// Table panel with dotted keys: the dotted name "key1.1" must be kept
		// verbatim inside backticked aliases and the map lookup string; note
		// the table panel uses now() as the single ts bucket.
		Name: "test formula with dot in filter and group by attribute",
		Query: &v3.QueryRangeParamsV3{
			Start: 1702979056000000000,
			End:   1702982656000000000,
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeTable,
				BuilderQueries: map[string]*v3.BuilderQuery{
					"A": {
						QueryName:    "A",
						StepInterval: 60,
						DataSource:   v3.DataSourceLogs,
						Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
							{Key: v3.AttributeKey{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
						}},
						AggregateOperator: v3.AggregateOperatorCount,
						Expression:        "A",
						OrderBy: []v3.OrderBy{
							{
								ColumnName: "timestamp",
								Order:      "desc",
							},
						},
						GroupBy: []v3.AttributeKey{
							{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
						},
					},
					"B": {
						QueryName:    "B",
						StepInterval: 60,
						DataSource:   v3.DataSourceLogs,
						Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
							{Key: v3.AttributeKey{Key: "key1.2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
						}},
						AggregateOperator: v3.AggregateOperatorCount,
						Expression:        "B",
						OrderBy: []v3.OrderBy{
							{
								ColumnName: "timestamp",
								Order:      "desc",
							},
						},
						GroupBy: []v3.AttributeKey{
							{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
						},
					},
					"C": {
						QueryName:  "C",
						Expression: "A + B",
					},
				},
			},
		},
		ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT now() as ts, attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " +
			"toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] = true AND " +
			"has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as A INNER JOIN (SELECT now() as ts, attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " +
			"toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key1.2')] = true AND " +
			"has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
	},
	{
		// Materialized (IsColumn) dotted key: the physical column name swaps
		// "." for "$$" (attribute_bool_key1$$1, plus the _exists flag column)
		// while the query-level alias keeps the dotted form `key1.1`.
		Name: "test formula with dot in filter and group by materialized attribute",
		Query: &v3.QueryRangeParamsV3{
			Start: 1702980884000000000,
			End:   1702984484000000000,
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeGraph,
				BuilderQueries: map[string]*v3.BuilderQuery{
					"A": {
						QueryName:    "A",
						StepInterval: 60,
						DataSource:   v3.DataSourceLogs,
						Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
							{Key: v3.AttributeKey{Key: "key_2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: true, Operator: v3.FilterOperatorEqual},
						}},
						AggregateOperator: v3.AggregateOperatorCount,
						Expression:        "A",
						OrderBy: []v3.OrderBy{
							{
								ColumnName: "timestamp",
								Order:      "desc",
							},
						},
						GroupBy: []v3.AttributeKey{
							{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true},
						},
					},
					"B": {
						QueryName:    "B",
						StepInterval: 60,
						DataSource:   v3.DataSourceLogs,
						Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
							{Key: v3.AttributeKey{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
						}},
						AggregateOperator: v3.AggregateOperatorCount,
						Expression:        "B",
						OrderBy: []v3.OrderBy{
							{
								ColumnName: "timestamp",
								Order:      "desc",
							},
						},
						GroupBy: []v3.AttributeKey{
							{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true},
						},
					},
					"C": {
						QueryName:  "C",
						Expression: "A - B",
					},
				},
			},
		},
		ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value - B.value as value FROM (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, " +
			"attribute_bool_key1$$1 as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702980884000000000 AND timestamp <= 1702984484000000000) AND " +
			"attribute_bool_key_2 = true AND attribute_bool_key1$$1_exists=true group by `key1.1`,ts order by value DESC) as A INNER JOIN (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), " +
			"INTERVAL 60 SECOND) AS ts, attribute_bool_key1$$1 as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702980884000000000 AND " +
			"timestamp <= 1702984484000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key_1')] = true AND attribute_bool_key1$$1_exists=true group by `key1.1`,ts order by value DESC) as B " +
			"ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
	},
}
func TestLogsQueryWithFormula(t *testing.T) {
t.Parallel()
qbOptions := QueryBuilderOptions{
BuildLogQuery: logsV3.PrepareLogsQuery,
}
fm := featureManager.StartManager()
qb := NewQueryBuilder(qbOptions, fm)
for _, test := range testLogsWithFormula {
t.Run(test.Name, func(t *testing.T) {
queries, err := qb.PrepareQueries(test.Query)
require.NoError(t, err)
require.Equal(t, test.ExpectedQuery, queries["C"])
})
}
}

View File

@ -240,6 +240,9 @@ func GetClickhouseColumnName(typeName string, dataType, field string) string {
typeName = typeName[:len(typeName)-1] typeName = typeName[:len(typeName)-1]
} }
// if name contains . replace it with `$$`
field = strings.ReplaceAll(field, ".", "$$")
colName := fmt.Sprintf("%s_%s_%s", strings.ToLower(typeName), strings.ToLower(dataType), field) colName := fmt.Sprintf("%s_%s_%s", strings.ToLower(typeName), strings.ToLower(dataType), field)
return colName return colName
} }