feat: allow characters in attribute names (#4775)
This commit is contained in:
parent 397da5857f
commit a34c59762b
@@ -3674,7 +3674,7 @@ func isSelectedField(tableStatement string, field model.LogField) bool {
 // in case of attributes and resources, if there is a materialized column present then it is selected
 // TODO: handle partial change complete eg:- index is removed but materialized column is still present
 name := utils.GetClickhouseColumnName(field.Type, field.DataType, field.Name)
-return strings.Contains(tableStatement, fmt.Sprintf("`%s`", name))
+return strings.Contains(tableStatement, fmt.Sprintf("%s", name))
 }

 func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.UpdateField) *model.ApiError {
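The change above follows from the last hunk of this diff: utils.GetClickhouseColumnName now returns the materialized column name already wrapped in backticks, so isSelectedField matches the quoted name verbatim instead of adding its own quotes. A minimal, self-contained sketch of that check (the column name and CREATE TABLE statement below are illustrative, not taken from SigNoz):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The helper now hands back a backtick-quoted identifier.
	name := "`attribute_string_servicename`"

	// Illustrative table DDL; the real statement comes from ClickHouse.
	tableStatement := "CREATE TABLE signoz_logs.logs ( ..., `attribute_string_servicename` String, ... )"

	// Selected-field check after the change: match the quoted name directly.
	fmt.Println(strings.Contains(tableStatement, name)) // true
}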
@@ -3708,10 +3708,10 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
 return &model.ApiError{Err: err, Typ: model.ErrorInternal}
 }

-query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s_exists bool DEFAULT if(indexOf(%s, '%s') != 0, true, false) CODEC(ZSTD(1))",
+query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s_exists` bool DEFAULT if(indexOf(%s, '%s') != 0, true, false) CODEC(ZSTD(1))",
 r.logsDB, table,
 r.cluster,
-colname,
+strings.TrimSuffix(colname, "`"),
 keyColName,
 field.Name,
 )
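Because the quoted name now ends with a backtick, every call site that appends a suffix such as _exists (here) or _idx (next hunk) trims the closing backtick first and re-closes the quote after the suffix, so the result stays a single quoted identifier. A sketch of the rendered DDL, with illustrative database, table, and column values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative values only; the real ones come from the reader's configuration.
	logsDB, table, cluster := "signoz_logs", "logs", "cluster"
	colname := "`attribute_string_test-attr`" // as returned by the quoting helper
	keyColName := "attributes_string_key"
	fieldName := "test-attr"

	// Same formatting trick as the diff: trim the closing backtick, append the
	// suffix, and re-close the quote inside the format string.
	query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s_exists` bool DEFAULT if(indexOf(%s, '%s') != 0, true, false) CODEC(ZSTD(1))",
		logsDB, table, cluster,
		strings.TrimSuffix(colname, "`"),
		keyColName, fieldName,
	)
	fmt.Println(query)
	// ... ADD COLUMN IF NOT EXISTS `attribute_string_test-attr_exists` bool DEFAULT ...
}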
@@ -3733,10 +3733,10 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
 if field.IndexGranularity == 0 {
 field.IndexGranularity = constants.DefaultLogSkipIndexGranularity
 }
-query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS %s_idx (%s) TYPE %s GRANULARITY %d",
+query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS %s_idx` (%s) TYPE %s GRANULARITY %d",
 r.logsDB, r.logsLocalTable,
 r.cluster,
-colname,
+strings.TrimSuffix(colname, "`"),
 colname,
 field.IndexType,
 field.IndexGranularity,

@@ -3748,7 +3748,7 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda

 } else {
 // Delete the index first
-query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s DROP INDEX IF EXISTS %s_idx", r.logsDB, r.logsLocalTable, r.cluster, colname)
+query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s DROP INDEX IF EXISTS %s_idx`", r.logsDB, r.logsLocalTable, r.cluster, strings.TrimSuffix(colname, "`"))
 err := r.db.Exec(ctx, query)
 if err != nil {
 return &model.ApiError{Err: err, Typ: model.ErrorInternal}

@@ -3768,11 +3768,11 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
 }

 // drop exists column on logs table
-query = "ALTER TABLE %s.%s ON CLUSTER %s DROP COLUMN IF EXISTS %s_exists "
+query = "ALTER TABLE %s.%s ON CLUSTER %s DROP COLUMN IF EXISTS %s_exists` "
 err = r.db.Exec(ctx, fmt.Sprintf(query,
 r.logsDB, table,
 r.cluster,
-colname,
+strings.TrimSuffix(colname, "`"),
 ),
 )
 if err != nil {

@@ -4329,7 +4329,7 @@ func isColumn(tableStatement, attrType, field, datType string) bool {
 // value of attrType will be `resource` or `tag`, if `tag` change it to `attribute`
 name := utils.GetClickhouseColumnName(attrType, datType, field)

-return strings.Contains(tableStatement, fmt.Sprintf("`%s` ", name))
+return strings.Contains(tableStatement, fmt.Sprintf("%s ", name))
 }

 func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) {
@@ -252,7 +252,7 @@ func TestReplaceInterestingFields(t *testing.T) {
 },
 }

-expectedTokens := []string{"attributes_int64_value[indexOf(attributes_int64_key, 'id.userid')] IN (100) ", "and attribute_int64_id_key >= 50 ", `AND body ILIKE '%searchstring%'`}
+expectedTokens := []string{"attributes_int64_value[indexOf(attributes_int64_key, 'id.userid')] IN (100) ", "and `attribute_int64_id_key` >= 50 ", `AND body ILIKE '%searchstring%'`}
 Convey("testInterestingFields", t, func() {
 tokens, err := replaceInterestingFields(&allFields, queryTokens)
 So(err, ShouldBeNil)

@@ -374,7 +374,7 @@ var generateSQLQueryTestCases = []struct {
 IdGt: "2BsKLKv8cZrLCn6rkOcRGkdjBdM",
 IdLT: "2BsKG6tRpFWjYMcWsAGKfSxoQdU",
 },
-SqlFilter: "( timestamp >= '1657689292000' and timestamp <= '1657689294000' and id > '2BsKLKv8cZrLCn6rkOcRGkdjBdM' and id < '2BsKG6tRpFWjYMcWsAGKfSxoQdU' ) and ( attribute_int64_field1 < 100 and attribute_int64_field1 > 50 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] <= 500 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] >= 400 ) ",
+SqlFilter: "( timestamp >= '1657689292000' and timestamp <= '1657689294000' and id > '2BsKLKv8cZrLCn6rkOcRGkdjBdM' and id < '2BsKG6tRpFWjYMcWsAGKfSxoQdU' ) and ( `attribute_int64_field1` < 100 and `attribute_int64_field1` > 50 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] <= 500 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] >= 400 ) ",
 },
 {
 Name: "second query with only timestamp range",

@@ -383,7 +383,7 @@ var generateSQLQueryTestCases = []struct {
 TimestampStart: uint64(1657689292000),
 TimestampEnd: uint64(1657689294000),
 },
-SqlFilter: "( timestamp >= '1657689292000' and timestamp <= '1657689294000' ) and ( attribute_int64_field1 < 100 and attribute_int64_field1 > 50 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] <= 500 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] >= 400 ) ",
+SqlFilter: "( timestamp >= '1657689292000' and timestamp <= '1657689294000' ) and ( `attribute_int64_field1` < 100 and `attribute_int64_field1` > 50 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] <= 500 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] >= 400 ) ",
 },
 {
 Name: "generate case sensitive query",

@@ -392,7 +392,7 @@ var generateSQLQueryTestCases = []struct {
 TimestampStart: uint64(1657689292000),
 TimestampEnd: uint64(1657689294000),
 },
-SqlFilter: "( timestamp >= '1657689292000' and timestamp <= '1657689294000' ) and ( attribute_int64_field1 < 100 and attributes_int64_value[indexOf(attributes_int64_key, 'FielD1')] > 50 and attribute_double64_Field2 > 10 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] <= 500 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] >= 400 ) ",
+SqlFilter: "( timestamp >= '1657689292000' and timestamp <= '1657689294000' ) and ( `attribute_int64_field1` < 100 and attributes_int64_value[indexOf(attributes_int64_key, 'FielD1')] > 50 and `attribute_double64_Field2` > 10 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] <= 500 and attributes_int64_value[indexOf(attributes_int64_key, 'code')] >= 400 ) ",
 },
 {
 Name: "Check exists and not exists",

@@ -401,7 +401,7 @@ var generateSQLQueryTestCases = []struct {
 TimestampStart: uint64(1657689292000),
 TimestampEnd: uint64(1657689294000),
 },
-SqlFilter: "( timestamp >= '1657689292000' and timestamp <= '1657689294000' ) and ( has(attributes_int64_key, 'field1') and NOT has(attributes_double64_key, 'Field2') and attribute_double64_Field2 > 10 ) ",
+SqlFilter: "( timestamp >= '1657689292000' and timestamp <= '1657689294000' ) and ( has(attributes_int64_key, 'field1') and NOT has(attributes_double64_key, 'Field2') and `attribute_double64_Field2` > 10 ) ",
 },
 {
 Name: "Check top level key filter",
@@ -150,7 +150,7 @@ func GetExistsNexistsFilter(op v3.FilterOperator, item v3.FilterItem) string {
 if op == v3.FilterOperatorNotExists {
 val = false
 }
-return fmt.Sprintf("%s_exists=%v", getClickhouseColumnName(item.Key), val)
+return fmt.Sprintf("%s_exists`=%v", strings.TrimSuffix(getClickhouseColumnName(item.Key), "`"), val)
 }
 columnType := getClickhouseLogsColumnType(item.Key.Type)
 columnDataType := getClickhouseLogsColumnDataType(item.Key.DataType)
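The v3 query builder applies the same trim-and-requote trick when rendering exists/nexists filters; the updated test expectations later in this diff (for example `attribute_string_method_exists`=true) show the resulting condition. A minimal sketch, assuming a column name already quoted by the builder's helper:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical quoted column name, in the shape the builder now produces.
	col := "`attribute_string_method`"
	val := true // false for a "nexists" filter

	// Same expression as GetExistsNexistsFilter after this change.
	filter := fmt.Sprintf("%s_exists`=%v", strings.TrimSuffix(col, "`"), val)
	fmt.Println(filter) // `attribute_string_method_exists`=true
}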
@@ -212,7 +212,7 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey,
 conditions = append(conditions, fmt.Sprintf("has(%s_%s_key, '%s')", columnType, columnDataType, attr.Key))
 } else if attr.Type != v3.AttributeKeyTypeUnspecified {
 // for materialzied columns
-conditions = append(conditions, fmt.Sprintf("%s_exists=true", getClickhouseColumnName(attr)))
+conditions = append(conditions, fmt.Sprintf("%s_exists`=true", strings.TrimSuffix(getClickhouseColumnName(attr), "`")))
 }
 }

@@ -26,17 +26,17 @@ var testGetClickhouseColumnNameData = []struct {
 {
 Name: "selected field",
 AttributeKey: v3.AttributeKey{Key: "servicename", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
-ExpectedColumnName: "attribute_string_servicename",
+ExpectedColumnName: "`attribute_string_servicename`",
 },
 {
 Name: "selected field resource",
 AttributeKey: v3.AttributeKey{Key: "sdk_version", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeResource, IsColumn: true},
-ExpectedColumnName: "resource_int64_sdk_version",
+ExpectedColumnName: "`resource_int64_sdk_version`",
 },
 {
 Name: "selected field float",
 AttributeKey: v3.AttributeKey{Key: "sdk_version", DataType: v3.AttributeKeyDataTypeFloat64, Type: v3.AttributeKeyTypeTag, IsColumn: true},
-ExpectedColumnName: "attribute_float64_sdk_version",
+ExpectedColumnName: "`attribute_float64_sdk_version`",
 },
 {
 Name: "same name as top level column",

@@ -48,6 +48,11 @@ var testGetClickhouseColumnNameData = []struct {
 AttributeKey: v3.AttributeKey{Key: "trace_id", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true},
 ExpectedColumnName: "trace_id",
 },
+{
+Name: "name with - ",
+AttributeKey: v3.AttributeKey{Key: "test-attr", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
+ExpectedColumnName: "`attribute_string_test-attr`",
+},
 }

 func TestGetClickhouseColumnName(t *testing.T) {
@@ -131,7 +136,7 @@ var timeSeriesFilterQueryData = []struct {
 {Key: v3.AttributeKey{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: "john", Operator: "="},
 {Key: v3.AttributeKey{Key: "k8s_namespace", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: "my_service", Operator: "!="},
 }},
-ExpectedFilter: "attribute_string_user_name = 'john' AND resources_string_value[indexOf(resources_string_key, 'k8s_namespace')] != 'my_service'",
+ExpectedFilter: "`attribute_string_user_name` = 'john' AND resources_string_value[indexOf(resources_string_key, 'k8s_namespace')] != 'my_service'",
 },
 {
 Name: "Test like",

@@ -194,7 +199,7 @@ var timeSeriesFilterQueryData = []struct {
 FilterSet: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 {Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: "host: \"(?P<host>\\S+)\"", Operator: "regex"},
 }},
-ExpectedFilter: "match(attribute_string_host, 'host: \"(?P<host>\\\\S+)\"')",
+ExpectedFilter: "match(`attribute_string_host`, 'host: \"(?P<host>\\\\S+)\"')",
 },
 {
 Name: "Test not regex",

@@ -217,7 +222,7 @@ var timeSeriesFilterQueryData = []struct {
 {Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102.", Operator: "ncontains"},
 }},
 GroupBy: []v3.AttributeKey{{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
-ExpectedFilter: "attributes_string_value[indexOf(attributes_string_key, 'host')] NOT ILIKE '%102.%' AND attribute_string_host_exists=true",
+ExpectedFilter: "attributes_string_value[indexOf(attributes_string_key, 'host')] NOT ILIKE '%102.%' AND `attribute_string_host_exists`=true",
 },
 {
 Name: "Wrong data",

@@ -266,14 +271,14 @@ var timeSeriesFilterQueryData = []struct {
 FilterSet: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 {Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: "exists"},
 }},
-ExpectedFilter: "attribute_string_method_exists=true",
+ExpectedFilter: "`attribute_string_method_exists`=true",
 },
 {
 Name: "Test nexists on materiazlied column",
 FilterSet: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 {Key: v3.AttributeKey{Key: "status", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: "nexists"},
 }},
-ExpectedFilter: "attribute_int64_status_exists=false",
+ExpectedFilter: "`attribute_int64_status_exists`=false",
 },
 // add new tests
 }
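The expectations above also show the two access paths the builder chooses between: a materialized column is referenced by its backtick-quoted name (with the quotes now supplied by the helper), while a non-materialized attribute is still read out of the key/value map arrays. A simplified stand-in illustrating the contrast (the helper below is hypothetical, covers string attributes only, and ignores the dot-to-$$ rewrite):

package main

import "fmt"

// stringAttrRef is a hypothetical helper: materialized columns get the quoted
// column name, everything else goes through the attributes map arrays.
func stringAttrRef(key string, isColumn bool) string {
	if isColumn {
		return fmt.Sprintf("`attribute_string_%s`", key)
	}
	return fmt.Sprintf("attributes_string_value[indexOf(attributes_string_key, '%s')]", key)
}

func main() {
	fmt.Println(stringAttrRef("user_name", true)) // `attribute_string_user_name`
	fmt.Println(stringAttrRef("host", false))     // attributes_string_value[indexOf(attributes_string_key, 'host')]
}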
@@ -368,7 +373,7 @@ var testBuildLogsQueryData = []struct {
 OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "ASC"}},
 },
 TableName: "logs",
-ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attribute_string_name))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attribute_string_name_exists=true group by ts order by value ASC",
+ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(`attribute_string_name`))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND `attribute_string_name_exists`=true group by ts order by value ASC",
 },
 {
 Name: "Test aggregate count distinct on non selected field",

@@ -421,9 +426,9 @@ var testBuildLogsQueryData = []struct {
 },

 TableName: "logs",
-ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attribute_string_host$$name as `host.name`, toFloat64(count(distinct(attribute_string_method$$name))) as value" +
+ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, `attribute_string_host$$name` as `host.name`, toFloat64(count(distinct(`attribute_string_method$$name`))) as value" +
 " from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
-"AND attribute_string_host$$name_exists=true AND attribute_string_method$$name_exists=true " +
+"AND `attribute_string_host$$name_exists`=true AND `attribute_string_method$$name_exists`=true " +
 "group by `host.name`,ts " +
 "order by `host.name` ASC",
 },
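For keys that contain dots, such as host.name, the dots are rewritten to $$ before the name is quoted, and the SELECT alias keeps the original dotted key, as the expected query above shows. A small sketch of that combination:

package main

import (
	"fmt"
	"strings"
)

func main() {
	key := "host.name"

	// Dot-to-$$ rewrite plus backtick quoting, as in the expected query above.
	col := fmt.Sprintf("`attribute_string_%s`", strings.ReplaceAll(key, ".", "$$"))
	fmt.Println(col) // `attribute_string_host$$name`

	// The alias keeps the original dotted key.
	fmt.Printf("%s as `%s`\n", col, key) // `attribute_string_host$$name` as `host.name`
}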
@@ -449,11 +454,11 @@ var testBuildLogsQueryData = []struct {
 TableName: "logs",
 ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
 " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
-"toFloat64(count(distinct(attribute_string_name))) as value from signoz_logs.distributed_logs " +
+"toFloat64(count(distinct(`attribute_string_name`))) as value from signoz_logs.distributed_logs " +
 "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND resources_string_value[indexOf(resources_string_key, 'x')] != 'abc' " +
 "AND has(attributes_string_key, 'method') " +
-"AND attribute_string_name_exists=true " +
+"AND `attribute_string_name_exists`=true " +
 "group by `method`,ts " +
 "order by `method` ASC",
 },
@@ -480,12 +485,12 @@ var testBuildLogsQueryData = []struct {
 ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
 " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
 "resources_string_value[indexOf(resources_string_key, 'x')] as `x`, " +
-"toFloat64(count(distinct(attribute_string_name))) as value from signoz_logs.distributed_logs " +
+"toFloat64(count(distinct(`attribute_string_name`))) as value from signoz_logs.distributed_logs " +
 "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND resources_string_value[indexOf(resources_string_key, 'x')] != 'abc' " +
 "AND has(attributes_string_key, 'method') " +
 "AND has(resources_string_key, 'x') " +
-"AND attribute_string_name_exists=true " +
+"AND `attribute_string_name_exists`=true " +
 "group by `method`,`x`,ts " +
 "order by `method` ASC,`x` ASC",
 },

@@ -540,12 +545,12 @@ var testBuildLogsQueryData = []struct {
 TableName: "logs",
 ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
 " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
-"sum(attribute_float64_bytes) as value " +
+"sum(`attribute_float64_bytes`) as value " +
 "from signoz_logs.distributed_logs " +
 "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
 "AND has(attributes_string_key, 'method') " +
-"AND attribute_float64_bytes_exists=true " +
+"AND `attribute_float64_bytes_exists`=true " +
 "group by `method`,ts " +
 "order by `method` ASC",
 },

@@ -570,12 +575,12 @@ var testBuildLogsQueryData = []struct {
 TableName: "logs",
 ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
 " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
-"min(attribute_float64_bytes) as value " +
+"min(`attribute_float64_bytes`) as value " +
 "from signoz_logs.distributed_logs " +
 "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
 "AND has(attributes_string_key, 'method') " +
-"AND attribute_float64_bytes_exists=true " +
+"AND `attribute_float64_bytes_exists`=true " +
 "group by `method`,ts " +
 "order by `method` ASC",
 },
@@ -600,12 +605,12 @@ var testBuildLogsQueryData = []struct {
 TableName: "logs",
 ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
 " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
-"max(attribute_float64_bytes) as value " +
+"max(`attribute_float64_bytes`) as value " +
 "from signoz_logs.distributed_logs " +
 "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 "AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
 "AND has(attributes_string_key, 'method') " +
-"AND attribute_float64_bytes_exists=true " +
+"AND `attribute_float64_bytes_exists`=true " +
 "group by `method`,ts " +
 "order by `method` ASC",
 },

@@ -627,11 +632,11 @@ var testBuildLogsQueryData = []struct {
 TableName: "logs",
 ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts," +
 " attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, " +
-"quantile(0.05)(attribute_float64_bytes) as value " +
+"quantile(0.05)(`attribute_float64_bytes`) as value " +
 "from signoz_logs.distributed_logs " +
 "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 "AND has(attributes_string_key, 'method') " +
-"AND attribute_float64_bytes_exists=true " +
+"AND `attribute_float64_bytes_exists`=true " +
 "group by `method`,ts " +
 "order by `method` ASC",
 },

@@ -653,10 +658,10 @@ var testBuildLogsQueryData = []struct {
 TableName: "logs",
 PreferRPM: true,
 ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`" +
-", sum(attribute_float64_bytes)/1.000000 as value from signoz_logs.distributed_logs " +
+", sum(`attribute_float64_bytes`)/1.000000 as value from signoz_logs.distributed_logs " +
 "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
 "AND has(attributes_string_key, 'method') " +
-"AND attribute_float64_bytes_exists=true " +
+"AND `attribute_float64_bytes_exists`=true " +
 "group by `method`,ts order by `method` ASC",
 },
 {
@@ -554,10 +554,10 @@ var testLogsWithFormula = []struct {
 },
 },
 ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value - B.value as value FROM (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, " +
-"attribute_bool_key1$$1 as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702980884000000000 AND timestamp <= 1702984484000000000) AND " +
-"attribute_bool_key_2 = true AND attribute_bool_key1$$1_exists=true group by `key1.1`,ts order by value DESC) as A INNER JOIN (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), " +
-"INTERVAL 60 SECOND) AS ts, attribute_bool_key1$$1 as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702980884000000000 AND " +
-"timestamp <= 1702984484000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key_1')] = true AND attribute_bool_key1$$1_exists=true group by `key1.1`,ts order by value DESC) as B " +
+"`attribute_bool_key1$$1` as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702980884000000000 AND timestamp <= 1702984484000000000) AND " +
+"`attribute_bool_key_2` = true AND `attribute_bool_key1$$1_exists`=true group by `key1.1`,ts order by value DESC) as A INNER JOIN (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), " +
+"INTERVAL 60 SECOND) AS ts, `attribute_bool_key1$$1` as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702980884000000000 AND " +
+"timestamp <= 1702984484000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key_1')] = true AND `attribute_bool_key1$$1_exists`=true group by `key1.1`,ts order by value DESC) as B " +
 "ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
 },
 }
@@ -243,7 +243,7 @@ func GetClickhouseColumnName(typeName string, dataType, field string) string {
 // if name contains . replace it with `$$`
 field = strings.ReplaceAll(field, ".", "$$")

-colName := fmt.Sprintf("%s_%s_%s", strings.ToLower(typeName), strings.ToLower(dataType), field)
+colName := fmt.Sprintf("`%s_%s_%s`", strings.ToLower(typeName), strings.ToLower(dataType), field)
 return colName
 }

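The hunk above is the heart of the change: the formatting helper itself now emits the backticks, which is what lets attribute names carry characters such as '-' all the way into the generated DDL and queries. A simplified sketch of the behaviour after this commit (not the full SigNoz function; per the test data earlier in the diff, top-level columns such as trace_id are still returned unquoted by the query-builder variant):

package main

import (
	"fmt"
	"strings"
)

// clickhouseColumnName mirrors the quoting shown in the hunk above:
// dots become $$ and the result is wrapped in backticks.
func clickhouseColumnName(typeName, dataType, field string) string {
	field = strings.ReplaceAll(field, ".", "$$")
	return fmt.Sprintf("`%s_%s_%s`", strings.ToLower(typeName), strings.ToLower(dataType), field)
}

func main() {
	// Before this commit the same inputs produced attribute_string_test-attr,
	// which is not a valid bare ClickHouse identifier because of the '-'.
	fmt.Println(clickhouseColumnName("attribute", "string", "test-attr"))
	// Output: `attribute_string_test-attr`
}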