feat: add support for multiquery in ts with limit (#2970)

* feat: add support for multiquery in ts with limit

* feat: multiple groupby support

* feat: variables renamed

* feat: cleanup

* feat: clickhouse formatted value updated to support pointers

* fix: filter creation logic updated

* fix: minor fixes and tests

* fix: autcomplete top level keys

* Revert "fix: autcomplete top level keys"

This reverts commit 8d5e1e480fac5427db67bd3f015ad30de8b3aa1e.

* fix: minor fixes

* feat: formula support for timeseries query with limit

* feat: implementation updated for limit queries

* feat: cleanup

* feat: order by logic updated

* feat: order by logic updated for both ts and table view

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
Co-authored-by: Vishal Sharma <makeavish786@gmail.com>
Nityananda Gohain 2023-07-16 23:07:45 +05:30 committed by GitHub
parent 7a177e18e4
commit 7f9ba6c43a
8 changed files with 463 additions and 127 deletions
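
For graph panels that set both a limit and a group by, this change builds the time-series result in two phases: a first query that returns only the limited set of group-by key values, and a second query that keeps an `IN (%s)` placeholder in its filter, into which the first query is substituted. A minimal sketch of that flow follows; the wrapper function name and the import paths are assumptions inferred from the diff, not part of the commit.

```go
package example

import (
	"fmt"

	// paths inferred from the repository layout; not shown in this diff
	logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
	"go.signoz.io/signoz/pkg/query-service/constants"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

// buildGraphLimitQuery is a hypothetical wrapper showing how the two phases
// added by this commit compose into one ClickHouse statement.
func buildGraphLimitQuery(start, end int64, qt v3.QueryType, pt v3.PanelType, mq *v3.BuilderQuery) (string, error) {
	// phase one: only the group-by key values, with the user's limit applied
	limitQuery, err := logsV3.PrepareLogsQuery(start, end, qt, pt, mq, constants.FirstQueryGraphLimit)
	if err != nil {
		return "", err
	}
	// phase two: the usual time-series query, carrying an "IN (%s)" placeholder
	placeholderQuery, err := logsV3.PrepareLogsQuery(start, end, qt, pt, mq, constants.SecondQueryGraphLimit)
	if err != nil {
		return "", err
	}
	// the first query becomes a subquery inside the second one
	return fmt.Sprintf(placeholderQuery, limitQuery), nil
}
```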

View File

@ -2522,6 +2522,7 @@ func (aH *APIHandler) execClickHouseGraphQueries(ctx context.Context, queries ma
wg.Add(1)
go func(name, query string) {
defer wg.Done()
seriesList, err := aH.reader.GetTimeSeriesResultV3(ctx, query)
if err != nil {
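
The handler fans each named graph query out to its own goroutine and, as of this commit, fetches results through the reader's GetTimeSeriesResultV3. A self-contained sketch of that fan-out pattern follows; the `Series` type, the `run` callback, and the mutex-based collection are placeholders for the handler's actual fields, which this hunk only partially shows.

```go
package example

import (
	"context"
	"sync"
)

// Series stands in for the query-service result type returned by
// GetTimeSeriesResultV3; the real handler also records per-query errors.
type Series struct{}

// execGraphQueries sketches the fan-out pattern visible in the hunk above:
// one goroutine per named query, results gathered behind a mutex.
func execGraphQueries(ctx context.Context, queries map[string]string,
	run func(ctx context.Context, query string) ([]*Series, error)) map[string][]*Series {
	var (
		wg      sync.WaitGroup
		mu      sync.Mutex
		results = make(map[string][]*Series, len(queries))
	)
	for name, query := range queries {
		wg.Add(1)
		go func(name, query string) {
			defer wg.Done()
			seriesList, err := run(ctx, query)
			if err != nil {
				return // the real handler stores the error keyed by the query name
			}
			mu.Lock()
			results[name] = seriesList
			mu.Unlock()
		}(name, query)
	}
	wg.Wait()
	return results
}
```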

View File

@ -89,17 +89,29 @@ func getClickhouseColumnName(key v3.AttributeKey) string {
}
// getSelectLabels returns the select labels for the query based on groupBy and aggregateOperator
func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) (string, error) {
func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
var selectLabels string
if aggregatorOperator == v3.AggregateOperatorNoOp {
selectLabels = ""
} else {
for _, tag := range groupBy {
columnName := getClickhouseColumnName(tag)
selectLabels += fmt.Sprintf(", %s as %s", columnName, tag.Key)
selectLabels += fmt.Sprintf(" %s as %s,", columnName, tag.Key)
}
}
return selectLabels, nil
return selectLabels
}
func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
var selectLabels []string
if aggregatorOperator == v3.AggregateOperatorNoOp {
return ""
} else {
for _, tag := range groupBy {
selectLabels = append(selectLabels, tag.Key)
}
}
return strings.Join(selectLabels, ",")
}
func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey) (string, error) {
@ -163,7 +175,7 @@ func getZerosForEpochNano(epoch int64) int64 {
return int64(math.Pow(10, float64(19-count)))
}
func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.BuilderQuery) (string, error) {
func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.BuilderQuery, graphLimitQtype string) (string, error) {
filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy)
if err != nil {
@ -173,10 +185,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
// timerange will be sent in epoch millisecond
timeFilter := fmt.Sprintf("(timestamp >= %d AND timestamp <= %d)", start*getZerosForEpochNano(start), end*getZerosForEpochNano(end))
selectLabels, err := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
if err != nil {
return "", err
}
selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
having := having(mq.Having)
if having != "" {
@ -184,35 +193,44 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
}
var queryTmpl string
if panelType == v3.PanelTypeTable {
if graphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT"
} else if panelType == v3.PanelTypeTable {
queryTmpl =
"SELECT now() as ts" + selectLabels +
", %s as value " +
"from signoz_logs.distributed_logs " +
"where " + timeFilter + "%s" +
"%s%s" +
"%s"
"SELECT now() as ts,"
} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
// Select the aggregate value for interval
queryTmpl =
fmt.Sprintf("SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL %d SECOND) AS ts", step) + selectLabels +
", %s as value " +
"from signoz_logs.distributed_logs " +
"where " + timeFilter + "%s" +
"%s%s" +
"%s"
fmt.Sprintf("SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL %d SECOND) AS ts,", step)
}
groupBy := groupByAttributeKeyTags(panelType, mq.GroupBy...)
queryTmpl =
queryTmpl + selectLabels +
" %s as value " +
"from signoz_logs.distributed_logs " +
"where " + timeFilter + "%s" +
"%s%s" +
"%s"
// we don't need the value for the first query;
// going this route for a cleaner implementation
if graphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
}
groupBy := groupByAttributeKeyTags(panelType, graphLimitQtype, mq.GroupBy...)
if panelType != v3.PanelTypeList && groupBy != "" {
groupBy = " group by " + groupBy
}
orderBy := orderByAttributeKeyTags(panelType, mq.AggregateOperator, mq.OrderBy, mq.GroupBy)
orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy)
if panelType != v3.PanelTypeList && orderBy != "" {
orderBy = " order by " + orderBy
}
if graphLimitQtype == constants.SecondQueryGraphLimit {
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) IN (", getSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
}
aggregationKey := ""
if mq.AggregateAttribute.Key != "" {
aggregationKey = getClickhouseColumnName(mq.AggregateAttribute)
@ -273,82 +291,56 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
// groupBy returns a string of comma separated tags for group by clause
// `ts` is always added to the group by clause
func groupBy(panelType v3.PanelType, tags ...string) string {
if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
func groupBy(panelType v3.PanelType, graphLimitQtype string, tags ...string) string {
if (graphLimitQtype != constants.FirstQueryGraphLimit) && (panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue) {
tags = append(tags, "ts")
}
return strings.Join(tags, ",")
}
func groupByAttributeKeyTags(panelType v3.PanelType, tags ...v3.AttributeKey) string {
func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
groupTags := []string{}
for _, tag := range tags {
groupTags = append(groupTags, tag.Key)
}
return groupBy(panelType, groupTags...)
return groupBy(panelType, graphLimitQtype, groupTags...)
}
// orderBy returns a string of comma separated tags for order by clause
// if there are remaining items which are not present in tags they are also added
// if the order is not specified, it defaults to ASC
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tags []string) []string {
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string {
var orderBy []string
// create a lookup
addedToOrderBy := map[string]bool{}
itemsLookup := map[string]v3.OrderBy{}
for i := 0; i < len(items); i++ {
addedToOrderBy[items[i].ColumnName] = false
itemsLookup[items[i].ColumnName] = items[i]
}
for _, tag := range tags {
if item, ok := itemsLookup[tag]; ok {
orderBy = append(orderBy, fmt.Sprintf("%s %s", item.ColumnName, item.Order))
addedToOrderBy[item.ColumnName] = true
} else {
orderBy = append(orderBy, fmt.Sprintf("%s ASC", tag))
}
}
// users might want to order by the value of the aggregation
for _, item := range items {
if item.ColumnName == constants.SigNozOrderByValue {
orderBy = append(orderBy, fmt.Sprintf("value %s", item.Order))
addedToOrderBy[item.ColumnName] = true
}
}
// add the remaining items
if panelType == v3.PanelTypeList {
for _, item := range items {
// since these are not present in tags we will have to select them correctly
// for list view there is no need to check if it was added since they wont be added yet but this is just for safety
if !addedToOrderBy[item.ColumnName] {
attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
name := getClickhouseColumnName(attr)
orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order))
}
} else if _, ok := tagLookup[item.ColumnName]; ok {
orderBy = append(orderBy, fmt.Sprintf("%s %s", item.ColumnName, item.Order))
} else if panelType == v3.PanelTypeList {
attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
name := getClickhouseColumnName(attr)
orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order))
}
}
return orderBy
}
func orderByAttributeKeyTags(panelType v3.PanelType, aggregatorOperator v3.AggregateOperator, items []v3.OrderBy, tags []v3.AttributeKey) string {
var groupTags []string
for _, tag := range tags {
groupTags = append(groupTags, tag.Key)
}
orderByArray := orderBy(panelType, items, groupTags)
func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string {
if panelType == v3.PanelTypeList {
if len(orderByArray) == 0 {
orderByArray = append(orderByArray, constants.TIMESTAMP)
tagLookup := map[string]struct{}{}
for _, v := range tags {
tagLookup[v.Key] = struct{}{}
}
orderByArray := orderBy(panelType, items, tagLookup)
if len(orderByArray) == 0 {
if panelType == v3.PanelTypeList {
orderByArray = append(orderByArray, constants.TIMESTAMP+" DESC")
} else {
orderByArray = append(orderByArray, "value DESC")
}
} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
// since in other aggregation operator we will have to add ts as it will not be present in group by
orderByArray = append(orderByArray, "ts")
}
str := strings.Join(orderByArray, ",")
@ -392,8 +384,26 @@ func addOffsetToQuery(query string, offset uint64) string {
return fmt.Sprintf("%s OFFSET %d", query, offset)
}
func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery) (string, error) {
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq)
func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, graphLimitQtype string) (string, error) {
if graphLimitQtype == constants.FirstQueryGraphLimit {
// return only the group-by key names
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, graphLimitQtype)
if err != nil {
return "", err
}
query = addLimitToQuery(query, mq.Limit)
return query, nil
} else if graphLimitQtype == constants.SecondQueryGraphLimit {
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, graphLimitQtype)
if err != nil {
return "", err
}
return query, nil
}
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, graphLimitQtype)
if err != nil {
return "", err
}
@ -401,7 +411,7 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
query, err = reduceQuery(query, mq.ReduceTo, mq.AggregateOperator)
}
if panelType == v3.PanelTypeList {
if panelType == v3.PanelTypeList || panelType == v3.PanelTypeTable {
if mq.PageSize > 0 {
if mq.Limit > 0 && mq.Offset > mq.Limit {
return "", fmt.Errorf("max limit exceeded")
@ -414,4 +424,5 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
}
return query, err
}
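
To make the two shapes concrete, here is an abridged version of the queries the tests added later in this commit expect; `<method>`, `<count>`, and `<filters>` are stand-ins for the full attribute-array expressions and the time/attribute predicates, not literal SQL. The query builder then substitutes the first query into the second query's `IN (%s)`.

```go
package main

import "fmt"

func main() {
	// abridged from the test expectations added later in this commit
	firstQuery := `SELECT method from (SELECT <method> as method, <count> as value ` +
		`from signoz_logs.distributed_logs where <filters> ` +
		`group by method order by value DESC) LIMIT 10`

	secondQuery := `SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, ` +
		`<method> as method, <count> as value from signoz_logs.distributed_logs ` +
		`where <filters> AND (method) IN (%s) group by method,ts order by value DESC`

	// the first query is substituted into the IN (%s) placeholder
	combined := fmt.Sprintf(secondQuery, firstQuery)
	fmt.Println(combined)
}
```

Because the substitution is textual, ClickHouse receives a single statement whose outer time-series query is restricted to the top groups returned by the inner, limited query.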

View File

@ -1,6 +1,7 @@
package v3
import (
"fmt"
"testing"
. "github.com/smartystreets/goconvey/convey"
@ -59,13 +60,13 @@ var testGetSelectLabelsData = []struct {
Name: "select fields for groupBy attribute",
AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
SelectLabels: ", attributes_string_value[indexOf(attributes_string_key, 'user_name')] as user_name",
SelectLabels: " attributes_string_value[indexOf(attributes_string_key, 'user_name')] as user_name,",
},
{
Name: "select fields for groupBy resource",
AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}},
SelectLabels: ", resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name",
SelectLabels: " resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name,",
},
{
Name: "select fields for groupBy attribute and resource",
@ -74,27 +75,26 @@ var testGetSelectLabelsData = []struct {
{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
},
SelectLabels: ", resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name, attributes_string_value[indexOf(attributes_string_key, 'host')] as host",
SelectLabels: " resources_string_value[indexOf(resources_string_key, 'user_name')] as user_name, attributes_string_value[indexOf(attributes_string_key, 'host')] as host,",
},
{
Name: "select fields for groupBy materialized columns",
AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "host", IsColumn: true}},
SelectLabels: ", host as host",
SelectLabels: " host as host,",
},
{
Name: "trace_id field as an attribute",
AggregateOperator: v3.AggregateOperatorCount,
GroupByTags: []v3.AttributeKey{{Key: "trace_id", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
SelectLabels: ", attributes_string_value[indexOf(attributes_string_key, 'trace_id')] as trace_id",
SelectLabels: " attributes_string_value[indexOf(attributes_string_key, 'trace_id')] as trace_id,",
},
}
func TestGetSelectLabels(t *testing.T) {
for _, tt := range testGetSelectLabelsData {
Convey("testGetSelectLabelsData", t, func() {
selectLabels, err := getSelectLabels(tt.AggregateOperator, tt.GroupByTags)
So(err, ShouldBeNil)
selectLabels := getSelectLabels(tt.AggregateOperator, tt.GroupByTags)
So(selectLabels, ShouldEqual, tt.SelectLabels)
})
}
@ -238,6 +238,7 @@ var testBuildLogsQueryData = []struct {
TableName string
AggregateOperator v3.AggregateOperator
ExpectedQuery string
Type int
}{
{
Name: "Test aggregate count on select field",
@ -251,7 +252,7 @@ var testBuildLogsQueryData = []struct {
Expression: "A",
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value DESC",
},
{
Name: "Test aggregate count on a attribute",
@ -266,7 +267,7 @@ var testBuildLogsQueryData = []struct {
Expression: "A",
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'user_name') group by ts order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND has(attributes_string_key, 'user_name') group by ts order by value DESC",
},
{
Name: "Test aggregate count on a with filter",
@ -284,7 +285,7 @@ var testBuildLogsQueryData = []struct {
Expression: "A",
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_float64_value[indexOf(attributes_float64_key, 'bytes')] > 100.000000 AND has(attributes_string_key, 'user_name') group by ts order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_float64_value[indexOf(attributes_float64_key, 'bytes')] > 100.000000 AND has(attributes_string_key, 'user_name') group by ts order by value DESC",
},
{
Name: "Test aggregate count distinct and order by value",
@ -300,7 +301,7 @@ var testBuildLogsQueryData = []struct {
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "ASC"}},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(name))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value ASC,ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(name))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value ASC",
},
{
Name: "Test aggregate count distinct on non selected field",
@ -315,7 +316,7 @@ var testBuildLogsQueryData = []struct {
Expression: "A",
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts order by value DESC",
},
{
Name: "Test aggregate count distinct with filter and groupBy",
@ -344,7 +345,7 @@ var testBuildLogsQueryData = []struct {
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND resources_string_value[indexOf(resources_string_key, 'x')] != 'abc' " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate count with multiple filter,groupBy and orderBy",
@ -375,7 +376,7 @@ var testBuildLogsQueryData = []struct {
"AND indexOf(attributes_string_key, 'method') > 0 " +
"AND indexOf(resources_string_key, 'x') > 0 " +
"group by method,x,ts " +
"order by method ASC,x ASC,ts",
"order by method ASC,x ASC",
},
{
Name: "Test aggregate avg",
@ -404,7 +405,7 @@ var testBuildLogsQueryData = []struct {
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate sum",
@ -433,7 +434,7 @@ var testBuildLogsQueryData = []struct {
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate min",
@ -462,7 +463,7 @@ var testBuildLogsQueryData = []struct {
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate max",
@ -491,7 +492,7 @@ var testBuildLogsQueryData = []struct {
"AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate PXX",
@ -516,7 +517,7 @@ var testBuildLogsQueryData = []struct {
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate RateSum",
@ -538,7 +539,7 @@ var testBuildLogsQueryData = []struct {
", sum(bytes)/60 as value from signoz_logs.distributed_logs " +
"where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts order by method ASC,ts",
"group by method,ts order by method ASC",
},
{
Name: "Test aggregate rate",
@ -561,7 +562,7 @@ var testBuildLogsQueryData = []struct {
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test aggregate RateSum without materialized column",
@ -585,7 +586,7 @@ var testBuildLogsQueryData = []struct {
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) " +
"AND indexOf(attributes_string_key, 'method') > 0 " +
"group by method,ts " +
"order by method ASC,ts",
"order by method ASC",
},
{
Name: "Test Noop",
@ -603,7 +604,7 @@ var testBuildLogsQueryData = []struct {
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string," +
"CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," +
"CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string " +
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by timestamp",
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by timestamp DESC",
},
{
Name: "Test Noop order by custom",
@ -642,7 +643,7 @@ var testBuildLogsQueryData = []struct {
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string," +
"CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," +
"CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string " +
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND severity_number != 0 order by timestamp",
"from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND severity_number != 0 order by timestamp DESC",
},
{
Name: "Test aggregate with having clause",
@ -664,7 +665,7 @@ var testBuildLogsQueryData = []struct {
},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts having value > 10 order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) group by ts having value > 10 order by value DESC",
},
{
Name: "Test aggregate with having clause and filters",
@ -690,7 +691,7 @@ var testBuildLogsQueryData = []struct {
},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' group by ts having value > 10 order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' group by ts having value > 10 order by value DESC",
},
{
Name: "Test top level key",
@ -716,7 +717,7 @@ var testBuildLogsQueryData = []struct {
},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND body ILIKE '%test%' group by ts having value > 10 order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND body ILIKE '%test%' group by ts having value > 10 order by value DESC",
},
{
Name: "Test attribute with same name as top level key",
@ -742,7 +743,7 @@ var testBuildLogsQueryData = []struct {
},
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'body')] ILIKE '%test%' group by ts having value > 10 order by ts",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'body')] ILIKE '%test%' group by ts having value > 10 order by value DESC",
},
// Tests for table panel type
@ -758,7 +759,7 @@ var testBuildLogsQueryData = []struct {
Expression: "A",
},
TableName: "logs",
ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000)",
ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by value DESC",
},
{
Name: "TABLE: Test count with groupBy",
@ -775,7 +776,7 @@ var testBuildLogsQueryData = []struct {
},
},
TableName: "logs",
ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as name, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND indexOf(attributes_string_key, 'name') > 0 group by name order by name ASC",
ExpectedQuery: "SELECT now() as ts, attributes_string_value[indexOf(attributes_string_key, 'name')] as name, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND indexOf(attributes_string_key, 'name') > 0 group by name order by value DESC",
},
{
Name: "TABLE: Test count with groupBy, orderBy",
@ -802,7 +803,8 @@ var testBuildLogsQueryData = []struct {
func TestBuildLogsQuery(t *testing.T) {
for _, tt := range testBuildLogsQueryData {
Convey("TestBuildLogsQuery", t, func() {
query, err := buildLogsQuery(tt.PanelType, tt.Start, tt.End, tt.Step, tt.BuilderQuery)
query, err := buildLogsQuery(tt.PanelType, tt.Start, tt.End, tt.Step, tt.BuilderQuery, "")
fmt.Println(query)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedQuery)
@ -844,8 +846,8 @@ var testOrderBy = []struct {
Name string
PanelType v3.PanelType
Items []v3.OrderBy
Tags []string
Result []string
Tags []v3.AttributeKey
Result string
}{
{
Name: "Test 1",
@ -860,8 +862,10 @@ var testOrderBy = []struct {
Order: "desc",
},
},
Tags: []string{"name"},
Result: []string{"name asc", "value desc"},
Tags: []v3.AttributeKey{
{Key: "name"},
},
Result: "name asc,value desc",
},
{
Name: "Test 2",
@ -876,8 +880,34 @@ var testOrderBy = []struct {
Order: "asc",
},
},
Tags: []string{"name", "bytes"},
Result: []string{"name asc", "bytes asc"},
Tags: []v3.AttributeKey{
{Key: "name"},
{Key: "bytes"},
},
Result: "name asc,bytes asc",
},
{
Name: "Test Graph item not present in tag",
PanelType: v3.PanelTypeGraph,
Items: []v3.OrderBy{
{
ColumnName: "name",
Order: "asc",
},
{
ColumnName: "bytes",
Order: "asc",
},
{
ColumnName: "method",
Order: "asc",
},
},
Tags: []v3.AttributeKey{
{Key: "name"},
{Key: "bytes"},
},
Result: "name asc,bytes asc",
},
{
Name: "Test 3",
@ -896,8 +926,11 @@ var testOrderBy = []struct {
Order: "asc",
},
},
Tags: []string{"name", "bytes"},
Result: []string{"name asc", "bytes asc", "value asc"},
Tags: []v3.AttributeKey{
{Key: "name"},
{Key: "bytes"},
},
Result: "name asc,value asc,bytes asc",
},
{
Name: "Test 4",
@ -923,16 +956,163 @@ var testOrderBy = []struct {
DataType: v3.AttributeKeyDataTypeString,
},
},
Tags: []string{"name", "bytes"},
Result: []string{"name asc", "bytes asc", "value asc", "attributes_string_value[indexOf(attributes_string_key, 'response_time')] desc"},
Tags: []v3.AttributeKey{
{Key: "name"},
{Key: "bytes"},
},
Result: "name asc,value asc,bytes asc,attributes_string_value[indexOf(attributes_string_key, 'response_time')] desc",
},
}
func TestOrderBy(t *testing.T) {
for _, tt := range testOrderBy {
Convey("testOrderBy", t, func() {
res := orderBy(tt.PanelType, tt.Items, tt.Tags)
res := orderByAttributeKeyTags(tt.PanelType, tt.Items, tt.Tags)
So(res, ShouldResemble, tt.Result)
})
}
}
// if there is no group by, a limit adds nothing for ts and table queries
// since the result is a single time series anyway,
// so the limit is handled only when a group by is present.
var testPrepLogsQueryData = []struct {
Name string
PanelType v3.PanelType
Start int64
End int64
Step int64
BuilderQuery *v3.BuilderQuery
GroupByTags []v3.AttributeKey
TableName string
AggregateOperator v3.AggregateOperator
ExpectedQuery string
Type string
}{
{
Name: "Test TS with limit- first",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
Step: 60,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
},
},
Limit: 10,
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
},
TableName: "logs",
ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 group by method order by value DESC) LIMIT 10",
Type: constants.FirstQueryGraphLimit,
},
{
Name: "Test TS with limit- first - with order by value",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
Step: 60,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
},
},
Limit: 10,
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "ASC"}},
},
TableName: "logs",
ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 group by method order by value ASC) LIMIT 10",
Type: constants.FirstQueryGraphLimit,
},
{
Name: "Test TS with limit- first - with order by attribute",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
Step: 60,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
},
},
Limit: 10,
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{{ColumnName: "method", Order: "ASC"}},
},
TableName: "logs",
ExpectedQuery: "SELECT method from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 group by method order by method ASC) LIMIT 10",
Type: constants.FirstQueryGraphLimit,
},
{
Name: "Test TS with limit- second",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
Step: 60,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
},
},
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
Limit: 2,
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 0 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND (method) IN (%s) group by method,ts order by value DESC",
Type: constants.SecondQueryGraphLimit,
},
{
Name: "Test TS with limit- second - with order by",
PanelType: v3.PanelTypeGraph,
Start: 1680066360726210000,
End: 1680066458000000000,
Step: 60,
BuilderQuery: &v3.BuilderQuery{
QueryName: "A",
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
AggregateOperator: v3.AggregateOperatorCountDistinct,
Expression: "A",
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
},
},
GroupBy: []v3.AttributeKey{{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{{ColumnName: "method", Order: "ASC"}},
Limit: 2,
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 0 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as method, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND indexOf(attributes_string_key, 'method') > 0 AND (method) IN (%s) group by method,ts order by method ASC",
Type: constants.SecondQueryGraphLimit,
},
}
func TestPrepareLogsQuery(t *testing.T) {
for _, tt := range testPrepLogsQueryData {
Convey("TestBuildLogsQuery", t, func() {
query, err := PrepareLogsQuery(tt.Start, tt.End, "", tt.PanelType, tt.BuilderQuery, tt.Type)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedQuery)
})
}
}

View File

@ -235,7 +235,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
// TODO: add support for logs and traces
if builderQuery.DataSource == v3.DataSourceLogs {
query, err := logsV3.PrepareLogsQuery(params.Start, params.End, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery)
query, err := logsV3.PrepareLogsQuery(params.Start, params.End, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, "")
if err != nil {
errQueriesByName[queryName] = err.Error()
continue

View File

@ -6,6 +6,7 @@ import (
"github.com/SigNoz/govaluate"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.uber.org/zap"
)
@ -39,7 +40,7 @@ var SupportedFunctions = []string{
var EvalFuncs = map[string]govaluate.ExpressionFunction{}
type prepareTracesQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, keys map[string]v3.AttributeKey) (string, error)
type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery) (string, error)
type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, graphLimitQtype string) (string, error)
type prepareMetricQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery) (string, error)
type QueryBuilder struct {
@ -152,11 +153,25 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
}
queries[queryName] = queryString
case v3.DataSourceLogs:
queryString, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query)
if err != nil {
return nil, err
// for a ts query with limit, compose and assign the final query here since it is already fully formed
if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
limitQuery, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, constants.FirstQueryGraphLimit)
if err != nil {
return nil, err
}
placeholderQuery, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, constants.SecondQueryGraphLimit)
if err != nil {
return nil, err
}
query := fmt.Sprintf(placeholderQuery, limitQuery)
queries[queryName] = query
} else {
queryString, err := qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, "")
if err != nil {
return nil, err
}
queries[queryName] = queryString
}
queries[queryName] = queryString
case v3.DataSourceMetrics:
queryString, err := qb.options.BuildMetricQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query)
if err != nil {

View File

@ -308,3 +308,6 @@ var StaticFieldsLogsV3 = map[string]v3.AttributeKey{
const SigNozOrderByValue = "#SIGNOZ_VALUE"
const TIMESTAMP = "timestamp"
const FirstQueryGraphLimit = "first_query_graph_limit"
const SecondQueryGraphLimit = "second_query_graph_limit"

View File

@ -143,8 +143,11 @@ func ValidateAndCastValue(v interface{}, dataType v3.AttributeKeyDataType) (inte
// ClickHouseFormattedValue formats the value to be used in clickhouse query
func ClickHouseFormattedValue(v interface{}) string {
// if it's pointer convert it to a value
v = getPointerValue(v)
switch x := v.(type) {
case int, int8, int16, int32, int64:
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64:
return fmt.Sprintf("%d", x)
case float32, float64:
return fmt.Sprintf("%f", x)
@ -152,6 +155,7 @@ func ClickHouseFormattedValue(v interface{}) string {
return fmt.Sprintf("'%s'", x)
case bool:
return fmt.Sprintf("%v", x)
case []interface{}:
if len(x) == 0 {
return ""
@ -167,7 +171,7 @@ func ClickHouseFormattedValue(v interface{}) string {
}
str += "]"
return str
case int, int8, int16, int32, int64, float32, float64, bool:
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool:
return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
default:
zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
@ -178,3 +182,42 @@ func ClickHouseFormattedValue(v interface{}) string {
return ""
}
}
func getPointerValue(v interface{}) interface{} {
switch x := v.(type) {
case *uint8:
return *x
case *uint16:
return *x
case *uint32:
return *x
case *uint64:
return *x
case *int:
return *x
case *int8:
return *x
case *int16:
return *x
case *int32:
return *x
case *int64:
return *x
case *float32:
return *x
case *float64:
return *x
case *string:
return *x
case *bool:
return *x
case []interface{}:
values := []interface{}{}
for _, val := range x {
values = append(values, getPointerValue(val))
}
return values
default:
return v
}
}
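
With the pointer handling above, values arriving as `*int64`, `*string`, and so on are dereferenced before formatting, including elements inside slices. A small usage sketch, assuming the function lives in the query-service `utils` package as the surrounding code suggests:

```go
package main

import (
	"fmt"

	// import path assumed from the repository layout
	"go.signoz.io/signoz/pkg/query-service/utils"
)

func main() {
	limit := int64(100)
	method := "GET"
	// pointers are dereferenced by getPointerValue before formatting
	fmt.Println(utils.ClickHouseFormattedValue(&limit))  // 100
	fmt.Println(utils.ClickHouseFormattedValue(&method)) // 'GET'
	// slices of pointers are unwrapped element by element
	fmt.Println(utils.ClickHouseFormattedValue([]interface{}{&limit, &limit})) // [100,100]
}
```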

View File

@ -291,3 +291,86 @@ func TestValidateAndCastValue(t *testing.T) {
})
}
}
var one = 1
var onePointOne = 1.1
var oneString = "1"
var trueBool = true
var testClickHouseFormattedValueData = []struct {
name string
value interface{}
want interface{}
}{
{
name: "int",
value: 1,
want: "1",
},
{
name: "int64",
value: int64(1),
want: "1",
},
{
name: "float32",
value: float32(1.1),
want: "1.100000",
},
{
name: "string",
value: "1",
want: "'1'",
},
{
name: "bool",
value: true,
want: "true",
},
{
name: "[]interface{}",
value: []interface{}{1, 2},
want: "[1,2]",
},
{
name: "[]interface{}",
value: []interface{}{"1", "2"},
want: "['1','2']",
},
{
name: "pointer int",
value: &one,
want: "1",
},
{
name: "pointer float64",
value: &onePointOne,
want: "1.100000",
},
{
name: "pointer string",
value: &oneString,
want: "'1'",
},
{
name: "pointer bool",
value: &trueBool,
want: "true",
},
{
name: "pointer []interface{}",
value: []interface{}{&one, &one},
want: "[1,1]",
},
}
func TestClickHouseFormattedValue(t *testing.T) {
for _, tt := range testClickHouseFormattedValueData {
t.Run(tt.name, func(t *testing.T) {
got := ClickHouseFormattedValue(tt.value)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ClickHouseFormattedValue() = %v, want %v", got, tt.want)
}
})
}
}