chore: clickhouse sql support for v3 (#6778)

Srikanth Chekuri 2025-01-21 09:39:40 +05:30 committed by GitHub
parent 89541862cc
commit 4ec1e66c7e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 42 additions and 3 deletions

View File

@@ -218,6 +218,7 @@ func NewReaderFromClickhouseConnection(
             MaxBytesToRead:                   os.Getenv("ClickHouseMaxBytesToRead"),
             OptimizeReadInOrderRegex:         os.Getenv("ClickHouseOptimizeReadInOrderRegex"),
             OptimizeReadInOrderRegexCompiled: regexCompiled,
+            MaxResultRowsForCHQuery:          constants.MaxResultRowsForCHQuery,
         },
     }
@@ -4198,9 +4199,26 @@ func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([
         var t time.Time
         for idx, v := range vars {
             if columnNames[idx] == "timestamp" {
-                t = time.Unix(0, int64(*v.(*uint64)))
+                switch v := v.(type) {
+                case *uint64:
+                    t = time.Unix(0, int64(*v))
+                case *time.Time:
+                    t = *v
+                }
             } else if columnNames[idx] == "timestamp_datetime" {
                 t = *v.(*time.Time)
+            } else if columnNames[idx] == "events" {
+                var events []map[string]interface{}
+                eventsFromDB, ok := v.(*[]string)
+                if !ok {
+                    continue
+                }
+                for _, event := range *eventsFromDB {
+                    var eventMap map[string]interface{}
+                    json.Unmarshal([]byte(event), &eventMap)
+                    events = append(events, eventMap)
+                }
+                row[columnNames[idx]] = events
             } else {
                 row[columnNames[idx]] = v
             }
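
A note on the hunk above: the list-result decoder now accepts a timestamp scanned as either *uint64 (epoch nanoseconds) or *time.Time, and decodes an events column delivered as a slice of JSON strings, which matters once raw ClickHouse SQL list queries feed this path. The following standalone sketch reproduces the same decode pattern outside the reader; the column names and sample payloads are illustrative only, not taken from the reader.

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// decodeValue mirrors the pattern in GetListResultV3: timestamps may be scanned
// as *uint64 (epoch nanoseconds) or *time.Time, and an "events" column arrives
// as a *[]string of JSON documents. Column names here are illustrative.
func decodeValue(column string, v interface{}, row map[string]interface{}) time.Time {
    var t time.Time
    switch column {
    case "timestamp":
        switch v := v.(type) {
        case *uint64:
            t = time.Unix(0, int64(*v))
        case *time.Time:
            t = *v
        }
    case "events":
        var events []map[string]interface{}
        if eventsFromDB, ok := v.(*[]string); ok {
            for _, event := range *eventsFromDB {
                var eventMap map[string]interface{}
                if err := json.Unmarshal([]byte(event), &eventMap); err != nil {
                    continue // skip malformed event payloads
                }
                events = append(events, eventMap)
            }
            row[column] = events
        }
    default:
        row[column] = v
    }
    return t
}

func main() {
    row := map[string]interface{}{}
    ts := time.Now()
    events := []string{`{"name":"exception","level":"error"}`}

    t := decodeValue("timestamp", &ts, row)
    decodeValue("events", &events, row)

    fmt.Println(t.UTC(), row["events"])
}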

View File

@@ -17,6 +17,7 @@ type ClickhouseQuerySettings struct {
     MaxBytesToRead                   string
     OptimizeReadInOrderRegex         string
     OptimizeReadInOrderRegexCompiled *regexp.Regexp
+    MaxResultRowsForCHQuery          int
 }
 type clickhouseConnWrapper struct {
@@ -44,6 +45,10 @@ func (c clickhouseConnWrapper) addClickHouseSettings(ctx context.Context, query
         settings["log_comment"] = logComment
     }
+    if ctx.Value("enforce_max_result_rows") != nil {
+        settings["max_result_rows"] = c.settings.MaxResultRowsForCHQuery
+    }
     if c.settings.MaxBytesToRead != "" {
         settings["max_bytes_to_read"] = c.settings.MaxBytesToRead
     }
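
The hunk above only builds the per-query settings map; how that map reaches ClickHouse is not part of this commit's visible hunks. A plausible sketch, assuming the wrapper hands the settings to clickhouse-go v2 via clickhouse.Context and WithSettings (an assumption, not confirmed by this diff):

package chsettings

import (
    "context"

    "github.com/ClickHouse/clickhouse-go/v2"
    "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// applySettings is a sketch of how a per-query settings map (including
// max_result_rows when "enforce_max_result_rows" is set on the context)
// could be attached to a clickhouse-go v2 query. The wrapper's real
// plumbing is not shown in this commit's hunks.
func applySettings(ctx context.Context, maxResultRows int) context.Context {
    settings := clickhouse.Settings{}
    if ctx.Value("enforce_max_result_rows") != nil {
        settings["max_result_rows"] = maxResultRows
    }
    return clickhouse.Context(ctx, clickhouse.WithSettings(settings))
}

// runCapped opts in to the cap via the context flag, then runs the query
// with the settings attached.
func runCapped(ctx context.Context, conn driver.Conn, query string) (driver.Rows, error) {
    ctx = context.WithValue(ctx, "enforce_max_result_rows", true)
    return conn.Query(applySettings(ctx, 1_000_000), query)
}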

View File

@@ -465,7 +465,15 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
         }
     }
-    queries, err := q.builder.PrepareQueries(params)
+    queries := make(map[string]string)
+    var err error
+    if params.CompositeQuery.QueryType == v3.QueryTypeBuilder {
+        queries, err = q.builder.PrepareQueries(params)
+    } else if params.CompositeQuery.QueryType == v3.QueryTypeClickHouseSQL {
+        for name, chQuery := range params.CompositeQuery.ClickHouseQueries {
+            queries[name] = chQuery.Query
+        }
+    }
     if err != nil {
         return nil, nil, err
@@ -534,7 +542,12 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3)
     case v3.QueryTypePromQL:
         results, errQueriesByName, err = q.runPromQueries(ctx, params)
     case v3.QueryTypeClickHouseSQL:
-        results, errQueriesByName, err = q.runClickHouseQueries(ctx, params)
+        ctx = context.WithValue(ctx, "enforce_max_result_rows", true)
+        if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
+            results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
+        } else {
+            results, errQueriesByName, err = q.runClickHouseQueries(ctx, params)
+        }
     default:
         err = fmt.Errorf("invalid query type")
     }
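
Taken together with the runBuilderListQueries change above, this means a ClickHouse SQL query attached to a list or trace panel is now executed through the list-query path, with the raw SQL copied straight into the queries map instead of going through the query builder. A self-contained sketch of that routing decision, using stand-in types and constant values that only mirror the names visible in this diff (the real definitions live in SigNoz's v3 model package):

package main

import "fmt"

// Stand-in types mirroring only the fields referenced in the diff.
type ClickHouseQuery struct{ Query string }

type CompositeQuery struct {
    QueryType         string
    PanelType         string
    ClickHouseQueries map[string]*ClickHouseQuery
}

const (
    QueryTypeClickHouseSQL = "clickhouse_sql"
    PanelTypeList          = "list"
    PanelTypeTrace         = "trace"
)

// route reports which execution path QueryRange takes after this commit:
// ClickHouse SQL queries for list/trace panels go through the list-query
// path, everything else keeps using runClickHouseQueries.
func route(cq CompositeQuery) string {
    if cq.QueryType == QueryTypeClickHouseSQL &&
        (cq.PanelType == PanelTypeList || cq.PanelType == PanelTypeTrace) {
        return "runBuilderListQueries"
    }
    if cq.QueryType == QueryTypeClickHouseSQL {
        return "runClickHouseQueries"
    }
    return "builder/promql paths"
}

func main() {
    cq := CompositeQuery{
        QueryType: QueryTypeClickHouseSQL,
        PanelType: PanelTypeList,
        ClickHouseQueries: map[string]*ClickHouseQuery{
            "A": {Query: "SELECT timestamp, body FROM logs LIMIT 10"}, // illustrative query
        },
    }
    fmt.Println(route(cq)) // runBuilderListQueries
}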

View File

@@ -548,6 +548,7 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3)
         if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
             results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
         } else {
+            ctx = context.WithValue(ctx, "enforce_max_result_rows", true)
             results, errQueriesByName, err = q.runClickHouseQueries(ctx, params)
         }
     default:

View File

@@ -738,3 +738,5 @@ func init() {
 }
 const TRACE_V4_MAX_PAGINATION_LIMIT = 10000
+const MaxResultRowsForCHQuery = 1_000_000
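
One behavioural consequence of this cap for callers that opt in through the enforce_max_result_rows context flag: ClickHouse's result_overflow_mode defaults to throw, so a result larger than 1,000,000 rows fails the query rather than being silently truncated. A minimal sketch of surfacing that failure with clickhouse-go v2; the error-inspection code is illustrative and not part of this commit:

package chlimits

import (
    "errors"
    "fmt"

    "github.com/ClickHouse/clickhouse-go/v2"
)

// MaxResultRowsForCHQuery mirrors the constant added above.
const MaxResultRowsForCHQuery = 1_000_000

// explainOverflow wraps the server-side exception raised when a capped query
// exceeds max_result_rows (or hits another server limit); with the default
// result_overflow_mode of "throw", the query errors instead of truncating.
func explainOverflow(err error) error {
    var chErr *clickhouse.Exception
    if errors.As(err, &chErr) {
        return fmt.Errorf("query exceeded the %d-row cap or another server limit (code %d): %s",
            MaxResultRowsForCHQuery, chErr.Code, chErr.Message)
    }
    return err
}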