feat: trace v4 integration (#6226)

* feat: trace v4 initial commit

* fix: add remaining files

* fix: integrate with querier

* fix: get trace by ID API updated

* fix: add servicename resource filter

* fix: tests

* fix: use correct prepQuery

* fix: services page

* fix: minor fixes to use the new table in APIs and querier

* fix: add support for window based pagination

* feat: support for faster trace detail

* fix: searchTraces

* fix: attribute enrichment updated and issue in group by

* fix: issues in group by

* fix: enrichment using alias

* fix: test file added

* fix: tests

* fix: group by with filters

* fix: add subquery

* fix: trigger build

* fix: update pagination logic and a few ClickHouse column names

* fix: update qb

* fix: add tests

* feat: minor fixes

* fix: update pagination logic

* fix: update pagination logic

* fix: remove utils

* fix: remove unwanted APIs

* fix: attribute and attribute values v2

* fix: autocomplete APIs updated

* fix: tests fixed

* feat: minor fixes

* fix: update telemetry functions

* fix: don't use alias, use proper column names

* fix: move models to its own file

* fix: minor fixes

* fix: address comments

* fix: add to serviceoverview function

* fix: add changes to overview function

* fix: address comments

* fix: remove printlines

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
Nityananda Gohain 2024-11-22 12:00:29 +05:30 committed by GitHub
parent e46d969143
commit 67058b2a17
21 changed files with 227 additions and 136 deletions
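
In short, the change threads a new UseTraceNewSchema option from the command-line flag through the ClickHouse reader, the queriers, the API handlers, and the rule manager; wherever traces are queried or enriched, the v4 trace package is used when the flag is set and the existing v3 path otherwise. A minimal, self-contained sketch of that selection pattern follows; the two prepare functions are illustrative stand-ins for tracesV3.PrepareTracesQuery and tracesV4.PrepareTracesQuery, whose real signatures take the time range, panel type, and builder query.

package main

import "fmt"

// Illustrative stand-ins for tracesV3.PrepareTracesQuery and
// tracesV4.PrepareTracesQuery; the real functions build ClickHouse SQL
// from the time range, panel type, and builder query.
func prepareTracesQueryV3() string { return "query against the v2 trace index table" }
func prepareTracesQueryV4() string { return "query against the v3 trace index table" }

func main() {
    useTraceNewSchema := true // wired from the --use-trace-new-schema flag

    // Selection pattern repeated across the hunks below: default to the
    // v3 builder and switch to v4 only when the new-schema flag is set.
    prepare := prepareTracesQueryV3
    if useTraceNewSchema {
        prepare = prepareTracesQueryV4
    }
    fmt.Println(prepare())
}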

View File

@@ -38,9 +38,10 @@ type APIHandlerOptions struct {
 Cache cache.Cache
 Gateway *httputil.ReverseProxy
 // Querier Influx Interval
 FluxInterval time.Duration
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 UseLicensesV3 bool
 }

 type APIHandler struct {

@@ -66,6 +67,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
 Cache: opts.Cache,
 FluxInterval: opts.FluxInterval,
 UseLogsNewSchema: opts.UseLogsNewSchema,
+UseTraceNewSchema: opts.UseTraceNewSchema,
 UseLicensesV3: opts.UseLicensesV3,
 })

View File

@@ -2,32 +2,31 @@ package api
 import (
 "net/http"
-"go.signoz.io/signoz/ee/query-service/app/db"
-"go.signoz.io/signoz/ee/query-service/model"
-baseapp "go.signoz.io/signoz/pkg/query-service/app"
-basemodel "go.signoz.io/signoz/pkg/query-service/model"
-"go.uber.org/zap"
 )

 func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
-if !ah.CheckFeature(basemodel.SmartTraceDetail) {
-zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
-ah.APIHandler.SearchTraces(w, r)
-return
-}
-searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
-if err != nil {
-RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
-return
-}
-result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
-if ah.HandleError(w, err, http.StatusBadRequest) {
-return
-}
-ah.WriteJSON(w, r, result)
+ah.APIHandler.SearchTraces(w, r)
+return
+
+// This is commented since this will be taken care by new trace API
+// if !ah.CheckFeature(basemodel.SmartTraceDetail) {
+// zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
+// ah.APIHandler.SearchTraces(w, r)
+// return
+// }
+// searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
+// if err != nil {
+// RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
+// return
+// }
+// result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
+// if ah.HandleError(w, err, http.StatusBadRequest) {
+// return
+// }
+// ah.WriteJSON(w, r, result)
 }

View File

@@ -26,8 +26,9 @@ func NewDataConnector(
 dialTimeout time.Duration,
 cluster string,
 useLogsNewSchema bool,
+useTraceNewSchema bool,
 ) *ClickhouseReader {
-ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
+ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema, useTraceNewSchema)
 return &ClickhouseReader{
 conn: ch.GetConn(),
 appdb: localDB,

View File

@@ -77,6 +77,7 @@ type ServerOptions struct {
 Cluster string
 GatewayUrl string
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 UseLicensesV3 bool
 }

@@ -156,6 +157,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 serverOptions.DialTimeout,
 serverOptions.Cluster,
 serverOptions.UseLogsNewSchema,
+serverOptions.UseTraceNewSchema,
 )
 go qb.Start(readerReady)
 reader = qb

@@ -189,6 +191,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 serverOptions.DisableRules,
 lm,
 serverOptions.UseLogsNewSchema,
+serverOptions.UseTraceNewSchema,
 )
 if err != nil {

@@ -270,6 +273,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 FluxInterval: fluxInterval,
 Gateway: gatewayProxy,
 UseLogsNewSchema: serverOptions.UseLogsNewSchema,
+UseTraceNewSchema: serverOptions.UseTraceNewSchema,
 UseLicensesV3: serverOptions.UseLicensesV3,
 }

@@ -737,7 +741,8 @@ func makeRulesManager(
 cache cache.Cache,
 disableRules bool,
 fm baseint.FeatureLookup,
-useLogsNewSchema bool) (*baserules.Manager, error) {
+useLogsNewSchema bool,
+useTraceNewSchema bool) (*baserules.Manager, error) {
 // create engine
 pqle, err := pqle.FromConfigPath(promConfigPath)

@@ -767,8 +772,9 @@
 EvalDelay: baseconst.GetEvalDelay(),
 PrepareTaskFunc: rules.PrepareTaskFunc,
-PrepareTestRuleFunc: rules.TestNotification,
 UseLogsNewSchema: useLogsNewSchema,
+UseTraceNewSchema: useTraceNewSchema,
+PrepareTestRuleFunc: rules.TestNotification,
 }
 // create Manager

View File

@@ -94,6 +94,7 @@ func main() {
 var cluster string
 var useLogsNewSchema bool
+var useTraceNewSchema bool
 var useLicensesV3 bool
 var cacheConfigPath, fluxInterval string
 var enableQueryServiceLogOTLPExport bool

@@ -105,6 +106,7 @@ func main() {
 var gatewayUrl string
 flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
+flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
 flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
 flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
 flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")

@@ -145,6 +147,7 @@ func main() {
 Cluster: cluster,
 GatewayUrl: gatewayUrl,
 UseLogsNewSchema: useLogsNewSchema,
+UseTraceNewSchema: useTraceNewSchema,
 UseLicensesV3: useLicensesV3,
 }
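
For reference, a tiny, self-contained sketch of how the new flag is registered and consumed; the flag name, default, and help text mirror the hunk above, while the rest of the program is illustrative (the real main wires the parsed value into ServerOptions.UseTraceNewSchema).

package main

import (
    "flag"
    "fmt"
)

func main() {
    // Flag name, default, and help text mirror the hunk above.
    var useTraceNewSchema bool
    flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
    flag.Parse()

    fmt.Println("use-trace-new-schema:", useTraceNewSchema)
}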

View File

@@ -26,6 +26,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 opts.FF,
 opts.Reader,
 opts.UseLogsNewSchema,
+opts.UseTraceNewSchema,
 baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
 )

@@ -122,6 +123,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
 opts.FF,
 opts.Reader,
 opts.UseLogsNewSchema,
+opts.UseTraceNewSchema,
 baserules.WithSendAlways(),
 baserules.WithSendUnmatched(),
 )

View File

@@ -167,7 +167,7 @@ func NewReader(
 dialTimeout time.Duration,
 cluster string,
 useLogsNewSchema bool,
-// useTraceNewSchema bool, // TODO: uncomment this in integration PR
+useTraceNewSchema bool,
 ) *ClickHouseReader {
 datasource := os.Getenv("ClickHouseUrl")

@@ -178,7 +178,7 @@ func NewReader(
 zap.L().Fatal("failed to initialize ClickHouse", zap.Error(err))
 }
-return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster, useLogsNewSchema)
+return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster, useLogsNewSchema, useTraceNewSchema)
 }

 func NewReaderFromClickhouseConnection(

@@ -189,7 +189,7 @@ func NewReaderFromClickhouseConnection(
 featureFlag interfaces.FeatureLookup,
 cluster string,
 useLogsNewSchema bool,
-// useTraceNewSchema bool,
+useTraceNewSchema bool,
 ) *ClickHouseReader {
 alertManager, err := am.New()
 if err != nil {

@@ -229,11 +229,10 @@ func NewReaderFromClickhouseConnection(
 traceTableName := options.primary.IndexTable
 traceLocalTableName := options.primary.LocalIndexTable
-// TODO: uncomment this in integration PR
-// if useTraceNewSchema {
-// traceTableName = options.primary.TraceIndexTableV3
-// traceLocalTableName = options.primary.TraceLocalTableNameV3
-// }
+if useTraceNewSchema {
+traceTableName = options.primary.TraceIndexTableV3
+traceLocalTableName = options.primary.TraceLocalTableNameV3
+}

 return &ClickHouseReader{
 db: wrap,

@@ -262,7 +261,8 @@ func NewReaderFromClickhouseConnection(
 cluster: cluster,
 queryProgressTracker: queryprogress.NewQueryProgressTracker(),
 useLogsNewSchema: useLogsNewSchema,
+useTraceNewSchema: useTraceNewSchema,
 logsTableV2: options.primary.LogsTableV2,
 logsLocalTableV2: options.primary.LogsLocalTableV2,

@@ -271,7 +271,6 @@ func NewReaderFromClickhouseConnection(
 logsTableName: logsTableName,
 logsLocalTableName: logsLocalTableName,
-// useTraceNewSchema: useTraceNewSchema,
 traceLocalTableName: traceLocalTableName,
 traceTableName: traceTableName,
 traceResourceTableV3: options.primary.TraceResourceTableV3,
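
As a side note, the table selection added above can be read in isolation roughly as the following sketch; the tableOptions struct and the table-name values are illustrative stand-ins for the reader's options.primary fields, and only the selection logic mirrors the hunk.

package main

import "fmt"

// tableOptions is an illustrative stand-in for the reader's options.primary
// fields named in the hunk above.
type tableOptions struct {
    IndexTable            string
    LocalIndexTable       string
    TraceIndexTableV3     string
    TraceLocalTableNameV3 string
}

func traceTables(o tableOptions, useTraceNewSchema bool) (string, string) {
    // Keep the existing index tables by default; switch to the v3 tables
    // only when the new trace schema is enabled.
    table, local := o.IndexTable, o.LocalIndexTable
    if useTraceNewSchema {
        table, local = o.TraceIndexTableV3, o.TraceLocalTableNameV3
    }
    return table, local
}

func main() {
    // Table names here are illustrative placeholders.
    opts := tableOptions{
        IndexTable:            "distributed_index_v2",
        LocalIndexTable:       "index_v2",
        TraceIndexTableV3:     "distributed_index_v3",
        TraceLocalTableNameV3: "index_v3",
    }
    table, local := traceTables(opts, true)
    fmt.Println(table, local)
}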

View File

@@ -39,6 +39,7 @@ import (
 querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
 "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
 tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
+tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
 "go.signoz.io/signoz/pkg/query-service/auth"
 "go.signoz.io/signoz/pkg/query-service/cache"
 "go.signoz.io/signoz/pkg/query-service/common"

@@ -110,8 +111,9 @@ type APIHandler struct {
 // Websocket connection upgrader
 Upgrader *websocket.Upgrader
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 UseLicensesV3 bool
 hostsRepo *inframetrics.HostsRepo
 processesRepo *inframetrics.ProcessesRepo

@@ -163,6 +165,7 @@ type APIHandlerOpts struct {
 // Use Logs New schema
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 // Use Licenses V3 structure
 UseLicensesV3 bool
 }

@@ -176,21 +179,23 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
 }
 querierOpts := querier.QuerierOptions{
 Reader: opts.Reader,
 Cache: opts.Cache,
 KeyGenerator: queryBuilder.NewKeyGenerator(),
 FluxInterval: opts.FluxInterval,
 FeatureLookup: opts.FeatureFlags,
 UseLogsNewSchema: opts.UseLogsNewSchema,
+UseTraceNewSchema: opts.UseTraceNewSchema,
 }
 querierOptsV2 := querierV2.QuerierOptions{
 Reader: opts.Reader,
 Cache: opts.Cache,
 KeyGenerator: queryBuilder.NewKeyGenerator(),
 FluxInterval: opts.FluxInterval,
 FeatureLookup: opts.FeatureFlags,
 UseLogsNewSchema: opts.UseLogsNewSchema,
+UseTraceNewSchema: opts.UseTraceNewSchema,
 }
 querier := querier.NewQuerier(querierOpts)

@@ -224,6 +229,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
 querier: querier,
 querierV2: querierv2,
 UseLogsNewSchema: opts.UseLogsNewSchema,
+UseTraceNewSchema: opts.UseTraceNewSchema,
 UseLicensesV3: opts.UseLicensesV3,
 hostsRepo: hostsRepo,
 processesRepo: processesRepo,

@@ -242,9 +248,14 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
 logsQueryBuilder = logsv4.PrepareLogsQuery
 }
+tracesQueryBuilder := tracesV3.PrepareTracesQuery
+if opts.UseTraceNewSchema {
+tracesQueryBuilder = tracesV4.PrepareTracesQuery
+}
 builderOpts := queryBuilder.QueryBuilderOptions{
 BuildMetricQuery: metricsv3.PrepareMetricQuery,
-BuildTraceQuery: tracesV3.PrepareTracesQuery,
+BuildTraceQuery: tracesQueryBuilder,
 BuildLogQuery: logsQueryBuilder,
 }
 aH.queryBuilder = queryBuilder.NewQueryBuilder(builderOpts, aH.featureFlags)

@@ -4330,7 +4341,12 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
 RespondError(w, apiErrObj, errQuriesByName)
 return
 }
-tracesV3.Enrich(queryRangeParams, spanKeys)
+if aH.UseTraceNewSchema {
+tracesV4.Enrich(queryRangeParams, spanKeys)
+} else {
+tracesV3.Enrich(queryRangeParams, spanKeys)
+}
 }
 // WARN: Only works for AND operator in traces query

@@ -4800,7 +4816,11 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
 RespondError(w, apiErrObj, errQuriesByName)
 return
 }
-tracesV3.Enrich(queryRangeParams, spanKeys)
+if aH.UseTraceNewSchema {
+tracesV4.Enrich(queryRangeParams, spanKeys)
+} else {
+tracesV3.Enrich(queryRangeParams, spanKeys)
+}
 }
 // WARN: Only works for AND operator in traces query

View File

@@ -10,6 +10,7 @@ import (
 logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
 metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
 tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
+tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
 "go.signoz.io/signoz/pkg/query-service/common"
 "go.signoz.io/signoz/pkg/query-service/constants"
 v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

@@ -158,11 +159,16 @@ func (q *querier) runBuilderQuery(
 if builderQuery.DataSource == v3.DataSourceTraces {
+tracesQueryBuilder := tracesV3.PrepareTracesQuery
+if q.UseTraceNewSchema {
+tracesQueryBuilder = tracesV4.PrepareTracesQuery
+}
 var query string
 var err error
 // for ts query with group by and limit form two queries
 if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
-limitQuery, err := tracesV3.PrepareTracesQuery(
+limitQuery, err := tracesQueryBuilder(
 start,
 end,
 params.CompositeQuery.PanelType,

@@ -173,7 +179,7 @@ func (q *querier) runBuilderQuery(
 ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
 return
 }
-placeholderQuery, err := tracesV3.PrepareTracesQuery(
+placeholderQuery, err := tracesQueryBuilder(
 start,
 end,
 params.CompositeQuery.PanelType,

@@ -186,7 +192,7 @@ func (q *querier) runBuilderQuery(
 }
 query = fmt.Sprintf(placeholderQuery, limitQuery)
 } else {
-query, err = tracesV3.PrepareTracesQuery(
+query, err = tracesQueryBuilder(
 start,
 end,
 params.CompositeQuery.PanelType,

View File

@@ -11,6 +11,7 @@ import (
 metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
 "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
 tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
+tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
 "go.signoz.io/signoz/pkg/query-service/common"
 "go.signoz.io/signoz/pkg/query-service/constants"
 chErrors "go.signoz.io/signoz/pkg/query-service/errors"

@@ -65,10 +66,11 @@ type QuerierOptions struct {
 FeatureLookup interfaces.FeatureLookup
 // used for testing
 TestingMode bool
 ReturnedSeries []*v3.Series
 ReturnedErr error
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 }

 func NewQuerier(opts QuerierOptions) interfaces.Querier {

@@ -76,6 +78,10 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
 if opts.UseLogsNewSchema {
 logsQueryBuilder = logsV4.PrepareLogsQuery
 }
+tracesQueryBuilder := tracesV3.PrepareTracesQuery
+if opts.UseTraceNewSchema {
+tracesQueryBuilder = tracesV4.PrepareTracesQuery
+}
 qc := querycache.NewQueryCache(querycache.WithCache(opts.Cache), querycache.WithFluxInterval(opts.FluxInterval))

@@ -87,16 +93,17 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
 fluxInterval: opts.FluxInterval,
 builder: queryBuilder.NewQueryBuilder(queryBuilder.QueryBuilderOptions{
-BuildTraceQuery: tracesV3.PrepareTracesQuery,
+BuildTraceQuery: tracesQueryBuilder,
 BuildLogQuery: logsQueryBuilder,
 BuildMetricQuery: metricsV3.PrepareMetricQuery,
 }, opts.FeatureLookup),
 featureLookUp: opts.FeatureLookup,
 testingMode: opts.TestingMode,
 returnedSeries: opts.ReturnedSeries,
 returnedErr: opts.ReturnedErr,
 UseLogsNewSchema: opts.UseLogsNewSchema,
+UseTraceNewSchema: opts.UseTraceNewSchema,
 }
 }

View File

@@ -1383,6 +1383,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
 featureManager.StartManager(),
 "",
 true,
+true,
 )
 q := &querier{

View File

@@ -11,6 +11,7 @@ import (
 metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
 metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4"
 tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
+tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
 "go.signoz.io/signoz/pkg/query-service/common"
 "go.signoz.io/signoz/pkg/query-service/constants"
 v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

@@ -158,11 +159,16 @@ func (q *querier) runBuilderQuery(
 if builderQuery.DataSource == v3.DataSourceTraces {
+tracesQueryBuilder := tracesV3.PrepareTracesQuery
+if q.UseTraceNewSchema {
+tracesQueryBuilder = tracesV4.PrepareTracesQuery
+}
 var query string
 var err error
 // for ts query with group by and limit form two queries
 if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
-limitQuery, err := tracesV3.PrepareTracesQuery(
+limitQuery, err := tracesQueryBuilder(
 start,
 end,
 params.CompositeQuery.PanelType,

@@ -173,7 +179,7 @@ func (q *querier) runBuilderQuery(
 ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
 return
 }
-placeholderQuery, err := tracesV3.PrepareTracesQuery(
+placeholderQuery, err := tracesQueryBuilder(
 start,
 end,
 params.CompositeQuery.PanelType,

@@ -186,7 +192,7 @@ func (q *querier) runBuilderQuery(
 }
 query = fmt.Sprintf(placeholderQuery, limitQuery)
 } else {
-query, err = tracesV3.PrepareTracesQuery(
+query, err = tracesQueryBuilder(
 start,
 end,
 params.CompositeQuery.PanelType,

View File

@@ -11,6 +11,7 @@ import (
 metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4"
 "go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
 tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
+tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
 "go.signoz.io/signoz/pkg/query-service/common"
 "go.signoz.io/signoz/pkg/query-service/constants"
 chErrors "go.signoz.io/signoz/pkg/query-service/errors"

@@ -64,10 +65,11 @@ type QuerierOptions struct {
 FeatureLookup interfaces.FeatureLookup
 // used for testing
 TestingMode bool
 ReturnedSeries []*v3.Series
 ReturnedErr error
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 }

 func NewQuerier(opts QuerierOptions) interfaces.Querier {

@@ -76,6 +78,11 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
 logsQueryBuilder = logsV4.PrepareLogsQuery
 }
+tracesQueryBuilder := tracesV3.PrepareTracesQuery
+if opts.UseTraceNewSchema {
+tracesQueryBuilder = tracesV4.PrepareTracesQuery
+}
 qc := querycache.NewQueryCache(querycache.WithCache(opts.Cache), querycache.WithFluxInterval(opts.FluxInterval))
 return &querier{

@@ -86,16 +93,17 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
 fluxInterval: opts.FluxInterval,
 builder: queryBuilder.NewQueryBuilder(queryBuilder.QueryBuilderOptions{
-BuildTraceQuery: tracesV3.PrepareTracesQuery,
+BuildTraceQuery: tracesQueryBuilder,
 BuildLogQuery: logsQueryBuilder,
 BuildMetricQuery: metricsV4.PrepareMetricQuery,
 }, opts.FeatureLookup),
 featureLookUp: opts.FeatureLookup,
 testingMode: opts.TestingMode,
 returnedSeries: opts.ReturnedSeries,
 returnedErr: opts.ReturnedErr,
 UseLogsNewSchema: opts.UseLogsNewSchema,
+UseTraceNewSchema: opts.UseTraceNewSchema,
 }
 }

View File

@@ -1437,6 +1437,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
 featureManager.StartManager(),
 "",
 true,
+true,
 )
 q := &querier{

View File

@@ -67,6 +67,7 @@ type ServerOptions struct {
 FluxInterval string
 Cluster string
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 }

 // Server runs HTTP, Mux and a grpc server

@@ -130,6 +131,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 serverOptions.DialTimeout,
 serverOptions.Cluster,
 serverOptions.UseLogsNewSchema,
+serverOptions.UseTraceNewSchema,
 )
 go clickhouseReader.Start(readerReady)
 reader = clickhouseReader

@@ -157,7 +159,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 rm, err := makeRulesManager(
 serverOptions.PromConfigPath,
 constants.GetAlertManagerApiPrefix(),
-serverOptions.RuleRepoURL, localDB, reader, c, serverOptions.DisableRules, fm, serverOptions.UseLogsNewSchema)
+serverOptions.RuleRepoURL, localDB, reader, c, serverOptions.DisableRules, fm, serverOptions.UseLogsNewSchema, serverOptions.UseTraceNewSchema)
 if err != nil {
 return nil, err
 }

@@ -202,6 +204,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 Cache: c,
 FluxInterval: fluxInterval,
 UseLogsNewSchema: serverOptions.UseLogsNewSchema,
+UseTraceNewSchema: serverOptions.UseTraceNewSchema,
 })
 if err != nil {
 return nil, err

@@ -727,7 +730,8 @@ func makeRulesManager(
 cache cache.Cache,
 disableRules bool,
 fm interfaces.FeatureLookup,
-useLogsNewSchema bool) (*rules.Manager, error) {
+useLogsNewSchema bool,
+useTraceNewSchema bool) (*rules.Manager, error) {
 // create engine
 pqle, err := pqle.FromReader(ch)

@@ -744,18 +748,19 @@ func makeRulesManager(
 // create manager opts
 managerOpts := &rules.ManagerOptions{
 NotifierOpts: notifierOpts,
 PqlEngine: pqle,
 RepoURL: ruleRepoURL,
 DBConn: db,
 Context: context.Background(),
 Logger: zap.L(),
 DisableRules: disableRules,
 FeatureFlags: fm,
 Reader: ch,
 Cache: cache,
 EvalDelay: constants.GetEvalDelay(),
 UseLogsNewSchema: useLogsNewSchema,
+UseTraceNewSchema: useTraceNewSchema,
 }
 // create Manager

View File

@@ -39,6 +39,7 @@ func main() {
 var disableRules bool
 var useLogsNewSchema bool
+var useTraceNewSchema bool
 // the url used to build link in the alert messages in slack and other systems
 var ruleRepoURL, cacheConfigPath, fluxInterval string
 var cluster string

@@ -50,6 +51,7 @@ func main() {
 var dialTimeout time.Duration
 flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
+flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
 flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
 flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
 flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")

@@ -87,6 +89,7 @@ func main() {
 FluxInterval: fluxInterval,
 Cluster: cluster,
 UseLogsNewSchema: useLogsNewSchema,
+UseTraceNewSchema: useTraceNewSchema,
 }
 // Read the jwt secret key

View File

@@ -35,7 +35,8 @@ type PrepareTaskOptions struct {
 ManagerOpts *ManagerOptions
 NotifyFunc NotifyFunc
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 }

 type PrepareTestRuleOptions struct {

@@ -48,7 +49,8 @@ type PrepareTestRuleOptions struct {
 ManagerOpts *ManagerOptions
 NotifyFunc NotifyFunc
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 }

 const taskNamesuffix = "webAppEditor"

@@ -91,9 +93,9 @@ type ManagerOptions struct {
 PrepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
+UseLogsNewSchema bool
+UseTraceNewSchema bool
 PrepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError)
-UseLogsNewSchema bool
 }

 // The Manager manages recording and alerting rules.

@@ -117,7 +119,8 @@ type Manager struct {
 prepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
 prepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError)
 UseLogsNewSchema bool
+UseTraceNewSchema bool
 }

 func defaultOptions(o *ManagerOptions) *ManagerOptions {

@@ -156,6 +159,7 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) {
 opts.FF,
 opts.Reader,
 opts.UseLogsNewSchema,
+opts.UseTraceNewSchema,
 WithEvalDelay(opts.ManagerOpts.EvalDelay),
 )

@@ -368,7 +372,8 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error {
 ManagerOpts: m.opts,
 NotifyFunc: m.prepareNotifyFunc(),
 UseLogsNewSchema: m.opts.UseLogsNewSchema,
+UseTraceNewSchema: m.opts.UseTraceNewSchema,
 })
 if err != nil {

@@ -490,7 +495,8 @@ func (m *Manager) addTask(rule *PostableRule, taskName string) error {
 ManagerOpts: m.opts,
 NotifyFunc: m.prepareNotifyFunc(),
 UseLogsNewSchema: m.opts.UseLogsNewSchema,
+UseTraceNewSchema: m.opts.UseTraceNewSchema,
 })
 if err != nil {

@@ -809,15 +815,16 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
 }
 alertCount, apiErr := m.prepareTestRuleFunc(PrepareTestRuleOptions{
 Rule: parsedRule,
 RuleDB: m.ruleDB,
 Logger: m.logger,
 Reader: m.reader,
 Cache: m.cache,
 FF: m.featureFlags,
 ManagerOpts: m.opts,
 NotifyFunc: m.prepareNotifyFunc(),
 UseLogsNewSchema: m.opts.UseLogsNewSchema,
+UseTraceNewSchema: m.opts.UseTraceNewSchema,
 })
 return alertCount, apiErr

View File

@@ -49,6 +49,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
 opts.FF,
 opts.Reader,
 opts.UseLogsNewSchema,
+opts.UseTraceNewSchema,
 WithSendAlways(),
 WithSendUnmatched(),
 )

View File

@@ -29,6 +29,7 @@ import (
 logsv3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
 tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
+tracesV4 "go.signoz.io/signoz/pkg/query-service/app/traces/v4"
 "go.signoz.io/signoz/pkg/query-service/formatter"
 yaml "gopkg.in/yaml.v2"

@@ -50,6 +51,8 @@ type ThresholdRule struct {
 // used for attribute metadata enrichment for logs and traces
 logsKeys map[string]v3.AttributeKey
 spansKeys map[string]v3.AttributeKey
+useTraceNewSchema bool
 }

 func NewThresholdRule(

@@ -58,6 +61,7 @@ func NewThresholdRule(
 featureFlags interfaces.FeatureLookup,
 reader interfaces.Reader,
 useLogsNewSchema bool,
+useTraceNewSchema bool,
 opts ...RuleOption,
 ) (*ThresholdRule, error) {

@@ -74,19 +78,21 @@ func NewThresholdRule(
 }
 querierOption := querier.QuerierOptions{
 Reader: reader,
 Cache: nil,
 KeyGenerator: queryBuilder.NewKeyGenerator(),
 FeatureLookup: featureFlags,
 UseLogsNewSchema: useLogsNewSchema,
+UseTraceNewSchema: useTraceNewSchema,
 }
 querierOptsV2 := querierV2.QuerierOptions{
 Reader: reader,
 Cache: nil,
 KeyGenerator: queryBuilder.NewKeyGenerator(),
 FeatureLookup: featureFlags,
 UseLogsNewSchema: useLogsNewSchema,
+UseTraceNewSchema: useTraceNewSchema,
 }
 t.querier = querier.NewQuerier(querierOption)

@@ -296,7 +302,11 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (Vec
 return nil, err
 }
 r.spansKeys = spanKeys
-tracesV3.Enrich(params, spanKeys)
+if r.useTraceNewSchema {
+tracesV4.Enrich(params, spanKeys)
+} else {
+tracesV3.Enrich(params, spanKeys)
+}
 }
 }

View File

@@ -791,7 +791,7 @@ func TestThresholdRuleShouldAlert(t *testing.T) {
 postableRule.RuleCondition.MatchType = MatchType(c.matchType)
 postableRule.RuleCondition.Target = &c.target
-rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
+rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true, WithEvalDelay(2*time.Minute))
 if err != nil {
 assert.NoError(t, err)
 }

@@ -880,7 +880,7 @@ func TestPrepareLinksToLogs(t *testing.T) {
 }
 fm := featureManager.StartManager()
-rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
+rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true, WithEvalDelay(2*time.Minute))
 if err != nil {
 assert.NoError(t, err)
 }

@@ -922,7 +922,7 @@ func TestPrepareLinksToTraces(t *testing.T) {
 }
 fm := featureManager.StartManager()
-rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
+rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true, WithEvalDelay(2*time.Minute))
 if err != nil {
 assert.NoError(t, err)
 }

@@ -998,7 +998,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
 postableRule.RuleCondition.MatchType = MatchType(c.matchType)
 postableRule.RuleCondition.Target = &c.target
-rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
+rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true, WithEvalDelay(2*time.Minute))
 if err != nil {
 assert.NoError(t, err)
 }

@@ -1051,7 +1051,7 @@ func TestThresholdRuleEvalDelay(t *testing.T) {
 fm := featureManager.StartManager()
 for idx, c := range cases {
-rule, err := NewThresholdRule("69", &postableRule, fm, nil, true) // no eval delay
+rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true) // no eval delay
 if err != nil {
 assert.NoError(t, err)
 }

@@ -1100,7 +1100,7 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) {
 fm := featureManager.StartManager()
 for idx, c := range cases {
-rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
+rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, true, WithEvalDelay(2*time.Minute))
 if err != nil {
 assert.NoError(t, err)
 }

@@ -1118,7 +1118,7 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) {
 type queryMatcherAny struct {
 }
-func (m *queryMatcherAny) Match(string, string) error {
+func (m *queryMatcherAny) Match(x string, y string) error {
 return nil
 }

@@ -1241,9 +1241,9 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
 }
 options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
-reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
-rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
+reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true)
+rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
 rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 "signoz_calls_total": {
 v3.Delta: true,

@@ -1340,9 +1340,9 @@ func TestThresholdRuleNoData(t *testing.T) {
 }
 options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
-reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
-rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
+reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true)
+rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
 rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 "signoz_calls_total": {
 v3.Delta: true,

@@ -1423,9 +1423,12 @@ func TestThresholdRuleTracesLink(t *testing.T) {
 for idx, c := range testCases {
 metaRows := cmock.NewRows(metaCols, c.metaValues)
 mock.
-ExpectQuery("SELECT DISTINCT(tagKey), tagType, dataType, isColumn FROM archiveNamespace.span_attributes_keys").
+ExpectQuery("SELECT DISTINCT(tagKey), tagType, dataType FROM archiveNamespace.span_attributes_keys").
 WillReturnRows(metaRows)
+mock.
+ExpectSelect("SHOW CREATE TABLE signoz_traces.distributed_signoz_index_v3").WillReturnRows(&cmock.Rows{})
 rows := cmock.NewRows(cols, c.values)
 // We are testing the eval logic after the query is run

@@ -1445,9 +1448,9 @@ func TestThresholdRuleTracesLink(t *testing.T) {
 }
 options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
-reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
-rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
+reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true)
+rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
 rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 "signoz_calls_total": {
 v3.Delta: true,

@@ -1570,9 +1573,9 @@ func TestThresholdRuleLogsLink(t *testing.T) {
 }
 options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
-reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
-rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
+reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true)
+rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
 rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 "signoz_calls_total": {
 v3.Delta: true,

@@ -1648,7 +1651,7 @@ func TestThresholdRuleShiftBy(t *testing.T) {
 },
 }
-rule, err := NewThresholdRule("69", &postableRule, nil, nil, true)
+rule, err := NewThresholdRule("69", &postableRule, nil, nil, true, true)
 if err != nil {
 assert.NoError(t, err)
 }

View File

@@ -46,6 +46,7 @@ func NewMockClickhouseReader(
 featureFlags,
 "",
 true,
+true,
 )
 return reader, mockDB