feat(cache): multi-tenant cache (#7805)

* feat(cache): remove the references of old cache

* feat(cache): add orgID in query range modules pt1

* feat(cache): add orgID in query range modules pt2

* feat(cache): add orgID in query range modules pt3

* feat(cache): preload metrics for all orgs

* feat(cache): fix ruler

* feat(cache): fix go build

* feat(cache): add orgID to rule

* feat(cache): fix tests

* feat(cache): address review comments

* feat(cache): use correct errors

* feat(cache): fix tests

* feat(cache): add the cache test package
Vikrant Gupta, 2025-05-03 18:30:07 +05:30, committed by GitHub
parent cb08ce5e5d
commit 5b237ee628
71 changed files with 1265 additions and 1476 deletions
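At the core of the change, every cache provider now namespaces storage keys by organization, so two tenants can hold the same logical key without colliding. A minimal sketch of the convention the providers below inline via strings.Join (the helper name is illustrative, not part of the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// orgScopedKey joins the org ID and the logical cache key with "::",
// mirroring the convention used by the memory and redis providers below.
func orgScopedKey(orgID, cacheKey string) string {
	return strings.Join([]string{orgID, cacheKey}, "::")
}

func main() {
	// Two orgs storing the same logical key get distinct physical entries.
	fmt.Println(orgScopedKey("org-a", "query-range:xyz")) // org-a::query-range:xyz
	fmt.Println(orgScopedKey("org-b", "query-range:xyz")) // org-b::query-range:xyz
}
```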


@@ -50,7 +50,7 @@ cache:
     # Time-to-live for cache entries in memory. Specify the duration in ns
     ttl: 60000000000
     # The interval at which the cache will be cleaned up
-    cleanupInterval: 1m
+    cleanup_interval: 1m
   # redis: Uses Redis as the caching backend.
   redis:
     # The hostname or IP address of the Redis server.
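For reference, a memory-cache config under the renamed key might look like this (the ttl and cleanup_interval values are the defaults shown above; the provider field and nesting are assumptions based on the cache.Config struct later in this diff):

```yaml
cache:
  provider: memory
  memory:
    # Time-to-live for cache entries in memory. Specify the duration in ns
    ttl: 60000000000
    # The interval at which the cache will be cleaned up
    cleanup_interval: 1m
```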


@@ -5,6 +5,7 @@ import (
 	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
 	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )

 type DailyProvider struct {
@@ -37,7 +38,7 @@ func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvi
 	return dp
 }

-func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *DailyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	req.Seasonality = SeasonalityDaily
-	return p.getAnomalies(ctx, req)
+	return p.getAnomalies(ctx, orgID, req)
 }


@@ -5,6 +5,7 @@ import (
 	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
 	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )

 type HourlyProvider struct {
@@ -37,7 +38,7 @@ func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyPr
 	return hp
 }

-func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *HourlyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	req.Seasonality = SeasonalityHourly
-	return p.getAnomalies(ctx, req)
+	return p.getAnomalies(ctx, orgID, req)
 }


@@ -2,8 +2,10 @@ package anomaly

 import (
 	"context"
+
+	"github.com/SigNoz/signoz/pkg/valuer"
 )

 type Provider interface {
-	GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
+	GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
 }


@@ -5,11 +5,12 @@ import (
 	"math"
 	"time"

-	"github.com/SigNoz/signoz/pkg/query-service/cache"
+	"github.com/SigNoz/signoz/pkg/cache"
 	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 	"github.com/SigNoz/signoz/pkg/query-service/postprocess"
 	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"go.uber.org/zap"
 )

@@ -59,9 +60,9 @@ func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomaly
 	return prepareAnomalyQueryParams(req.Params, req.Seasonality)
 }

-func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
+func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID, params *anomalyQueryParams) (*anomalyQueryResults, error) {
 	zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery))
-	currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
+	currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.CurrentPeriodQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -72,7 +73,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}

 	zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery))
-	pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
+	pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.PastPeriodQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -83,7 +84,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}

 	zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery))
-	currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
+	currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.CurrentSeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -94,7 +95,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}

 	zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery))
-	pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
+	pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.PastSeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -105,7 +106,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}

 	zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery))
-	past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
+	past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.Past2SeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -116,7 +117,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu
 	}

 	zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery))
-	past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
+	past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, orgID, params.Past3SeasonQuery)
 	if err != nil {
 		return nil, err
 	}
@@ -335,9 +336,9 @@ func (p *BaseSeasonalProvider) getAnomalyScores(
 	return anomalyScoreSeries
 }

-func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	anomalyParams := p.getQueryParams(req)
-	anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
+	anomalyQueryResults, err := p.getResults(ctx, orgID, anomalyParams)
 	if err != nil {
 		return nil, err
 	}


@@ -5,6 +5,7 @@ import (
 	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
 	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )

 type WeeklyProvider struct {
@@ -36,7 +37,7 @@ func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyPr
 	return wp
 }

-func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+func (p *WeeklyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
 	req.Seasonality = SeasonalityWeekly
-	return p.getAnomalies(ctx, req)
+	return p.getAnomalies(ctx, orgID, req)
 }


@@ -19,7 +19,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
 	"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
 	"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
-	"github.com/SigNoz/signoz/pkg/query-service/cache"
 	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
 	rules "github.com/SigNoz/signoz/pkg/query-service/rules"
@@ -40,7 +39,6 @@ type APIHandlerOptions struct {
 	IntegrationsController        *integrations.Controller
 	CloudIntegrationsController   *cloudintegrations.Controller
 	LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
-	Cache                         cache.Cache
 	Gateway                       *httputil.ReverseProxy
 	GatewayUrl                    string
 	// Querier Influx Interval
@@ -68,7 +66,6 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
 		IntegrationsController:        opts.IntegrationsController,
 		CloudIntegrationsController:   opts.CloudIntegrationsController,
 		LogsParsingPipelineController: opts.LogsParsingPipelineController,
-		Cache:                         opts.Cache,
 		FluxInterval:                  opts.FluxInterval,
 		AlertmanagerAPI:               alertmanager.NewAPI(signoz.Alertmanager),
 		FieldsAPI:                     fields.NewAPI(signoz.TelemetryStore),


@@ -7,14 +7,27 @@ import (
 	"net/http"

 	"github.com/SigNoz/signoz/ee/query-service/anomaly"
+	"github.com/SigNoz/signoz/pkg/http/render"
 	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
 	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
 	"github.com/SigNoz/signoz/pkg/query-service/model"
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"go.uber.org/zap"
 )

 func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+
 	bodyBytes, _ := io.ReadAll(r.Body)
 	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))

@@ -29,7 +42,7 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
 	queryRangeParams.Version = "v4"

 	// add temporality for each metric
-	temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
+	temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
 	if temporalityErr != nil {
 		zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
 		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
@@ -85,30 +98,30 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
 	switch seasonality {
 	case anomaly.SeasonalityWeekly:
 		provider = anomaly.NewWeeklyProvider(
-			anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
+			anomaly.WithCache[*anomaly.WeeklyProvider](aH.Signoz.Cache),
 			anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
 			anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
 		)
 	case anomaly.SeasonalityDaily:
 		provider = anomaly.NewDailyProvider(
-			anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
+			anomaly.WithCache[*anomaly.DailyProvider](aH.Signoz.Cache),
 			anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
 			anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
 		)
 	case anomaly.SeasonalityHourly:
 		provider = anomaly.NewHourlyProvider(
-			anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
+			anomaly.WithCache[*anomaly.HourlyProvider](aH.Signoz.Cache),
 			anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
 			anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
 		)
 	default:
 		provider = anomaly.NewDailyProvider(
-			anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
+			anomaly.WithCache[*anomaly.DailyProvider](aH.Signoz.Cache),
 			anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
 			anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
 		)
 	}
-	anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
+	anomalies, err := provider.GetAnomalies(r.Context(), orgID, &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
 	if err != nil {
 		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
 		return


@@ -19,6 +19,7 @@ import (
 	"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
 	"github.com/SigNoz/signoz/ee/query-service/rules"
 	"github.com/SigNoz/signoz/pkg/alertmanager"
+	"github.com/SigNoz/signoz/pkg/cache"
 	"github.com/SigNoz/signoz/pkg/http/middleware"
 	"github.com/SigNoz/signoz/pkg/prometheus"
 	"github.com/SigNoz/signoz/pkg/signoz"
@@ -41,7 +42,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
 	"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
 	opAmpModel "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
-	"github.com/SigNoz/signoz/pkg/query-service/cache"
 	baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
 	"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
 	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
@@ -57,7 +57,6 @@ type ServerOptions struct {
 	HTTPHostPort               string
 	PrivateHostPort            string
 	PreferSpanMetrics          bool
-	CacheConfigPath            string
 	FluxInterval               string
 	FluxIntervalForTraceDetail string
 	Cluster                    string
@@ -134,19 +133,10 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		serverOptions.SigNoz.Cache,
 	)

-	var c cache.Cache
-	if serverOptions.CacheConfigPath != "" {
-		cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
-		if err != nil {
-			return nil, err
-		}
-		c = cache.NewCache(cacheOpts)
-	}
-
 	rm, err := makeRulesManager(
 		serverOptions.SigNoz.SQLStore.SQLxDB(),
 		reader,
-		c,
+		serverOptions.SigNoz.Cache,
 		serverOptions.SigNoz.Alertmanager,
 		serverOptions.SigNoz.SQLStore,
 		serverOptions.SigNoz.TelemetryStore,
@@ -223,7 +213,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		IntegrationsController:        integrationsController,
 		CloudIntegrationsController:   cloudIntegrationsController,
 		LogsParsingPipelineController: logParsingPipelineController,
-		Cache:                         c,
 		FluxInterval:                  fluxInterval,
 		Gateway:                       gatewayProxy,
 		GatewayUrl:                    serverOptions.GatewayUrl,
@@ -261,9 +250,15 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		&opAmpModel.AllAgents, agentConfMgr,
 	)

-	errorList := reader.PreloadMetricsMetadata(context.Background())
-	for _, er := range errorList {
-		zap.L().Error("failed to preload metrics metadata", zap.Error(er))
+	orgs, err := apiHandler.Signoz.Modules.Organization.GetAll(context.Background())
+	if err != nil {
+		return nil, err
+	}
+	for _, org := range orgs {
+		errorList := reader.PreloadMetricsMetadata(context.Background(), org.ID)
+		for _, er := range errorList {
+			zap.L().Error("failed to preload metrics metadata", zap.Error(er))
+		}
 	}

 	return s, nil


@@ -72,6 +72,7 @@ func main() {
 	flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
 	// Deprecated
 	flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
+	// Deprecated
 	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
 	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
 	flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
@@ -138,7 +139,6 @@ func main() {
 		HTTPHostPort:               baseconst.HTTPHostPort,
 		PreferSpanMetrics:          preferSpanMetrics,
 		PrivateHostPort:            baseconst.PrivateHostPort,
-		CacheConfigPath:            cacheConfigPath,
 		FluxInterval:               fluxInterval,
 		FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
 		Cluster:                    cluster,


@@ -12,10 +12,11 @@ import (
 	"go.uber.org/zap"

 	"github.com/SigNoz/signoz/ee/query-service/anomaly"
-	"github.com/SigNoz/signoz/pkg/query-service/cache"
+	"github.com/SigNoz/signoz/pkg/cache"
 	"github.com/SigNoz/signoz/pkg/query-service/common"
 	"github.com/SigNoz/signoz/pkg/query-service/model"
 	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/valuer"

 	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
 	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
@@ -53,6 +54,7 @@ type AnomalyRule struct {

 func NewAnomalyRule(
 	id string,
+	orgID valuer.UUID,
 	p *ruletypes.PostableRule,
 	reader interfaces.Reader,
 	cache cache.Cache,
@@ -66,7 +68,7 @@ func NewAnomalyRule(
 		p.RuleCondition.Target = &target
 	}

-	baseRule, err := baserules.NewBaseRule(id, p, reader, opts...)
+	baseRule, err := baserules.NewBaseRule(id, orgID, p, reader, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -158,18 +160,18 @@ func (r *AnomalyRule) GetSelectedQuery() string {
 	return r.Condition().GetSelectedQueryName()
 }

-func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, ts time.Time) (ruletypes.Vector, error) {
+func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
 	params, err := r.prepareQueryRange(ts)
 	if err != nil {
 		return nil, err
 	}
-	err = r.PopulateTemporality(ctx, params)
+	err = r.PopulateTemporality(ctx, orgID, params)
 	if err != nil {
 		return nil, fmt.Errorf("internal error while setting temporality")
 	}

-	anomalies, err := r.provider.GetAnomalies(ctx, &anomaly.GetAnomaliesRequest{
+	anomalies, err := r.provider.GetAnomalies(ctx, orgID, &anomaly.GetAnomaliesRequest{
 		Params:      params,
 		Seasonality: r.seasonality,
 	})
@@ -204,7 +206,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
 	prevState := r.State()
 	valueFormatter := formatter.FromUnit(r.Unit())

-	res, err := r.buildAndRunQuery(ctx, ts)
+	res, err := r.buildAndRunQuery(ctx, r.OrgID(), ts)
 	if err != nil {
 		return nil, err


@@ -9,6 +9,7 @@ import (
 	baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
 	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
 	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/google/uuid"
 	"go.uber.org/zap"
 )
@@ -23,6 +24,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 	// create a threshold rule
 	tr, err := baserules.NewThresholdRule(
 		ruleId,
+		opts.OrgID,
 		opts.Rule,
 		opts.Reader,
 		baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
@@ -43,6 +45,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 	// create promql rule
 	pr, err := baserules.NewPromRule(
 		ruleId,
+		opts.OrgID,
 		opts.Rule,
 		opts.Logger,
 		opts.Reader,
@@ -63,6 +66,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 	// create anomaly rule
 	ar, err := NewAnomalyRule(
 		ruleId,
+		opts.OrgID,
 		opts.Rule,
 		opts.Reader,
 		opts.Cache,
@@ -119,6 +123,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
 	// create a threshold rule
 	rule, err = baserules.NewThresholdRule(
 		alertname,
+		opts.OrgID,
 		parsedRule,
 		opts.Reader,
 		baserules.WithSendAlways(),
@@ -136,6 +141,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
 	// create promql rule
 	rule, err = baserules.NewPromRule(
 		alertname,
+		opts.OrgID,
 		parsedRule,
 		opts.Logger,
 		opts.Reader,
@@ -153,6 +159,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
 	// create anomaly rule
 	rule, err = NewAnomalyRule(
 		alertname,
+		opts.OrgID,
 		parsedRule,
 		opts.Reader,
 		opts.Cache,
@@ -187,7 +194,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
 // newTask returns an appropriate group for
 // rule type
-func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, maintenanceStore ruletypes.MaintenanceStore, orgID string) baserules.Task {
+func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, maintenanceStore ruletypes.MaintenanceStore, orgID valuer.UUID) baserules.Task {
 	if taskType == baserules.TaskTypeCh {
 		return baserules.NewRuleTask(name, "", frequency, rules, opts, notify, maintenanceStore, orgID)
 	}

pkg/cache/cache.go (vendored, 80 changed lines)

@@ -2,70 +2,26 @@ package cache

 import (
 	"context"
-	"encoding"
-	"fmt"
-	"reflect"
 	"time"

+	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
+	"github.com/SigNoz/signoz/pkg/types/cachetypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 )

-// cacheable entity
-type CacheableEntity interface {
-	encoding.BinaryMarshaler
-	encoding.BinaryUnmarshaler
-}
-
-func WrapCacheableEntityErrors(rt reflect.Type, caller string) error {
-	if rt == nil {
-		return fmt.Errorf("%s: (nil)", caller)
-	}
-
-	if rt.Kind() != reflect.Pointer {
-		return fmt.Errorf("%s: (non-pointer \"%s\")", caller, rt.String())
-	}
-
-	return fmt.Errorf("%s: (nil \"%s\")", caller, rt.String())
-}
-
-// cache status
-type RetrieveStatus int
-
-const (
-	RetrieveStatusHit = RetrieveStatus(iota)
-	RetrieveStatusPartialHit
-	RetrieveStatusRangeMiss
-	RetrieveStatusKeyMiss
-	RetrieveStatusRevalidated
-	RetrieveStatusError
-)
-
-func (s RetrieveStatus) String() string {
-	switch s {
-	case RetrieveStatusHit:
-		return "hit"
-	case RetrieveStatusPartialHit:
-		return "partial hit"
-	case RetrieveStatusRangeMiss:
-		return "range miss"
-	case RetrieveStatusKeyMiss:
-		return "key miss"
-	case RetrieveStatusRevalidated:
-		return "revalidated"
-	case RetrieveStatusError:
-		return "error"
-	default:
-		return "unknown"
-	}
-}
-
-// cache interface
 type Cache interface {
-	Connect(ctx context.Context) error
-	Store(ctx context.Context, cacheKey string, data CacheableEntity, ttl time.Duration) error
-	Retrieve(ctx context.Context, cacheKey string, dest CacheableEntity, allowExpired bool) (RetrieveStatus, error)
-	SetTTL(ctx context.Context, cacheKey string, ttl time.Duration)
-	Remove(ctx context.Context, cacheKey string)
-	BulkRemove(ctx context.Context, cacheKeys []string)
-	Close(ctx context.Context) error
+	// Set sets the cacheable entity in cache.
+	Set(ctx context.Context, orgID valuer.UUID, cacheKey string, data cachetypes.Cacheable, ttl time.Duration) error
+	// Get gets the cacheable entity into the dest entity passed
+	Get(ctx context.Context, orgID valuer.UUID, cacheKey string, dest cachetypes.Cacheable, allowExpired bool) error
+	// Delete deletes the cacheable entity from cache
+	Delete(ctx context.Context, orgID valuer.UUID, cacheKey string)
+	// DeleteMany deletes multiple cacheable entities from cache
+	DeleteMany(ctx context.Context, orgID valuer.UUID, cacheKeys []string)
+}
+
+type KeyGenerator interface {
+	// GenerateKeys generates the cache keys for the given query range params
+	// The keys are returned as a map where the key is the query name and the value is the cache key
+	GenerateKeys(*v3.QueryRangeParamsV3) map[string]string
 }
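A minimal sketch of a caller against the new interface. It assumes, as the test entities later in this diff do, that a cachetypes.Cacheable is any type implementing encoding.BinaryMarshaler and encoding.BinaryUnmarshaler; the roundTrip function and its key name are illustrative, not part of the PR:

```go
package example

import (
	"context"
	"encoding/json"
	"time"

	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// CacheableEntity mirrors the test type later in this diff: it satisfies
// cachetypes.Cacheable by implementing MarshalBinary/UnmarshalBinary via JSON.
type CacheableEntity struct {
	Key    string
	Value  int
	Expiry time.Duration
}

func (ce *CacheableEntity) MarshalBinary() ([]byte, error) { return json.Marshal(ce) }

func (ce *CacheableEntity) UnmarshalBinary(data []byte) error { return json.Unmarshal(data, ce) }

// roundTrip is a hypothetical caller: every operation now carries the org ID,
// and a missing key surfaces as a NotFound error rather than the old
// RetrieveStatus enum.
func roundTrip(ctx context.Context, c cache.Cache, orgID valuer.UUID) error {
	if err := c.Set(ctx, orgID, "key", &CacheableEntity{Value: 1}, 10*time.Second); err != nil {
		return err
	}
	dest := new(CacheableEntity)
	return c.Get(ctx, orgID, "key", dest, false)
}
```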

pkg/cache/cachetest/provider.go (vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
+package cachetest
+
+import (
+	"context"
+
+	"github.com/SigNoz/signoz/pkg/cache"
+	"github.com/SigNoz/signoz/pkg/cache/memorycache"
+	"github.com/SigNoz/signoz/pkg/factory/factorytest"
+)
+
+type provider struct{}
+
+func New(config cache.Config) (cache.Cache, error) {
+	cache, err := memorycache.New(context.TODO(), factorytest.NewSettings(), config)
+	if err != nil {
+		return nil, err
+	}
+
+	return cache, nil
+}
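A sketch of how a unit test might obtain a cache.Cache through this helper, with the config shape borrowed from the memory-cache tests below (the newTestCache name is illustrative):

```go
package example

import (
	"time"

	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/cache/cachetest"
)

// newTestCache wires up the in-memory provider with test settings, so unit
// tests need neither a running Redis nor real factory wiring.
func newTestCache() (cache.Cache, error) {
	return cachetest.New(cache.Config{
		Provider: "memory",
		Memory:   cache.Memory{TTL: 10 * time.Second, CleanupInterval: time.Second},
	})
}
```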

pkg/cache/config.go (vendored, 2 changed lines)

@@ -9,7 +9,7 @@ import (

 type Memory struct {
 	TTL             time.Duration `mapstructure:"ttl"`
-	CleanupInterval time.Duration `mapstructure:"cleanupInterval"`
+	CleanupInterval time.Duration `mapstructure:"cleanup_interval"`
 }

 type Redis struct {


@@ -2,12 +2,15 @@ package memorycache

 import (
 	"context"
-	"fmt"
 	"reflect"
+	"strings"
 	"time"

 	"github.com/SigNoz/signoz/pkg/cache"
+	"github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/factory"
+	"github.com/SigNoz/signoz/pkg/types/cachetypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	go_cache "github.com/patrickmn/go-cache"
 )

@@ -23,79 +26,52 @@ func New(ctx context.Context, settings factory.ProviderSettings, config cache.Co
 	return &provider{cc: go_cache.New(config.Memory.TTL, config.Memory.CleanupInterval)}, nil
 }

-// Connect does nothing
-func (c *provider) Connect(_ context.Context) error {
-	return nil
-}
-
-// Store stores the data in the cache
-func (c *provider) Store(_ context.Context, cacheKey string, data cache.CacheableEntity, ttl time.Duration) error {
+func (c *provider) Set(_ context.Context, orgID valuer.UUID, cacheKey string, data cachetypes.Cacheable, ttl time.Duration) error {
 	// check if the data being passed is a pointer and is not nil
-	rv := reflect.ValueOf(data)
-	if rv.Kind() != reflect.Pointer || rv.IsNil() {
-		return cache.WrapCacheableEntityErrors(reflect.TypeOf(data), "inmemory")
+	err := cachetypes.ValidatePointer(data, "inmemory")
+	if err != nil {
+		return err
 	}

-	c.cc.Set(cacheKey, data, ttl)
+	c.cc.Set(strings.Join([]string{orgID.StringValue(), cacheKey}, "::"), data, ttl)
 	return nil
 }

-// Retrieve retrieves the data from the cache
-func (c *provider) Retrieve(_ context.Context, cacheKey string, dest cache.CacheableEntity, allowExpired bool) (cache.RetrieveStatus, error) {
+func (c *provider) Get(_ context.Context, orgID valuer.UUID, cacheKey string, dest cachetypes.Cacheable, allowExpired bool) error {
 	// check if the destination being passed is a pointer and is not nil
-	dstv := reflect.ValueOf(dest)
-	if dstv.Kind() != reflect.Pointer || dstv.IsNil() {
-		return cache.RetrieveStatusError, cache.WrapCacheableEntityErrors(reflect.TypeOf(dest), "inmemory")
+	err := cachetypes.ValidatePointer(dest, "inmemory")
+	if err != nil {
+		return err
 	}

 	// check if the destination value is settable
+	dstv := reflect.ValueOf(dest)
 	if !dstv.Elem().CanSet() {
-		return cache.RetrieveStatusError, fmt.Errorf("destination value is not settable, %s", dstv.Elem())
+		return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "destination value is not settable, %s", dstv.Elem())
 	}

-	data, found := c.cc.Get(cacheKey)
+	data, found := c.cc.Get(strings.Join([]string{orgID.StringValue(), cacheKey}, "::"))
 	if !found {
-		return cache.RetrieveStatusKeyMiss, nil
+		return errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "key miss")
 	}

 	// check the type compatibility between the src and dest
 	srcv := reflect.ValueOf(data)
 	if !srcv.Type().AssignableTo(dstv.Type()) {
-		return cache.RetrieveStatusError, fmt.Errorf("src type is not assignable to dst type")
+		return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "src type is not assignable to dst type")
 	}

 	// set the value from src to dest
 	dstv.Elem().Set(srcv.Elem())
-	return cache.RetrieveStatusHit, nil
+	return nil
 }

-// SetTTL sets the TTL for the cache entry
-func (c *provider) SetTTL(_ context.Context, cacheKey string, ttl time.Duration) {
-	item, found := c.cc.Get(cacheKey)
-	if !found {
-		return
-	}
-	_ = c.cc.Replace(cacheKey, item, ttl)
+func (c *provider) Delete(_ context.Context, orgID valuer.UUID, cacheKey string) {
+	c.cc.Delete(strings.Join([]string{orgID.StringValue(), cacheKey}, "::"))
 }

-// Remove removes the cache entry
-func (c *provider) Remove(_ context.Context, cacheKey string) {
-	c.cc.Delete(cacheKey)
-}
-
-// BulkRemove removes the cache entries
-func (c *provider) BulkRemove(_ context.Context, cacheKeys []string) {
+func (c *provider) DeleteMany(_ context.Context, orgID valuer.UUID, cacheKeys []string) {
 	for _, cacheKey := range cacheKeys {
-		c.cc.Delete(cacheKey)
+		c.cc.Delete(strings.Join([]string{orgID.StringValue(), cacheKey}, "::"))
 	}
 }
-
-// Close does nothing
-func (c *provider) Close(_ context.Context) error {
-	return nil
-}
-
-// Configuration returns the cache configuration
-func (c *provider) Configuration() *cache.Memory {
-	return nil
-}
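One consequence worth spelling out: because the provider prefixes every key with the org ID, the same logical key is invisible across tenants. A hedged sketch of a test demonstrating this, reusing the cachetest helper shown earlier (the test name and entity type are illustrative, not part of the PR):

```go
package memorycache_test

import (
	"context"
	"encoding/json"
	"testing"
	"time"

	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/cache/cachetest"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// entity satisfies cachetypes.Cacheable via JSON, like the test types in this PR.
type entity struct {
	Value int
}

func (e *entity) MarshalBinary() ([]byte, error) { return json.Marshal(e) }

func (e *entity) UnmarshalBinary(b []byte) error { return json.Unmarshal(b, e) }

// TestTenantIsolation: the same logical key stored under org A is a miss when
// read under org B, because the provider namespaces every key by org ID.
func TestTenantIsolation(t *testing.T) {
	c, err := cachetest.New(cache.Config{
		Provider: "memory",
		Memory:   cache.Memory{TTL: 10 * time.Second, CleanupInterval: time.Second},
	})
	require.NoError(t, err)

	orgA, orgB := valuer.GenerateUUID(), valuer.GenerateUUID()
	require.NoError(t, c.Set(context.Background(), orgA, "key", &entity{Value: 1}, 10*time.Second))

	// Different org, same logical key: the lookup misses.
	assert.Error(t, c.Get(context.Background(), orgB, "key", new(entity), false))
	// Same org: the lookup succeeds.
	assert.NoError(t, c.Get(context.Background(), orgA, "key", new(entity), false))
}
```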


@@ -8,6 +8,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/cache"
 	"github.com/SigNoz/signoz/pkg/factory/factorytest"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -22,7 +23,6 @@ func TestNew(t *testing.T) {
 	require.NoError(t, err)
 	assert.NotNil(t, c)
 	assert.NotNil(t, c.(*provider).cc)
-	assert.NoError(t, c.Connect(context.Background()))
 }

 type CacheableEntity struct {
@@ -63,7 +63,7 @@ func TestStoreWithNilPointer(t *testing.T) {
 	c, err := New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
 	require.NoError(t, err)
 	var storeCacheableEntity *CacheableEntity
-	assert.Error(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+	assert.Error(t, c.Set(context.Background(), valuer.GenerateUUID(), "key", storeCacheableEntity, 10*time.Second))
 }

 // this should fail because of no pointer error
@@ -75,7 +75,7 @@ func TestStoreWithStruct(t *testing.T) {
 	c, err := New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
 	require.NoError(t, err)
 	var storeCacheableEntity CacheableEntity
-	assert.Error(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+	assert.Error(t, c.Set(context.Background(), valuer.GenerateUUID(), "key", storeCacheableEntity, 10*time.Second))
 }

 func TestStoreWithNonNilPointer(t *testing.T) {
@@ -90,7 +90,7 @@ func TestStoreWithNonNilPointer(t *testing.T) {
 		Value:  1,
 		Expiry: time.Microsecond,
 	}
-	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+	assert.NoError(t, c.Set(context.Background(), valuer.GenerateUUID(), "key", storeCacheableEntity, 10*time.Second))
 }

 // TestRetrieve tests the Retrieve function
@@ -106,13 +106,14 @@ func TestRetrieveWithNilPointer(t *testing.T) {
 		Value:  1,
 		Expiry: time.Microsecond,
 	}
-	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+
+	orgID := valuer.GenerateUUID()
+	assert.NoError(t, c.Set(context.Background(), orgID, "key", storeCacheableEntity, 10*time.Second))

 	var retrieveCacheableEntity *CacheableEntity

-	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	err = c.Get(context.Background(), orgID, "key", retrieveCacheableEntity, false)
 	assert.Error(t, err)
-	assert.Equal(t, retrieveStatus, cache.RetrieveStatusError)
 }

 func TestRetrieveWitNonPointer(t *testing.T) {
@@ -127,13 +128,13 @@ func TestRetrieveWitNonPointer(t *testing.T) {
 		Value:  1,
 		Expiry: time.Microsecond,
 	}
-	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+	orgID := valuer.GenerateUUID()
+	assert.NoError(t, c.Set(context.Background(), orgID, "key", storeCacheableEntity, 10*time.Second))

 	var retrieveCacheableEntity CacheableEntity

-	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	err = c.Get(context.Background(), orgID, "key", retrieveCacheableEntity, false)
 	assert.Error(t, err)
-	assert.Equal(t, retrieveStatus, cache.RetrieveStatusError)
 }

 func TestRetrieveWithDifferentTypes(t *testing.T) {
@@ -143,17 +144,17 @@ func TestRetrieveWithDifferentTypes(t *testing.T) {
 	}
 	c, err := New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
 	require.NoError(t, err)
+	orgID := valuer.GenerateUUID()
 	storeCacheableEntity := &CacheableEntity{
 		Key:    "some-random-key",
 		Value:  1,
 		Expiry: time.Microsecond,
 	}
-	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+	assert.NoError(t, c.Set(context.Background(), orgID, "key", storeCacheableEntity, 10*time.Second))

 	retrieveCacheableEntity := new(DCacheableEntity)
-	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	err = c.Get(context.Background(), orgID, "key", retrieveCacheableEntity, false)
 	assert.Error(t, err)
-	assert.Equal(t, retrieveStatus, cache.RetrieveStatusError)
 }

 func TestRetrieveWithSameTypes(t *testing.T) {
@@ -163,46 +164,20 @@ func TestRetrieveWithSameTypes(t *testing.T) {
 	}
 	c, err := New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
 	require.NoError(t, err)
+	orgID := valuer.GenerateUUID()
 	storeCacheableEntity := &CacheableEntity{
 		Key:    "some-random-key",
 		Value:  1,
 		Expiry: time.Microsecond,
 	}
-	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+	assert.NoError(t, c.Set(context.Background(), orgID, "key", storeCacheableEntity, 10*time.Second))

 	retrieveCacheableEntity := new(CacheableEntity)
-	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	err = c.Get(context.Background(), orgID, "key", retrieveCacheableEntity, false)
 	assert.NoError(t, err)
-	assert.Equal(t, retrieveStatus, cache.RetrieveStatusHit)
 	assert.Equal(t, storeCacheableEntity, retrieveCacheableEntity)
 }

-// TestSetTTL tests the SetTTL function
-func TestSetTTL(t *testing.T) {
-	c, err := New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: 10 * time.Second, CleanupInterval: 1 * time.Second}})
-	require.NoError(t, err)
-	storeCacheableEntity := &CacheableEntity{
-		Key:    "some-random-key",
-		Value:  1,
-		Expiry: time.Microsecond,
-	}
-	retrieveCacheableEntity := new(CacheableEntity)
-	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 2*time.Second))
-	time.Sleep(3 * time.Second)
-	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
-	assert.NoError(t, err)
-	assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
-	assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
-
-	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 2*time.Second))
-	c.SetTTL(context.Background(), "key", 4*time.Second)
-	time.Sleep(3 * time.Second)
-	retrieveStatus, err = c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
-	assert.NoError(t, err)
-	assert.Equal(t, retrieveStatus, cache.RetrieveStatusHit)
-	assert.Equal(t, retrieveCacheableEntity, storeCacheableEntity)
-}
-
 // TestRemove tests the Remove function
 func TestRemove(t *testing.T) {
 	opts := cache.Memory{
@@ -217,13 +192,12 @@ func TestRemove(t *testing.T) {
 		Expiry: time.Microsecond,
 	}
 	retrieveCacheableEntity := new(CacheableEntity)
-	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
-	c.Remove(context.Background(), "key")
+	orgID := valuer.GenerateUUID()
+	assert.NoError(t, c.Set(context.Background(), orgID, "key", storeCacheableEntity, 10*time.Second))
+	c.Delete(context.Background(), orgID, "key")

-	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
-	assert.NoError(t, err)
-	assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
-	assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
+	err = c.Get(context.Background(), orgID, "key", retrieveCacheableEntity, false)
+	assert.Error(t, err)
 }

 // TestBulkRemove tests the BulkRemove function
@@ -234,25 +208,22 @@ func TestBulkRemove(t *testing.T) {
 	}
 	c, err := New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
 	require.NoError(t, err)
+	orgID := valuer.GenerateUUID()
 	storeCacheableEntity := &CacheableEntity{
 		Key:    "some-random-key",
 		Value:  1,
 		Expiry: time.Microsecond,
 	}
 	retrieveCacheableEntity := new(CacheableEntity)
-	assert.NoError(t, c.Store(context.Background(), "key1", storeCacheableEntity, 10*time.Second))
-	assert.NoError(t, c.Store(context.Background(), "key2", storeCacheableEntity, 10*time.Second))
-	c.BulkRemove(context.Background(), []string{"key1", "key2"})
+	assert.NoError(t, c.Set(context.Background(), orgID, "key1", storeCacheableEntity, 10*time.Second))
+	assert.NoError(t, c.Set(context.Background(), orgID, "key2", storeCacheableEntity, 10*time.Second))
+	c.DeleteMany(context.Background(), orgID, []string{"key1", "key2"})

-	retrieveStatus, err := c.Retrieve(context.Background(), "key1", retrieveCacheableEntity, false)
-	assert.NoError(t, err)
-	assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
-	assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
+	err = c.Get(context.Background(), orgID, "key1", retrieveCacheableEntity, false)
+	assert.Error(t, err)

-	retrieveStatus, err = c.Retrieve(context.Background(), "key2", retrieveCacheableEntity, false)
-	assert.NoError(t, err)
-	assert.Equal(t, retrieveStatus, cache.RetrieveStatusKeyMiss)
-	assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
+	err = c.Get(context.Background(), orgID, "key2", retrieveCacheableEntity, false)
+	assert.Error(t, err)
 }

 // TestCache tests the cache
@@ -263,16 +234,16 @@ func TestCache(t *testing.T) {
 	}
 	c, err := New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: opts})
 	require.NoError(t, err)
+	orgID := valuer.GenerateUUID()
 	storeCacheableEntity := &CacheableEntity{
 		Key:    "some-random-key",
 		Value:  1,
 		Expiry: time.Microsecond,
 	}
 	retrieveCacheableEntity := new(CacheableEntity)
-	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
-	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	assert.NoError(t, c.Set(context.Background(), orgID, "key", storeCacheableEntity, 10*time.Second))
+	err = c.Get(context.Background(), orgID, "key", retrieveCacheableEntity, false)
 	assert.NoError(t, err)
-	assert.Equal(t, retrieveStatus, cache.RetrieveStatusHit)
 	assert.Equal(t, storeCacheableEntity, retrieveCacheableEntity)
-	c.Remove(context.Background(), "key")
+	c.Delete(context.Background(), orgID, "key")
 }


@@ -3,18 +3,22 @@ package rediscache

 import (
 	"context"
 	"errors"
-	"fmt"
+	"strings"
 	"time"

+	"fmt"
+
 	"github.com/SigNoz/signoz/pkg/cache"
+	errorsV2 "github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/factory"
+	"github.com/SigNoz/signoz/pkg/types/cachetypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/go-redis/redis/v8"
 	"go.uber.org/zap"
 )

 type provider struct {
 	client *redis.Client
-	opts   cache.Redis
 }

 func NewFactory() factory.ProviderFactory[cache.Cache, cache.Config] {
@@ -22,99 +26,50 @@ func NewFactory() factory.ProviderFactory[cache.Cache, cache.Config] {
 }

 func New(ctx context.Context, settings factory.ProviderSettings, config cache.Config) (cache.Cache, error) {
-	return &provider{opts: config.Redis}, nil
+	provider := new(provider)
+	provider.client = redis.NewClient(&redis.Options{
+		Addr:     strings.Join([]string{config.Redis.Host, fmt.Sprint(config.Redis.Port)}, ":"),
+		Password: config.Redis.Password,
+		DB:       config.Redis.DB,
+	})
+
+	if err := provider.client.Ping(ctx).Err(); err != nil {
+		return nil, err
+	}
+
+	return provider, nil
 }

-// WithClient creates a new cache with the given client
 func WithClient(client *redis.Client) *provider {
 	return &provider{client: client}
 }

-// Connect connects to the redis server
-func (c *provider) Connect(_ context.Context) error {
-	c.client = redis.NewClient(&redis.Options{
-		Addr:     fmt.Sprintf("%s:%d", c.opts.Host, c.opts.Port),
-		Password: c.opts.Password,
-		DB:       c.opts.DB,
-	})
-	return nil
-}
-
-// Store stores the data in the cache
-func (c *provider) Store(ctx context.Context, cacheKey string, data cache.CacheableEntity, ttl time.Duration) error {
-	return c.client.Set(ctx, cacheKey, data, ttl).Err()
+func (c *provider) Set(ctx context.Context, orgID valuer.UUID, cacheKey string, data cachetypes.Cacheable, ttl time.Duration) error {
+	return c.client.Set(ctx, strings.Join([]string{orgID.StringValue(), cacheKey}, "::"), data, ttl).Err()
 }

-// Retrieve retrieves the data from the cache
-func (c *provider) Retrieve(ctx context.Context, cacheKey string, dest cache.CacheableEntity, allowExpired bool) (cache.RetrieveStatus, error) {
-	err := c.client.Get(ctx, cacheKey).Scan(dest)
+func (c *provider) Get(ctx context.Context, orgID valuer.UUID, cacheKey string, dest cachetypes.Cacheable, allowExpired bool) error {
+	err := c.client.Get(ctx, strings.Join([]string{orgID.StringValue(), cacheKey}, "::")).Scan(dest)
 	if err != nil {
 		if errors.Is(err, redis.Nil) {
-			return cache.RetrieveStatusKeyMiss, nil
+			return errorsV2.Newf(errorsV2.TypeNotFound, errorsV2.CodeNotFound, "key miss")
 		}
-		return cache.RetrieveStatusError, err
+		return err
 	}
-	return cache.RetrieveStatusHit, nil
+	return nil
 }

-// SetTTL sets the TTL for the cache entry
-func (c *provider) SetTTL(ctx context.Context, cacheKey string, ttl time.Duration) {
-	err := c.client.Expire(ctx, cacheKey, ttl).Err()
-	if err != nil {
-		zap.L().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err))
-	}
-}
-
-// Remove removes the cache entry
-func (c *provider) Remove(ctx context.Context, cacheKey string) {
-	c.BulkRemove(ctx, []string{cacheKey})
+func (c *provider) Delete(ctx context.Context, orgID valuer.UUID, cacheKey string) {
+	c.DeleteMany(ctx, orgID, []string{cacheKey})
 }

-// BulkRemove removes the cache entries
-func (c *provider) BulkRemove(ctx context.Context, cacheKeys []string) {
-	if err := c.client.Del(ctx, cacheKeys...).Err(); err != nil {
+func (c *provider) DeleteMany(ctx context.Context, orgID valuer.UUID, cacheKeys []string) {
+	updatedCacheKeys := []string{}
+	for _, cacheKey := range cacheKeys {
+		updatedCacheKeys = append(updatedCacheKeys, strings.Join([]string{orgID.StringValue(), cacheKey}, "::"))
+	}
+
+	if err := c.client.Del(ctx, updatedCacheKeys...).Err(); err != nil {
 		zap.L().Error("error deleting cache keys", zap.Strings("cacheKeys", cacheKeys), zap.Error(err))
 	}
 }
-
-// Close closes the connection to the redis server
-func (c *provider) Close(_ context.Context) error {
-	return c.client.Close()
-}
-
-// Ping pings the redis server
-func (c *provider) Ping(ctx context.Context) error {
-	return c.client.Ping(ctx).Err()
-}
-
-// GetClient returns the redis client
-func (c *provider) GetClient() *redis.Client {
-	return c.client
-}
-
-// GetTTL returns the TTL for the cache entry
-func (c *provider) GetTTL(ctx context.Context, cacheKey string) time.Duration {
-	ttl, err := c.client.TTL(ctx, cacheKey).Result()
-	if err != nil {
-		zap.L().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
-	}
-	return ttl
-}
-
-// GetKeys returns the keys matching the pattern
-func (c *provider) GetKeys(ctx context.Context, pattern string) ([]string, error) {
-	return c.client.Keys(ctx, pattern).Result()
-}
-
-// GetKeysWithTTL returns the keys matching the pattern with their TTL
-func (c *provider) GetKeysWithTTL(ctx context.Context, pattern string) (map[string]time.Duration, error) {
-	keys, err := c.GetKeys(ctx, pattern)
-	if err != nil {
-		return nil, err
-	}
-	result := make(map[string]time.Duration)
-	for _, key := range keys {
-		result[key] = c.GetTTL(ctx, key)
-	}
-	return result, nil
-}


@@ -3,10 +3,11 @@ package rediscache
 import (
 	"context"
 	"encoding/json"
+	"strings"
 	"testing"
 	"time"
 
-	_cache "github.com/SigNoz/signoz/pkg/cache"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/go-redis/redismock/v8"
 	"github.com/stretchr/testify/assert"
 )
@@ -25,7 +26,7 @@ func (ce *CacheableEntity) UnmarshalBinary(data []byte) error {
 	return json.Unmarshal(data, ce)
 }
 
-func TestStore(t *testing.T) {
+func TestSet(t *testing.T) {
 	db, mock := redismock.NewClientMock()
 	cache := WithClient(db)
 	storeCacheableEntity := &CacheableEntity{
@@ -34,15 +35,16 @@ func TestStore(t *testing.T) {
 		Expiry: time.Microsecond,
 	}
 
-	mock.ExpectSet("key", storeCacheableEntity, 10*time.Second).RedisNil()
-	_ = cache.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second)
+	orgID := valuer.GenerateUUID()
+	mock.ExpectSet(strings.Join([]string{orgID.StringValue(), "key"}, "::"), storeCacheableEntity, 10*time.Second).RedisNil()
+	_ = cache.Set(context.Background(), orgID, "key", storeCacheableEntity, 10*time.Second)
 
 	if err := mock.ExpectationsWereMet(); err != nil {
 		t.Errorf("there were unfulfilled expectations: %s", err)
 	}
 }
 
-func TestRetrieve(t *testing.T) {
+func TestGet(t *testing.T) {
 	db, mock := redismock.NewClientMock()
 	cache := WithClient(db)
 	storeCacheableEntity := &CacheableEntity{
@@ -52,50 +54,26 @@ func TestRetrieve(t *testing.T) {
 	}
 	retrieveCacheableEntity := new(CacheableEntity)
 
-	mock.ExpectSet("key", storeCacheableEntity, 10*time.Second).RedisNil()
-	_ = cache.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second)
+	orgID := valuer.GenerateUUID()
+	mock.ExpectSet(strings.Join([]string{orgID.StringValue(), "key"}, "::"), storeCacheableEntity, 10*time.Second).RedisNil()
+	_ = cache.Set(context.Background(), orgID, "key", storeCacheableEntity, 10*time.Second)
 
 	data, err := storeCacheableEntity.MarshalBinary()
 	assert.NoError(t, err)
 
-	mock.ExpectGet("key").SetVal(string(data))
-	retrieveStatus, err := cache.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	mock.ExpectGet(strings.Join([]string{orgID.StringValue(), "key"}, "::")).SetVal(string(data))
+	err = cache.Get(context.Background(), orgID, "key", retrieveCacheableEntity, false)
 	if err != nil {
 		t.Errorf("unexpected error: %s", err)
 	}
 
-	if retrieveStatus != _cache.RetrieveStatusHit {
-		t.Errorf("expected status %d, got %d", _cache.RetrieveStatusHit, retrieveStatus)
-	}
-
 	assert.Equal(t, storeCacheableEntity, retrieveCacheableEntity)
 
 	if err := mock.ExpectationsWereMet(); err != nil {
 		t.Errorf("there were unfulfilled expectations: %s", err)
 	}
 }
 
-func TestSetTTL(t *testing.T) {
-	db, mock := redismock.NewClientMock()
-	cache := WithClient(db)
-	storeCacheableEntity := &CacheableEntity{
-		Key:    "some-random-key",
-		Value:  1,
-		Expiry: time.Microsecond,
-	}
-
-	mock.ExpectSet("key", storeCacheableEntity, 10*time.Second).RedisNil()
-	_ = cache.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second)
-	mock.ExpectExpire("key", 4*time.Second).RedisNil()
-	cache.SetTTL(context.Background(), "key", 4*time.Second)
-
-	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expectations: %s", err)
-	}
-}
-
-func TestRemove(t *testing.T) {
+func TestDelete(t *testing.T) {
 	db, mock := redismock.NewClientMock()
 	c := WithClient(db)
 	storeCacheableEntity := &CacheableEntity{
@@ -103,19 +81,20 @@ func TestRemove(t *testing.T) {
 		Value:  1,
 		Expiry: time.Microsecond,
 	}
+	orgID := valuer.GenerateUUID()
 
-	mock.ExpectSet("key", storeCacheableEntity, 10*time.Second).RedisNil()
-	_ = c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second)
+	mock.ExpectSet(strings.Join([]string{orgID.StringValue(), "key"}, "::"), storeCacheableEntity, 10*time.Second).RedisNil()
+	_ = c.Set(context.Background(), orgID, "key", storeCacheableEntity, 10*time.Second)
 
-	mock.ExpectDel("key").RedisNil()
-	c.Remove(context.Background(), "key")
+	mock.ExpectDel(strings.Join([]string{orgID.StringValue(), "key"}, "::")).RedisNil()
+	c.Delete(context.Background(), orgID, "key")
 
 	if err := mock.ExpectationsWereMet(); err != nil {
 		t.Errorf("there were unfulfilled expectations: %s", err)
 	}
 }
 
-func TestBulkRemove(t *testing.T) {
+func TestDeleteMany(t *testing.T) {
 	db, mock := redismock.NewClientMock()
 	c := WithClient(db)
 	storeCacheableEntity := &CacheableEntity{
@@ -123,15 +102,16 @@ func TestBulkRemove(t *testing.T) {
 		Value:  1,
 		Expiry: time.Microsecond,
 	}
+	orgID := valuer.GenerateUUID()
 
-	mock.ExpectSet("key", storeCacheableEntity, 10*time.Second).RedisNil()
-	_ = c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second)
-	mock.ExpectSet("key2", storeCacheableEntity, 10*time.Second).RedisNil()
-	_ = c.Store(context.Background(), "key2", storeCacheableEntity, 10*time.Second)
+	mock.ExpectSet(strings.Join([]string{orgID.StringValue(), "key"}, "::"), storeCacheableEntity, 10*time.Second).RedisNil()
+	_ = c.Set(context.Background(), orgID, "key", storeCacheableEntity, 10*time.Second)
+	mock.ExpectSet(strings.Join([]string{orgID.StringValue(), "key2"}, "::"), storeCacheableEntity, 10*time.Second).RedisNil()
+	_ = c.Set(context.Background(), orgID, "key2", storeCacheableEntity, 10*time.Second)
 
-	mock.ExpectDel("key", "key2").RedisNil()
-	c.BulkRemove(context.Background(), []string{"key", "key2"})
+	mock.ExpectDel(strings.Join([]string{orgID.StringValue(), "key"}, "::"), strings.Join([]string{orgID.StringValue(), "key2"}, "::")).RedisNil()
+	c.DeleteMany(context.Background(), orgID, []string{"key", "key2"})
 
 	if err := mock.ExpectationsWereMet(); err != nil {
 		t.Errorf("there were unfulfilled expectations: %s", err)
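These tests also pin down the tenant-scoped key layout the new cache uses: the org UUID is joined with the logical key as "<orgID>::<key>" before it reaches Redis. A minimal sketch of that composition (the helper name is illustrative, not from the commit):

package main

import (
	"fmt"
	"strings"
)

// tenantKey mirrors the layout asserted in the tests above:
// the org UUID and the logical key, joined by "::".
func tenantKey(orgID, key string) string {
	return strings.Join([]string{orgID, key}, "::")
}

func main() {
	// With a real valuer.UUID this would be orgID.StringValue().
	fmt.Println(tenantKey("9f3d2c1a-0000-4000-8000-000000000000", "key"))
	// Output: 9f3d2c1a-0000-4000-8000-000000000000::key
}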

View File

@@ -795,18 +795,14 @@ func (r *ClickHouseReader) GetSpansForTrace(ctx context.Context, traceID string,
 	return searchScanResponses, nil
 }
 
-func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadataCache(ctx context.Context, traceID string) (*model.GetWaterfallSpansForTraceWithMetadataCache, error) {
+func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadataCache(ctx context.Context, orgID valuer.UUID, traceID string) (*model.GetWaterfallSpansForTraceWithMetadataCache, error) {
 	cachedTraceData := new(model.GetWaterfallSpansForTraceWithMetadataCache)
-	cacheStatus, err := r.cache.Retrieve(ctx, fmt.Sprintf("getWaterfallSpansForTraceWithMetadata-%v", traceID), cachedTraceData, false)
+	err := r.cache.Get(ctx, orgID, strings.Join([]string{"getWaterfallSpansForTraceWithMetadata", traceID}, "-"), cachedTraceData, false)
 	if err != nil {
 		zap.L().Debug("error in retrieving getWaterfallSpansForTraceWithMetadata cache", zap.Error(err), zap.String("traceID", traceID))
 		return nil, err
 	}
 
-	if cacheStatus != cache.RetrieveStatusHit {
-		return nil, errors.Errorf("cache status for getWaterfallSpansForTraceWithMetadata : %s, traceID: %s", cacheStatus, traceID)
-	}
-
 	if time.Since(time.UnixMilli(int64(cachedTraceData.EndTime))) < r.fluxIntervalForTraceDetail {
 		zap.L().Info("the trace end time falls under the flux interval, skipping getWaterfallSpansForTraceWithMetadata cache", zap.String("traceID", traceID))
 		return nil, errors.Errorf("the trace end time falls under the flux interval, skipping getWaterfallSpansForTraceWithMetadata cache, traceID: %s", traceID)
@@ -816,7 +812,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadataCache(ctx contex
 	return cachedTraceData, nil
 }
 
-func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Context, traceID string, req *model.GetWaterfallSpansForTraceWithMetadataParams) (*model.GetWaterfallSpansForTraceWithMetadataResponse, *model.ApiError) {
+func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Context, orgID valuer.UUID, traceID string, req *model.GetWaterfallSpansForTraceWithMetadataParams) (*model.GetWaterfallSpansForTraceWithMetadataResponse, *model.ApiError) {
 	response := new(model.GetWaterfallSpansForTraceWithMetadataResponse)
 	var startTime, endTime, durationNano, totalErrorSpans, totalSpans uint64
 	var spanIdToSpanNodeMap = map[string]*model.Span{}
@@ -826,7 +822,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Con
 	var hasMissingSpans bool
 
 	claims, errv2 := authtypes.ClaimsFromContext(ctx)
-	cachedTraceData, err := r.GetWaterfallSpansForTraceWithMetadataCache(ctx, traceID)
+	cachedTraceData, err := r.GetWaterfallSpansForTraceWithMetadataCache(ctx, orgID, traceID)
 	if err == nil {
 		startTime = cachedTraceData.StartTime
 		endTime = cachedTraceData.EndTime
@@ -984,7 +980,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Con
 	}
 	zap.L().Info("getWaterfallSpansForTraceWithMetadata: processing pre cache", zap.Duration("duration", time.Since(processingBeforeCache)), zap.String("traceID", traceID))
-	cacheErr := r.cache.Store(ctx, fmt.Sprintf("getWaterfallSpansForTraceWithMetadata-%v", traceID), &traceCache, time.Minute*5)
+	cacheErr := r.cache.Set(ctx, orgID, strings.Join([]string{"getWaterfallSpansForTraceWithMetadata", traceID}, "-"), &traceCache, time.Minute*5)
 	if cacheErr != nil {
 		zap.L().Debug("failed to store cache for getWaterfallSpansForTraceWithMetadata", zap.String("traceID", traceID), zap.Error(err))
 	}
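The cache read above is guarded by a "flux interval": if the cached trace ended recently enough that late-arriving spans may still be written, the cached entry is deliberately rejected and the caller falls through to ClickHouse. A standalone sketch of that guard with hypothetical values (the real interval comes from r.fluxIntervalForTraceDetail):

package main

import (
	"errors"
	"fmt"
	"time"
)

// freshEnough rejects cached trace data whose end time is still inside the
// flux interval, i.e. the window in which late spans may still arrive.
func freshEnough(endTimeMs uint64, fluxInterval time.Duration) error {
	if time.Since(time.UnixMilli(int64(endTimeMs))) < fluxInterval {
		return errors.New("trace end time falls under the flux interval, skipping cache")
	}
	return nil
}

func main() {
	recent := uint64(time.Now().Add(-30 * time.Second).UnixMilli())
	fmt.Println(freshEnough(recent, 5*time.Minute)) // rejected: still in flux
	old := uint64(time.Now().Add(-time.Hour).UnixMilli())
	fmt.Println(freshEnough(old, 5*time.Minute)) // <nil>: safe to serve from cache
}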
@@ -1007,18 +1003,14 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Con
 	return response, nil
 }
 
-func (r *ClickHouseReader) GetFlamegraphSpansForTraceCache(ctx context.Context, traceID string) (*model.GetFlamegraphSpansForTraceCache, error) {
+func (r *ClickHouseReader) GetFlamegraphSpansForTraceCache(ctx context.Context, orgID valuer.UUID, traceID string) (*model.GetFlamegraphSpansForTraceCache, error) {
 	cachedTraceData := new(model.GetFlamegraphSpansForTraceCache)
-	cacheStatus, err := r.cache.Retrieve(ctx, fmt.Sprintf("getFlamegraphSpansForTrace-%v", traceID), cachedTraceData, false)
+	err := r.cache.Get(ctx, orgID, strings.Join([]string{"getFlamegraphSpansForTrace", traceID}, "-"), cachedTraceData, false)
 	if err != nil {
 		zap.L().Debug("error in retrieving getFlamegraphSpansForTrace cache", zap.Error(err), zap.String("traceID", traceID))
 		return nil, err
 	}
 
-	if cacheStatus != cache.RetrieveStatusHit {
-		return nil, errors.Errorf("cache status for getFlamegraphSpansForTrace : %s, traceID: %s", cacheStatus, traceID)
-	}
-
 	if time.Since(time.UnixMilli(int64(cachedTraceData.EndTime))) < r.fluxIntervalForTraceDetail {
 		zap.L().Info("the trace end time falls under the flux interval, skipping getFlamegraphSpansForTrace cache", zap.String("traceID", traceID))
 		return nil, errors.Errorf("the trace end time falls under the flux interval, skipping getFlamegraphSpansForTrace cache, traceID: %s", traceID)
@@ -1028,7 +1020,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTraceCache(ctx context.Context,
 	return cachedTraceData, nil
 }
 
-func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, traceID string, req *model.GetFlamegraphSpansForTraceParams) (*model.GetFlamegraphSpansForTraceResponse, *model.ApiError) {
+func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, orgID valuer.UUID, traceID string, req *model.GetFlamegraphSpansForTraceParams) (*model.GetFlamegraphSpansForTraceResponse, *model.ApiError) {
 	trace := new(model.GetFlamegraphSpansForTraceResponse)
 	var startTime, endTime, durationNano uint64
 	var spanIdToSpanNodeMap = map[string]*model.FlamegraphSpan{}
@@ -1037,7 +1029,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, trace
 	var traceRoots []*model.FlamegraphSpan
 
 	// get the trace tree from cache!
-	cachedTraceData, err := r.GetFlamegraphSpansForTraceCache(ctx, traceID)
+	cachedTraceData, err := r.GetFlamegraphSpansForTraceCache(ctx, orgID, traceID)
 	if err == nil {
 		startTime = cachedTraceData.StartTime
@@ -1136,7 +1128,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, trace
 	}
 	zap.L().Info("getFlamegraphSpansForTrace: processing pre cache", zap.Duration("duration", time.Since(processingBeforeCache)), zap.String("traceID", traceID))
-	cacheErr := r.cache.Store(ctx, fmt.Sprintf("getFlamegraphSpansForTrace-%v", traceID), &traceCache, time.Minute*5)
+	cacheErr := r.cache.Set(ctx, orgID, strings.Join([]string{"getFlamegraphSpansForTrace", traceID}, "-"), &traceCache, time.Minute*5)
 	if cacheErr != nil {
 		zap.L().Debug("failed to store cache for getFlamegraphSpansForTrace", zap.String("traceID", traceID), zap.Error(err))
 	}
@@ -2266,11 +2258,11 @@ func (r *ClickHouseReader) GetTotalLogs(ctx context.Context) (uint64, error) {
 	return totalLogs, nil
 }
 
-func (r *ClickHouseReader) FetchTemporality(ctx context.Context, metricNames []string) (map[string]map[v3.Temporality]bool, error) {
+func (r *ClickHouseReader) FetchTemporality(ctx context.Context, orgID valuer.UUID, metricNames []string) (map[string]map[v3.Temporality]bool, error) {
 	metricNameToTemporality := make(map[string]map[v3.Temporality]bool)
 
 	var metricNamesToQuery []string
 	for _, metricName := range metricNames {
-		updatedMetadata, cacheErr := r.GetUpdatedMetricsMetadata(ctx, metricName)
+		updatedMetadata, cacheErr := r.GetUpdatedMetricsMetadata(ctx, orgID, metricName)
 		if cacheErr != nil {
 			zap.L().Info("Error in getting metrics cached metadata", zap.Error(cacheErr))
 		}
@@ -2956,7 +2948,7 @@ func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string)
 	return &result, nil
 }
 
-func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest, skipDotNames bool, skipSignozMetrics bool) (*v3.AggregateAttributeResponse, error) {
+func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, orgID valuer.UUID, req *v3.AggregateAttributeRequest, skipDotNames bool, skipSignozMetrics bool) (*v3.AggregateAttributeResponse, error) {
 	var query string
 	var err error
@@ -2991,7 +2983,7 @@ func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, req
 			continue
 		}
 
-		metadata, apiError := r.GetUpdatedMetricsMetadata(ctx, metricName)
+		metadata, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricName)
 		if apiError != nil {
 			zap.L().Error("Error in getting metrics cached metadata", zap.Error(apiError))
 		}
@@ -3096,7 +3088,7 @@ func (r *ClickHouseReader) GetMetricAttributeValues(ctx context.Context, req *v3
 	return &attributeValues, nil
 }
 
-func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, serviceName string) (*v3.MetricMetadataResponse, error) {
+func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, orgID valuer.UUID, metricName, serviceName string) (*v3.MetricMetadataResponse, error) {
 	unixMilli := common.PastDayRoundOff()
@@ -3121,7 +3113,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se
 			deltaExists = true
 		}
 	}
 
-	metadata, apiError := r.GetUpdatedMetricsMetadata(ctx, metricName)
+	metadata, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricName)
 	if apiError != nil {
 		zap.L().Error("Error in getting metric cached metadata", zap.Error(apiError))
 	}
@@ -5187,7 +5179,7 @@ func (r *ClickHouseReader) GetActiveTimeSeriesForMetricName(ctx context.Context,
 	return timeSeries, nil
 }
 
-func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError) {
+func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, orgID valuer.UUID, req *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError) {
 	var args []interface{}
 
 	// Build filter conditions (if any)
@@ -5365,7 +5357,7 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_
 	}
 
 	//get updated metrics data
-	batch, apiError := r.GetUpdatedMetricsMetadata(ctx, metricNames...)
+	batch, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricNames...)
 	if apiError != nil {
 		zap.L().Error("Error in getting metrics cached metadata", zap.Error(apiError))
 	}
@@ -6022,18 +6014,18 @@ LIMIT 40`, // added rand to get diff value every time we run this query
 	return fingerprints, nil
 }
 
-func (r *ClickHouseReader) DeleteMetricsMetadata(ctx context.Context, metricName string) *model.ApiError {
+func (r *ClickHouseReader) DeleteMetricsMetadata(ctx context.Context, orgID valuer.UUID, metricName string) *model.ApiError {
 	delQuery := fmt.Sprintf(`ALTER TABLE %s.%s DELETE WHERE metric_name = ?;`, signozMetricDBName, signozUpdatedMetricsMetadataLocalTable)
 	valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
 	err := r.db.Exec(valueCtx, delQuery, metricName)
 	if err != nil {
 		return &model.ApiError{Typ: "ClickHouseError", Err: err}
 	}
-	r.cache.Remove(ctx, constants.UpdatedMetricsMetadataCachePrefix+metricName)
+	r.cache.Delete(ctx, orgID, constants.UpdatedMetricsMetadataCachePrefix+metricName)
 	return nil
 }
 
-func (r *ClickHouseReader) UpdateMetricsMetadata(ctx context.Context, req *model.UpdateMetricsMetadata) *model.ApiError {
+func (r *ClickHouseReader) UpdateMetricsMetadata(ctx context.Context, orgID valuer.UUID, req *model.UpdateMetricsMetadata) *model.ApiError {
 	if req.MetricType == v3.MetricTypeHistogram {
 		labels := []string{"le"}
 		hasLabels, apiError := r.CheckForLabelsInMetric(ctx, req.MetricName, labels)
@@ -6062,7 +6054,7 @@ func (r *ClickHouseReader) UpdateMetricsMetadata(ctx context.Context, req *model
 		}
 	}
 
-	apiErr := r.DeleteMetricsMetadata(ctx, req.MetricName)
+	apiErr := r.DeleteMetricsMetadata(ctx, orgID, req.MetricName)
 	if apiErr != nil {
 		return apiErr
 	}
@@ -6073,7 +6065,7 @@ VALUES ( ?, ?, ?, ?, ?, ?, ?);`, signozMetricDBName, signozUpdatedMetricsMetadat
 	if err != nil {
 		return &model.ApiError{Typ: "ClickHouseError", Err: err}
 	}
-	err = r.cache.Store(ctx, constants.UpdatedMetricsMetadataCachePrefix+req.MetricName, req, -1)
+	err = r.cache.Set(ctx, orgID, constants.UpdatedMetricsMetadataCachePrefix+req.MetricName, req, -1)
 	if err != nil {
 		return &model.ApiError{Typ: "CachingErr", Err: err}
 	}
@@ -6114,7 +6106,7 @@ func (r *ClickHouseReader) CheckForLabelsInMetric(ctx context.Context, metricNam
 	return hasLE, nil
 }
 
-func (r *ClickHouseReader) PreloadMetricsMetadata(ctx context.Context) []error {
+func (r *ClickHouseReader) PreloadMetricsMetadata(ctx context.Context, orgID valuer.UUID) []error {
 	var allMetricsMetadata []model.UpdateMetricsMetadata
 	var errorList []error
 	// Fetch all rows from ClickHouse
@@ -6127,7 +6119,7 @@ func (r *ClickHouseReader) PreloadMetricsMetadata(ctx context.Context) []error {
 		return errorList
 	}
 	for _, m := range allMetricsMetadata {
-		err := r.cache.Store(ctx, constants.UpdatedMetricsMetadataCachePrefix+m.MetricName, &m, -1)
+		err := r.cache.Set(ctx, orgID, constants.UpdatedMetricsMetadataCachePrefix+m.MetricName, &m, -1)
 		if err != nil {
 			errorList = append(errorList, err)
 		}
@@ -6136,7 +6128,7 @@ func (r *ClickHouseReader) PreloadMetricsMetadata(ctx context.Context) []error {
 	return errorList
 }
 
-func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, metricNames ...string) (map[string]*model.UpdateMetricsMetadata, *model.ApiError) {
+func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, orgID valuer.UUID, metricNames ...string) (map[string]*model.UpdateMetricsMetadata, *model.ApiError) {
 	cachedMetadata := make(map[string]*model.UpdateMetricsMetadata)
 	var missingMetrics []string
@@ -6144,8 +6136,8 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, metric
 	for _, metricName := range metricNames {
 		metadata := new(model.UpdateMetricsMetadata)
 		cacheKey := constants.UpdatedMetricsMetadataCachePrefix + metricName
-		retrieveStatus, err := r.cache.Retrieve(ctx, cacheKey, metadata, true)
-		if err == nil && retrieveStatus == cache.RetrieveStatusHit {
+		err := r.cache.Get(ctx, orgID, cacheKey, metadata, true)
+		if err == nil {
 			cachedMetadata[metricName] = metadata
 		} else {
 			if err != nil {
@@ -6185,7 +6177,7 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, metric
 		// Cache the result for future requests.
 		cacheKey := constants.UpdatedMetricsMetadataCachePrefix + metadata.MetricName
-		if cacheErr := r.cache.Set(ctx, orgID, cacheKey, metadata, -1); cacheErr != nil {
 			zap.L().Error("Failed to store metrics metadata in cache", zap.String("metric_name", metadata.MetricName), zap.Error(cacheErr))
 		}
 		cachedMetadata[metadata.MetricName] = metadata
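GetUpdatedMetricsMetadata is a cache-aside lookup: try the org-scoped cache first, collect the misses, fetch those from ClickHouse, and write them back with ttl -1 (no expiry, relying on the explicit Delete issued by UpdateMetricsMetadata). A schematic sketch of that flow against a stand-in Cache interface shaped like the one this commit introduces, not the actual package API:

package metadatasketch

import (
	"context"
	"time"
)

// Cache is a stand-in shaped like the commit's org-scoped cache.
type Cache interface {
	Get(ctx context.Context, orgID string, key string, dest any) error
	Set(ctx context.Context, orgID string, key string, v any, ttl time.Duration) error
}

func metadataFor(ctx context.Context, c Cache, orgID string, names []string,
	fetch func([]string) (map[string]string, error)) (map[string]string, error) {
	out := make(map[string]string)
	var misses []string
	for _, name := range names {
		var v string
		if err := c.Get(ctx, orgID, "metrics-metadata::"+name, &v); err == nil {
			out[name] = v // hit: served from the org-scoped cache
		} else {
			misses = append(misses, name)
		}
	}
	fetched, err := fetch(misses) // misses go to the backing store (ClickHouse)
	if err != nil {
		return nil, err
	}
	for name, v := range fetched {
		_ = c.Set(ctx, orgID, "metrics-metadata::"+name, v, -1) // -1: no expiry
		out[name] = v
	}
	return out, nil
}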

View File

@@ -35,6 +35,7 @@ import (
 	jsoniter "github.com/json-iterator/go"
 	_ "github.com/mattn/go-sqlite3"
 
+	"github.com/SigNoz/signoz/pkg/cache"
 	"github.com/SigNoz/signoz/pkg/query-service/agentConf"
 	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
 	"github.com/SigNoz/signoz/pkg/query-service/app/dashboards"
@@ -53,7 +54,6 @@ import (
 	tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
 	tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
 	"github.com/SigNoz/signoz/pkg/query-service/auth"
-	"github.com/SigNoz/signoz/pkg/query-service/cache"
 	"github.com/SigNoz/signoz/pkg/query-service/contextlinks"
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 	"github.com/SigNoz/signoz/pkg/query-service/postprocess"
@@ -670,7 +670,7 @@ func (aH *APIHandler) getRule(w http.ResponseWriter, r *http.Request) {
 }
 
 // populateTemporality adds the temporality to the query if it is not present
-func (aH *APIHandler) PopulateTemporality(ctx context.Context, qp *v3.QueryRangeParamsV3) error {
+func (aH *APIHandler) PopulateTemporality(ctx context.Context, orgID valuer.UUID, qp *v3.QueryRangeParamsV3) error {
 	aH.temporalityMux.Lock()
 	defer aH.temporalityMux.Unlock()
@@ -701,7 +701,7 @@ func (aH *APIHandler) PopulateTemporality(ctx context.Context, qp *v3.QueryRange
 		}
 	}
 
-	nameToTemporality, err := aH.reader.FetchTemporality(ctx, missingTemporality)
+	nameToTemporality, err := aH.reader.FetchTemporality(ctx, orgID, missingTemporality)
 	if err != nil {
 		return err
 	}
@@ -1338,7 +1338,16 @@ func (aH *APIHandler) createDashboards(w http.ResponseWriter, r *http.Request) {
 }
 
 func (aH *APIHandler) testRule(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	defer r.Body.Close()
 	body, err := io.ReadAll(r.Body)
 	if err != nil {
@@ -1350,7 +1359,7 @@ func (aH *APIHandler) testRule(w http.ResponseWriter, r *http.Request) {
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
 	defer cancel()
 
-	alertCount, apiRrr := aH.ruleManager.TestNotification(ctx, string(body))
+	alertCount, apiRrr := aH.ruleManager.TestNotification(ctx, orgID, string(body))
 	if apiRrr != nil {
 		RespondError(w, apiRrr, nil)
 		return
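Each rewritten handler in this file opens with the same ten lines: read the auth claims from the request context, parse claims.OrgID into a valuer.UUID, and bail out with render.Error otherwise. If that repetition ever needs trimming, it could be hoisted into a small helper; a hypothetical sketch with assumed import paths (orgIDFromRequest is not part of this commit, and the valuer.UUID zero value is assumed to exist):

package handlersketch

import (
	"net/http"

	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// orgIDFromRequest collapses the repeated claims -> orgID preamble.
func orgIDFromRequest(r *http.Request) (valuer.UUID, error) {
	claims, err := authtypes.ClaimsFromContext(r.Context())
	if err != nil {
		return valuer.UUID{}, err // zero value assumed as the empty UUID
	}
	return valuer.NewUUID(claims.OrgID)
}

A handler would then open with orgID, err := orgIDFromRequest(r) and a single error branch.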
@@ -1756,6 +1765,16 @@ func (aH *APIHandler) SearchTraces(w http.ResponseWriter, r *http.Request) {
 }
 
 func (aH *APIHandler) GetWaterfallSpansForTraceWithMetadata(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	traceID := mux.Vars(r)["traceId"]
 	if traceID == "" {
 		RespondError(w, model.BadRequest(errors.New("traceID is required")), nil)
@@ -1763,13 +1782,13 @@ func (aH *APIHandler) GetWaterfallSpansForTraceWithMetadata(w http.ResponseWrite
 	}
 
 	req := new(model.GetWaterfallSpansForTraceWithMetadataParams)
-	err := json.NewDecoder(r.Body).Decode(&req)
+	err = json.NewDecoder(r.Body).Decode(&req)
 	if err != nil {
 		RespondError(w, model.BadRequest(err), nil)
 		return
 	}
 
-	result, apiErr := aH.reader.GetWaterfallSpansForTraceWithMetadata(r.Context(), traceID, req)
+	result, apiErr := aH.reader.GetWaterfallSpansForTraceWithMetadata(r.Context(), orgID, traceID, req)
 	if apiErr != nil {
 		RespondError(w, apiErr, nil)
 		return
@@ -1779,6 +1798,17 @@ func (aH *APIHandler) GetWaterfallSpansForTraceWithMetadata(w http.ResponseWrite
 }
 
 func (aH *APIHandler) GetFlamegraphSpansForTrace(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	traceID := mux.Vars(r)["traceId"]
 	if traceID == "" {
 		RespondError(w, model.BadRequest(errors.New("traceID is required")), nil)
@@ -1786,13 +1816,13 @@ func (aH *APIHandler) GetFlamegraphSpansForTrace(w http.ResponseWriter, r *http.
 	}
 
 	req := new(model.GetFlamegraphSpansForTraceParams)
-	err := json.NewDecoder(r.Body).Decode(&req)
+	err = json.NewDecoder(r.Body).Decode(&req)
 	if err != nil {
 		RespondError(w, model.BadRequest(err), nil)
 		return
 	}
 
-	result, apiErr := aH.reader.GetFlamegraphSpansForTrace(r.Context(), traceID, req)
+	result, apiErr := aH.reader.GetFlamegraphSpansForTrace(r.Context(), orgID, traceID, req)
 	if apiErr != nil {
 		RespondError(w, apiErr, nil)
 		return
@@ -2764,11 +2794,18 @@ func (aH *APIHandler) onboardConsumers(
 	aH.Respond(w, entries)
 }
 
-func (aH *APIHandler) onboardKafka(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) onboardKafka(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	messagingQueue, apiErr := ParseKafkaQueueBody(r)
 	if apiErr != nil {
 		zap.L().Error(apiErr.Err.Error())
@@ -2784,7 +2821,7 @@ func (aH *APIHandler) onboardKafka(
 		return
 	}
 
-	results, errQueriesByName, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	results, errQueriesByName, err := aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQueriesByName)
@@ -2851,9 +2888,18 @@ func (aH *APIHandler) onboardKafka(
 	aH.Respond(w, entries)
 }
 
-func (aH *APIHandler) getNetworkData(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) getNetworkData(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	attributeCache := &kafka.Clients{
 		Hash: make(map[string]struct{}),
 	}
@@ -2880,7 +2926,7 @@ func (aH *APIHandler) getNetworkData(
 	var result []*v3.Result
 	var errQueriesByName map[string]error
 
-	result, errQueriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	result, errQueriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQueriesByName)
@@ -2916,7 +2962,7 @@ func (aH *APIHandler) getNetworkData(
 		return
 	}
 
-	resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQueriesByNameFetchLatency)
@@ -2950,9 +2996,18 @@ func (aH *APIHandler) getNetworkData(
 	aH.Respond(w, resp)
 }
 
-func (aH *APIHandler) getProducerData(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) getProducerData(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	// parse the query params to retrieve the messaging queue struct
 	messagingQueue, apiErr := ParseKafkaQueueBody(r)
@@ -2978,7 +3033,7 @@ func (aH *APIHandler) getProducerData(
 	var result []*v3.Result
 	var errQuriesByName map[string]error
 
-	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQuriesByName)
@@ -2992,9 +3047,18 @@ func (aH *APIHandler) getProducerData(
 	aH.Respond(w, resp)
 }
 
-func (aH *APIHandler) getConsumerData(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) getConsumerData(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	messagingQueue, apiErr := ParseKafkaQueueBody(r)
 	if apiErr != nil {
@@ -3019,7 +3083,7 @@ func (aH *APIHandler) getConsumerData(
 	var result []*v3.Result
 	var errQuriesByName map[string]error
 
-	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQuriesByName)
@@ -3034,9 +3098,18 @@ func (aH *APIHandler) getConsumerData(
 }
 
 // s1
-func (aH *APIHandler) getPartitionOverviewLatencyData(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) getPartitionOverviewLatencyData(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	messagingQueue, apiErr := ParseKafkaQueueBody(r)
 	if apiErr != nil {
@@ -3061,7 +3134,7 @@ func (aH *APIHandler) getPartitionOverviewLatencyData(
 	var result []*v3.Result
 	var errQuriesByName map[string]error
 
-	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQuriesByName)
@@ -3076,9 +3149,18 @@ func (aH *APIHandler) getPartitionOverviewLatencyData(
 }
 
 // s1
-func (aH *APIHandler) getConsumerPartitionLatencyData(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) getConsumerPartitionLatencyData(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	messagingQueue, apiErr := ParseKafkaQueueBody(r)
 	if apiErr != nil {
@@ -3103,7 +3185,7 @@ func (aH *APIHandler) getConsumerPartitionLatencyData(
 	var result []*v3.Result
 	var errQuriesByName map[string]error
 
-	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQuriesByName)
@@ -3121,11 +3203,19 @@ func (aH *APIHandler) getConsumerPartitionLatencyData(
 // fetch traces
 // cache attributes
 // fetch byte rate metrics
-func (aH *APIHandler) getProducerThroughputOverview(
-	w http.ResponseWriter, r *http.Request,
-) {
-	messagingQueue, apiErr := ParseKafkaQueueBody(r)
+func (aH *APIHandler) getProducerThroughputOverview(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	messagingQueue, apiErr := ParseKafkaQueueBody(r)
 	if apiErr != nil {
 		zap.L().Error(apiErr.Err.Error())
 		RespondError(w, apiErr, nil)
@@ -3152,7 +3242,7 @@ func (aH *APIHandler) getProducerThroughputOverview(
 	var result []*v3.Result
 	var errQuriesByName map[string]error
 
-	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), producerQueryRangeParams)
+	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, producerQueryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQuriesByName)
@@ -3186,7 +3276,7 @@ func (aH *APIHandler) getProducerThroughputOverview(
 		return
 	}
 
-	resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQueriesByNameFetchLatency)
@@ -3224,9 +3314,18 @@ func (aH *APIHandler) getProducerThroughputOverview(
 }
 
 // s3 p details
-func (aH *APIHandler) getProducerThroughputDetails(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) getProducerThroughputDetails(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	messagingQueue, apiErr := ParseKafkaQueueBody(r)
 	if apiErr != nil {
@@ -3251,7 +3350,7 @@ func (aH *APIHandler) getProducerThroughputDetails(
 	var result []*v3.Result
 	var errQuriesByName map[string]error
 
-	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQuriesByName)
@@ -3266,9 +3365,18 @@ func (aH *APIHandler) getProducerThroughputDetails(
 }
 
 // s3 c overview
-func (aH *APIHandler) getConsumerThroughputOverview(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) getConsumerThroughputOverview(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	messagingQueue, apiErr := ParseKafkaQueueBody(r)
 	if apiErr != nil {
@@ -3293,7 +3401,7 @@ func (aH *APIHandler) getConsumerThroughputOverview(
 	var result []*v3.Result
 	var errQuriesByName map[string]error
 
-	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQuriesByName)
@@ -3308,9 +3416,18 @@ func (aH *APIHandler) getConsumerThroughputOverview(
 }
 
 // s3 c details
-func (aH *APIHandler) getConsumerThroughputDetails(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) getConsumerThroughputDetails(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	messagingQueue, apiErr := ParseKafkaQueueBody(r)
 	if apiErr != nil {
@@ -3335,7 +3452,7 @@ func (aH *APIHandler) getConsumerThroughputDetails(
 	var result []*v3.Result
 	var errQuriesByName map[string]error
 
-	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQuriesByName)
@@ -3353,9 +3470,18 @@ func (aH *APIHandler) getConsumerThroughputDetails(
 // needs logic to parse duration
 // needs logic to get the percentage
 // show 10 traces
-func (aH *APIHandler) getProducerConsumerEval(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) getProducerConsumerEval(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	messagingQueue, apiErr := ParseKafkaQueueBody(r)
 	if apiErr != nil {
@@ -3380,7 +3506,7 @@ func (aH *APIHandler) getProducerConsumerEval(
 	var result []*v3.Result
 	var errQuriesByName map[string]error
 
-	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
+	result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
 	if err != nil {
 		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
 		RespondError(w, apiErrObj, errQuriesByName)
@@ -3462,9 +3588,18 @@ func (aH *APIHandler) GetIntegration(
 	aH.Respond(w, integration)
 }
 
-func (aH *APIHandler) GetIntegrationConnectionStatus(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) GetIntegrationConnectionStatus(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	integrationId := mux.Vars(r)["integrationId"]
 	claims, errv2 := authtypes.ClaimsFromContext(r.Context())
 	if errv2 != nil {
@@ -3500,7 +3635,7 @@ func (aH *APIHandler) GetIntegrationConnectionStatus(
 	}
 
 	connectionStatus, apiErr := aH.calculateConnectionStatus(
-		r.Context(), connectionTests, lookbackSeconds,
+		r.Context(), orgID, connectionTests, lookbackSeconds,
 	)
 	if apiErr != nil {
 		RespondError(w, apiErr, "Failed to calculate integration connection status")
@@ -3512,6 +3647,7 @@ func (aH *APIHandler) GetIntegrationConnectionStatus(
 
 func (aH *APIHandler) calculateConnectionStatus(
 	ctx context.Context,
+	orgID valuer.UUID,
 	connectionTests *integrations.IntegrationConnectionTests,
 	lookbackSeconds int64,
 ) (*integrations.IntegrationConnectionStatus, *model.ApiError) {
@@ -3528,9 +3664,7 @@ func (aH *APIHandler) calculateConnectionStatus(
 	go func() {
 		defer wg.Done()
 
-		logsConnStatus, apiErr := aH.calculateLogsConnectionStatus(
-			ctx, connectionTests.Logs, lookbackSeconds,
-		)
+		logsConnStatus, apiErr := aH.calculateLogsConnectionStatus(ctx, orgID, connectionTests.Logs, lookbackSeconds)
 		resultLock.Lock()
 		defer resultLock.Unlock()
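calculateConnectionStatus fans the per-signal checks out on goroutines, with a WaitGroup for completion and a mutex guarding the shared result, as the hunk above shows for the logs check. The shape of that pattern in isolation, a generic sketch rather than the handler's exact code:

package main

import (
	"fmt"
	"sync"
)

// Fan out independent checks, collect results under a mutex, wait for all.
func main() {
	checks := []string{"logs", "metrics"}
	results := make(map[string]string)
	var mu sync.Mutex
	var wg sync.WaitGroup
	for _, name := range checks {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			status := "ok" // stand-in for a real connection probe
			mu.Lock()
			defer mu.Unlock()
			results[name] = status
		}(name)
	}
	wg.Wait()
	fmt.Println(results)
}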
@@ -3595,11 +3729,7 @@ func (aH *APIHandler) calculateConnectionStatus(
 	return result, nil
 }
 
-func (aH *APIHandler) calculateLogsConnectionStatus(
-	ctx context.Context,
-	logsConnectionTest *integrations.LogsConnectionTest,
-	lookbackSeconds int64,
-) (*integrations.SignalConnectionStatus, *model.ApiError) {
+func (aH *APIHandler) calculateLogsConnectionStatus(ctx context.Context, orgID valuer.UUID, logsConnectionTest *integrations.LogsConnectionTest, lookbackSeconds int64) (*integrations.SignalConnectionStatus, *model.ApiError) {
 	if logsConnectionTest == nil {
 		return nil, nil
 	}
@@ -3637,9 +3767,7 @@ func (aH *APIHandler) calculateLogsConnectionStatus(
 			},
 		},
 	}
-	queryRes, _, err := aH.querier.QueryRange(
-		ctx, qrParams,
-	)
+	queryRes, _, err := aH.querier.QueryRange(ctx, orgID, qrParams)
 	if err != nil {
 		return nil, model.InternalError(fmt.Errorf(
 			"could not query for integration connection status: %w", err,
@@ -3674,9 +3802,7 @@
 	return nil, nil
 }
 
-func (aH *APIHandler) InstallIntegration(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) InstallIntegration(w http.ResponseWriter, r *http.Request) {
 	req := integrations.InstallIntegrationRequest{}
 	err := json.NewDecoder(r.Body).Decode(&req)
@@ -3702,9 +3828,7 @@ func (aH *APIHandler) InstallIntegration(
 	aH.Respond(w, integration)
 }
 
-func (aH *APIHandler) UninstallIntegration(
-	w http.ResponseWriter, r *http.Request,
-) {
+func (aH *APIHandler) UninstallIntegration(w http.ResponseWriter, r *http.Request) {
 	req := integrations.UninstallIntegrationRequest{}
 	err := json.NewDecoder(r.Body).Decode(&req)
@@ -3959,6 +4083,17 @@ func (aH *APIHandler) CloudIntegrationsListServices(
 func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
 	w http.ResponseWriter, r *http.Request,
 ) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	cloudProvider := mux.Vars(r)["cloudProvider"]
 	serviceId := mux.Vars(r)["serviceId"]
@@ -3986,7 +4121,7 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
 	// Add connection status for the 2 signals.
 	if cloudAccountId != nil {
 		connStatus, apiErr := aH.calculateCloudIntegrationServiceConnectionStatus(
-			r.Context(), cloudProvider, *cloudAccountId, resp,
+			r.Context(), orgID, cloudProvider, *cloudAccountId, resp,
 		)
 		if apiErr != nil {
 			RespondError(w, apiErr, nil)
@@ -4000,6 +4135,7 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
 
 func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
 	ctx context.Context,
+	orgID valuer.UUID,
 	cloudProvider string,
 	cloudAccountId string,
 	svcDetails *cloudintegrations.CloudServiceDetails,
@@ -4052,7 +4188,7 @@ func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
 		defer wg.Done()
 
 		logsConnStatus, apiErr := aH.calculateAWSIntegrationSvcLogsConnectionStatus(
-			ctx, cloudAccountId, telemetryCollectionStrategy.AWSLogs,
+			ctx, orgID, cloudAccountId, telemetryCollectionStrategy.AWSLogs,
 		)
 		resultLock.Lock()
@@ -4126,6 +4262,7 @@ func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
 func (aH *APIHandler) calculateAWSIntegrationSvcLogsConnectionStatus(
 	ctx context.Context,
+	orgID valuer.UUID,
 	cloudAccountId string,
 	strategy *cloudintegrations.AWSLogsCollectionStrategy,
 ) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
@@ -4184,7 +4321,7 @@ func (aH *APIHandler) calculateAWSIntegrationSvcLogsConnectionStatus(
 		},
 	}
 	queryRes, _, err := aH.querier.QueryRange(
-		ctx, qrParams,
+		ctx, orgID, qrParams,
 	)
 	if err != nil {
 		return nil, model.InternalError(fmt.Errorf(
@@ -4639,6 +4776,17 @@ func (aH *APIHandler) deleteSavedView(w http.ResponseWriter, r *http.Request) {
 }
 
 func (aH *APIHandler) autocompleteAggregateAttributes(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	var response *v3.AggregateAttributeResponse
 	req, err := parseAggregateAttributeRequest(r)
@@ -4649,7 +4797,7 @@ func (aH *APIHandler) autocompleteAggregateAttributes(w http.ResponseWriter, r *
 	switch req.DataSource {
 	case v3.DataSourceMetrics:
-		response, err = aH.reader.GetMetricAggregateAttributes(r.Context(), req, true, false)
+		response, err = aH.reader.GetMetricAggregateAttributes(r.Context(), orgID, req, true, false)
 	case v3.DataSourceLogs:
 		response, err = aH.reader.GetLogAggregateAttributes(r.Context(), req)
 	case v3.DataSourceTraces:
@@ -4811,9 +4959,18 @@ func (aH *APIHandler) QueryRangeV3Format(w http.ResponseWriter, r *http.Request)
 }
 
 func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.QueryRangeParamsV3, w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	var result []*v3.Result
-	var err error
 	var errQuriesByName map[string]error
 	var spanKeys map[string]v3.AttributeKey
 	if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
@@ -4878,7 +5035,7 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
 		}
 	}
 
-	result, errQuriesByName, err = aH.querier.QueryRange(ctx, queryRangeParams)
+	result, errQuriesByName, err = aH.querier.QueryRange(ctx, orgID, queryRangeParams)
 	if err != nil {
 		queryErrors := map[string]string{}
@@ -5022,6 +5179,17 @@ func sendQueryResultEvents(r *http.Request, result []*v3.Result, queryRangeParam
 }
 
 func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
 
 	if apiErrorObj != nil {
@@ -5031,7 +5199,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// add temporality for each metric
-	temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
+	temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
 	if temporalityErr != nil {
 		zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
 		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
@ -5199,9 +5367,20 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
} }
func (aH *APIHandler) getMetricMetadata(w http.ResponseWriter, r *http.Request) { func (aH *APIHandler) getMetricMetadata(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(w, err)
return
}
metricName := r.URL.Query().Get("metricName") metricName := r.URL.Query().Get("metricName")
serviceName := r.URL.Query().Get("serviceName") serviceName := r.URL.Query().Get("serviceName")
metricMetadata, err := aH.reader.GetMetricMetadata(r.Context(), metricName, serviceName) metricMetadata, err := aH.reader.GetMetricMetadata(r.Context(), orgID, metricName, serviceName)
if err != nil { if err != nil {
RespondError(w, &model.ApiError{Err: err, Typ: model.ErrorInternal}, nil) RespondError(w, &model.ApiError{Err: err, Typ: model.ErrorInternal}, nil)
return return
@ -5211,9 +5390,18 @@ func (aH *APIHandler) getMetricMetadata(w http.ResponseWriter, r *http.Request)
} }
func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.QueryRangeParamsV3, w http.ResponseWriter, r *http.Request) { func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.QueryRangeParamsV3, w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(w, err)
return
}
var result []*v3.Result var result []*v3.Result
var err error
var errQuriesByName map[string]error var errQuriesByName map[string]error
var spanKeys map[string]v3.AttributeKey var spanKeys map[string]v3.AttributeKey
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder { if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
@ -5255,7 +5443,7 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
} }
} }
result, errQuriesByName, err = aH.querierV2.QueryRange(ctx, queryRangeParams) result, errQuriesByName, err = aH.querierV2.QueryRange(ctx, orgID, queryRangeParams)
if err != nil { if err != nil {
queryErrors := map[string]string{} queryErrors := map[string]string{}
@ -5288,6 +5476,17 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
} }
func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) { func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(w, err)
return
}
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r) queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil { if apiErrorObj != nil {
@ -5298,7 +5497,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
queryRangeParams.Version = "v4" queryRangeParams.Version = "v4"
// add temporality for each metric // add temporality for each metric
temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams) temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
if temporalityErr != nil { if temporalityErr != nil {
zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr)) zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil) RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
@ -5364,6 +5563,17 @@ func (aH *APIHandler) getQueueOverview(w http.ResponseWriter, r *http.Request) {
} }
func (aH *APIHandler) getDomainList(w http.ResponseWriter, r *http.Request) { func (aH *APIHandler) getDomainList(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(w, err)
return
}
thirdPartyQueryRequest, apiErr := ParseRequestBody(r) thirdPartyQueryRequest, apiErr := ParseRequestBody(r)
if apiErr != nil { if apiErr != nil {
zap.L().Error(apiErr.Err.Error()) zap.L().Error(apiErr.Err.Error())
@ -5381,7 +5591,7 @@ func (aH *APIHandler) getDomainList(w http.ResponseWriter, r *http.Request) {
var result []*v3.Result var result []*v3.Result
var errQuriesByName map[string]error var errQuriesByName map[string]error
result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
if err != nil { if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName) RespondError(w, apiErrObj, errQuriesByName)
@ -5406,8 +5616,18 @@ func (aH *APIHandler) getDomainList(w http.ResponseWriter, r *http.Request) {
} }
func (aH *APIHandler) getDomainInfo(w http.ResponseWriter, r *http.Request) { func (aH *APIHandler) getDomainInfo(w http.ResponseWriter, r *http.Request) {
thirdPartyQueryRequest, apiErr := ParseRequestBody(r) claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(w, err)
return
}
thirdPartyQueryRequest, apiErr := ParseRequestBody(r)
if apiErr != nil { if apiErr != nil {
zap.L().Error(apiErr.Err.Error()) zap.L().Error(apiErr.Err.Error())
RespondError(w, apiErr, nil) RespondError(w, apiErr, nil)
@ -5425,7 +5645,7 @@ func (aH *APIHandler) getDomainInfo(w http.ResponseWriter, r *http.Request) {
var result []*v3.Result var result []*v3.Result
var errQuriesByName map[string]error var errQuriesByName map[string]error
result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), orgID, queryRangeParams)
if err != nil { if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName) RespondError(w, apiErrObj, errQuriesByName)
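
Every handler in this hunk now opens with the same two steps before it touches the reader or querier: pull the auth claims off the request context, then parse claims.OrgID into a typed UUID. A minimal sketch of that shared preamble, using only the imports this diff adds (authtypes, valuer); the helper name orgIDFromRequest and the package line are hypothetical, since the commit inlines this block in each handler rather than factoring it out:

package app // sketch only; package name assumed

import (
    "net/http"

    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
)

// orgIDFromRequest resolves the tenant for a request. The auth middleware
// stores claims on the request context; the OrgID string inside them is
// parsed into a typed valuer.UUID so every downstream query and cache key
// is scoped to one organization.
func orgIDFromRequest(r *http.Request) (valuer.UUID, error) {
    claims, err := authtypes.ClaimsFromContext(r.Context())
    if err != nil {
        return valuer.UUID{}, err
    }
    return valuer.NewUUID(claims.OrgID)
}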

View File

@ -4,7 +4,10 @@ import (
    "encoding/json"
    "net/http"

+   "github.com/SigNoz/signoz/pkg/http/render"
    "github.com/SigNoz/signoz/pkg/query-service/model"
+   "github.com/SigNoz/signoz/pkg/types/authtypes"
+   "github.com/SigNoz/signoz/pkg/valuer"
)

func (aH *APIHandler) getHostAttributeKeys(w http.ResponseWriter, r *http.Request) {
@ -50,17 +53,27 @@ func (aH *APIHandler) getHostAttributeValues(w http.ResponseWriter, r *http.Requ

func (aH *APIHandler) getHostList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.HostListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   req := model.HostListRequest{}

    // parse request
-   err := json.NewDecoder(r.Body).Decode(&req)
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

    // get host list
-   hostList, err := aH.hostsRepo.GetHostList(ctx, req)
+   hostList, err := aH.hostsRepo.GetHostList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
@ -106,15 +119,25 @@ func (aH *APIHandler) getProcessAttributeValues(w http.ResponseWriter, r *http.R

func (aH *APIHandler) getProcessList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.ProcessListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
-   err := json.NewDecoder(r.Body).Decode(&req)
+   req := model.ProcessListRequest{}
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

-   hostList, err := aH.processesRepo.GetProcessList(ctx, req)
+   hostList, err := aH.processesRepo.GetProcessList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
@ -159,15 +182,25 @@ func (aH *APIHandler) getPodAttributeValues(w http.ResponseWriter, r *http.Reque

func (aH *APIHandler) getPodList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.PodListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
-   err := json.NewDecoder(r.Body).Decode(&req)
+   req := model.PodListRequest{}
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

-   podList, err := aH.podsRepo.GetPodList(ctx, req)
+   podList, err := aH.podsRepo.GetPodList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
@ -212,15 +245,25 @@ func (aH *APIHandler) getNodeAttributeValues(w http.ResponseWriter, r *http.Requ

func (aH *APIHandler) getNodeList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.NodeListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
-   err := json.NewDecoder(r.Body).Decode(&req)
+   req := model.NodeListRequest{}
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

-   nodeList, err := aH.nodesRepo.GetNodeList(ctx, req)
+   nodeList, err := aH.nodesRepo.GetNodeList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
@ -265,15 +308,25 @@ func (aH *APIHandler) getNamespaceAttributeValues(w http.ResponseWriter, r *http

func (aH *APIHandler) getNamespaceList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.NamespaceListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
-   err := json.NewDecoder(r.Body).Decode(&req)
+   req := model.NamespaceListRequest{}
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

-   namespaceList, err := aH.namespacesRepo.GetNamespaceList(ctx, req)
+   namespaceList, err := aH.namespacesRepo.GetNamespaceList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
@ -318,15 +371,25 @@ func (aH *APIHandler) getClusterAttributeValues(w http.ResponseWriter, r *http.R

func (aH *APIHandler) getClusterList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.ClusterListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
-   err := json.NewDecoder(r.Body).Decode(&req)
+   req := model.ClusterListRequest{}
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

-   clusterList, err := aH.clustersRepo.GetClusterList(ctx, req)
+   clusterList, err := aH.clustersRepo.GetClusterList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
@ -371,15 +434,25 @@ func (aH *APIHandler) getDeploymentAttributeValues(w http.ResponseWriter, r *htt

func (aH *APIHandler) getDeploymentList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.DeploymentListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
-   err := json.NewDecoder(r.Body).Decode(&req)
+   req := model.DeploymentListRequest{}
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

-   deploymentList, err := aH.deploymentsRepo.GetDeploymentList(ctx, req)
+   deploymentList, err := aH.deploymentsRepo.GetDeploymentList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
@ -424,15 +497,25 @@ func (aH *APIHandler) getDaemonSetAttributeValues(w http.ResponseWriter, r *http

func (aH *APIHandler) getDaemonSetList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.DaemonSetListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
-   err := json.NewDecoder(r.Body).Decode(&req)
+   req := model.DaemonSetListRequest{}
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

-   daemonSetList, err := aH.daemonsetsRepo.GetDaemonSetList(ctx, req)
+   daemonSetList, err := aH.daemonsetsRepo.GetDaemonSetList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
@ -477,15 +560,25 @@ func (aH *APIHandler) getStatefulSetAttributeValues(w http.ResponseWriter, r *ht

func (aH *APIHandler) getStatefulSetList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.StatefulSetListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
-   err := json.NewDecoder(r.Body).Decode(&req)
+   req := model.StatefulSetListRequest{}
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

-   statefulSetList, err := aH.statefulsetsRepo.GetStatefulSetList(ctx, req)
+   statefulSetList, err := aH.statefulsetsRepo.GetStatefulSetList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
@ -528,15 +621,25 @@ func (aH *APIHandler) getJobAttributeValues(w http.ResponseWriter, r *http.Reque

func (aH *APIHandler) getJobList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.JobListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
-   err := json.NewDecoder(r.Body).Decode(&req)
+   req := model.JobListRequest{}
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

-   jobList, err := aH.jobsRepo.GetJobList(ctx, req)
+   jobList, err := aH.jobsRepo.GetJobList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
@ -547,15 +650,25 @@ func (aH *APIHandler) getJobList(w http.ResponseWriter, r *http.Request) {

func (aH *APIHandler) getPvcList(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

-   req := model.VolumeListRequest{}
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
-   err := json.NewDecoder(r.Body).Decode(&req)
+   req := model.VolumeListRequest{}
+   err = json.NewDecoder(r.Body).Decode(&req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

-   pvcList, err := aH.pvcsRepo.GetPvcList(ctx, req)
+   pvcList, err := aH.pvcsRepo.GetPvcList(ctx, orgID, req)
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
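
A side effect of the new preamble shows up in every list handler above: the JSON decode switches from err := to err =. Once the claims lookup has declared err, a bare err := json.NewDecoder(r.Body).Decode(&req) would no longer compile ("no new variables on left side of :="), while hostList, err := ... stays legal because the list variable is new. A condensed, hypothetical handler illustrating the scoping (the function name and aH.Respond are assumptions, not the literal code from this diff):

// Condensed sketch of one list handler after this commit.
func (aH *APIHandler) getHostListSketch(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

    claims, err := authtypes.ClaimsFromContext(r.Context()) // declares err
    if err != nil {
        render.Error(w, err)
        return
    }
    orgID, err := valuer.NewUUID(claims.OrgID) // := is legal: orgID is new
    if err != nil {
        render.Error(w, err)
        return
    }

    req := model.HostListRequest{}
    err = json.NewDecoder(r.Body).Decode(&req) // plain =, err already declared
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }

    // orgID rides along on the repo call, so cached results for one tenant
    // can never be served to another.
    hostList, err := aH.hostsRepo.GetHostList(ctx, orgID, req) // := legal: hostList is new
    if err != nil {
        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
        return
    }
    aH.Respond(w, hostList) // assumed response helper
}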

View File

@ -11,6 +11,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "golang.org/x/exp/slices"
)
@ -131,7 +132,7 @@ func (p *ClustersRepo) getMetadataAttributes(ctx context.Context, req model.Clus
    return clusterAttrs, nil
}

-func (p *ClustersRepo) getTopClusterGroups(ctx context.Context, req model.ClusterListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (p *ClustersRepo) getTopClusterGroups(ctx context.Context, orgID valuer.UUID, req model.ClusterListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopClusters(req)

    queryNames := queryNamesForClusters[req.OrderBy.ColumnName]
@ -162,7 +163,7 @@ func (p *ClustersRepo) getTopClusterGroups(ctx context.Context, req model.Cluste
        topClusterGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, topClusterGroupsQueryRangeParams)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, topClusterGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -201,7 +202,7 @@ func (p *ClustersRepo) getTopClusterGroups(ctx context.Context, req model.Cluste
    return topClusterGroups, allClusterGroups, nil
}

-func (p *ClustersRepo) GetClusterList(ctx context.Context, req model.ClusterListRequest) (model.ClusterListResponse, error) {
+func (p *ClustersRepo) GetClusterList(ctx context.Context, orgID valuer.UUID, req model.ClusterListRequest) (model.ClusterListResponse, error) {
    resp := model.ClusterListResponse{}

    if req.Limit == 0 {
@ -243,7 +244,7 @@ func (p *ClustersRepo) GetClusterList(ctx context.Context, req model.ClusterList
        return resp, err
    }

-   topClusterGroups, allClusterGroups, err := p.getTopClusterGroups(ctx, req, query)
+   topClusterGroups, allClusterGroups, err := p.getTopClusterGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -277,7 +278,7 @@ func (p *ClustersRepo) GetClusterList(ctx context.Context, req model.ClusterList
        }
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }
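
ClustersRepo sets the template the remaining infra repos follow: the exported list method takes orgID valuer.UUID and forwards it into each private helper and every querierV2.QueryRange call, so the tenant travels the whole chain down to the cache layer. A sketch of that shape, against a minimal interface matching the QueryRange signature used throughout this diff (both names below are assumptions for illustration):

// orgScopedQuerier mirrors the QueryRange signature seen in this commit:
// results, per-query errors keyed by query name, and an overall error,
// all scoped by orgID.
type orgScopedQuerier interface {
    QueryRange(ctx context.Context, orgID valuer.UUID, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error)
}

// topGroupsForOrg is a hypothetical stand-in for helpers like
// getTopClusterGroups: it simply forwards the tenant along with the query.
func topGroupsForOrg(ctx context.Context, orgID valuer.UUID, q orgScopedQuerier, params *v3.QueryRangeParamsV3) ([]*v3.Result, error) {
    results, _, err := q.QueryRange(ctx, orgID, params)
    if err != nil {
        return nil, err
    }
    return results, nil
}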

View File

@ -11,6 +11,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "golang.org/x/exp/slices"
)
@ -198,7 +199,7 @@ func (d *DaemonSetsRepo) getMetadataAttributes(ctx context.Context, req model.Da
    return daemonSetAttrs, nil
}

-func (d *DaemonSetsRepo) getTopDaemonSetGroups(ctx context.Context, req model.DaemonSetListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (d *DaemonSetsRepo) getTopDaemonSetGroups(ctx context.Context, orgID valuer.UUID, req model.DaemonSetListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopDaemonSets(req)

    queryNames := queryNamesForDaemonSets[req.OrderBy.ColumnName]
@ -229,7 +230,7 @@ func (d *DaemonSetsRepo) getTopDaemonSetGroups(ctx context.Context, req model.Da
        topDaemonSetGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := d.querierV2.QueryRange(ctx, topDaemonSetGroupsQueryRangeParams)
+   queryResponse, _, err := d.querierV2.QueryRange(ctx, orgID, topDaemonSetGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -268,7 +269,7 @@ func (d *DaemonSetsRepo) getTopDaemonSetGroups(ctx context.Context, req model.Da
    return topDaemonSetGroups, allDaemonSetGroups, nil
}

-func (d *DaemonSetsRepo) GetDaemonSetList(ctx context.Context, req model.DaemonSetListRequest) (model.DaemonSetListResponse, error) {
+func (d *DaemonSetsRepo) GetDaemonSetList(ctx context.Context, orgID valuer.UUID, req model.DaemonSetListRequest) (model.DaemonSetListResponse, error) {
    resp := model.DaemonSetListResponse{}

    if req.Limit == 0 {
@ -320,7 +321,7 @@ func (d *DaemonSetsRepo) GetDaemonSetList(ctx context.Context, req model.DaemonS
        return resp, err
    }

-   topDaemonSetGroups, allDaemonSetGroups, err := d.getTopDaemonSetGroups(ctx, req, query)
+   topDaemonSetGroups, allDaemonSetGroups, err := d.getTopDaemonSetGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -354,7 +355,7 @@ func (d *DaemonSetsRepo) GetDaemonSetList(ctx context.Context, req model.DaemonS
        }
    }

-   queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := d.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }

View File

@ -11,6 +11,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "golang.org/x/exp/slices"
)
@ -198,7 +199,7 @@ func (d *DeploymentsRepo) getMetadataAttributes(ctx context.Context, req model.D
    return deploymentAttrs, nil
}

-func (d *DeploymentsRepo) getTopDeploymentGroups(ctx context.Context, req model.DeploymentListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (d *DeploymentsRepo) getTopDeploymentGroups(ctx context.Context, orgID valuer.UUID, req model.DeploymentListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopDeployments(req)

    queryNames := queryNamesForDeployments[req.OrderBy.ColumnName]
@ -229,7 +230,7 @@ func (d *DeploymentsRepo) getTopDeploymentGroups(ctx context.Context, req model.
        topDeploymentGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := d.querierV2.QueryRange(ctx, topDeploymentGroupsQueryRangeParams)
+   queryResponse, _, err := d.querierV2.QueryRange(ctx, orgID, topDeploymentGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -268,7 +269,7 @@ func (d *DeploymentsRepo) getTopDeploymentGroups(ctx context.Context, req model.
    return topDeploymentGroups, allDeploymentGroups, nil
}

-func (d *DeploymentsRepo) GetDeploymentList(ctx context.Context, req model.DeploymentListRequest) (model.DeploymentListResponse, error) {
+func (d *DeploymentsRepo) GetDeploymentList(ctx context.Context, orgID valuer.UUID, req model.DeploymentListRequest) (model.DeploymentListResponse, error) {
    resp := model.DeploymentListResponse{}

    if req.Limit == 0 {
@ -320,7 +321,7 @@ func (d *DeploymentsRepo) GetDeploymentList(ctx context.Context, req model.Deplo
        return resp, err
    }

-   topDeploymentGroups, allDeploymentGroups, err := d.getTopDeploymentGroups(ctx, req, query)
+   topDeploymentGroups, allDeploymentGroups, err := d.getTopDeploymentGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -354,7 +355,7 @@ func (d *DeploymentsRepo) GetDeploymentList(ctx context.Context, req model.Deplo
        }
    }

-   queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := d.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }

View File

@ -16,6 +16,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "go.uber.org/zap"
    "golang.org/x/exp/maps"
    "golang.org/x/exp/slices"
@ -129,7 +130,7 @@ func (h *HostsRepo) GetHostAttributeValues(ctx context.Context, req v3.FilterAtt
    return &v3.FilterAttributeValueResponse{StringAttributeValues: hostNames}, nil
}

-func (h *HostsRepo) getActiveHosts(ctx context.Context, req model.HostListRequest) (map[string]bool, error) {
+func (h *HostsRepo) getActiveHosts(ctx context.Context, orgID valuer.UUID, req model.HostListRequest) (map[string]bool, error) {
    activeStatus := map[string]bool{}
    step := common.MinAllowedStepInterval(req.Start, req.End)
@ -172,7 +173,7 @@ func (h *HostsRepo) getActiveHosts(ctx context.Context, req model.HostListReques
        },
    }

-   queryResponse, _, err := h.querierV2.QueryRange(ctx, &params)
+   queryResponse, _, err := h.querierV2.QueryRange(ctx, orgID, &params)
    if err != nil {
        return nil, err
    }
@ -248,7 +249,7 @@ func (h *HostsRepo) getMetadataAttributes(ctx context.Context, req model.HostLis
    return hostAttrs, nil
}

-func (h *HostsRepo) getTopHostGroups(ctx context.Context, req model.HostListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (h *HostsRepo) getTopHostGroups(ctx context.Context, orgID valuer.UUID, req model.HostListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopHosts(req)

    queryNames := queryNamesForTopHosts[req.OrderBy.ColumnName]
@ -276,7 +277,7 @@ func (h *HostsRepo) getTopHostGroups(ctx context.Context, req model.HostListRequ
        topHostGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := h.querierV2.QueryRange(ctx, topHostGroupsQueryRangeParams)
+   queryResponse, _, err := h.querierV2.QueryRange(ctx, orgID, topHostGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -384,7 +385,7 @@ func (h *HostsRepo) IsSendingK8SAgentMetrics(ctx context.Context, req model.Host
    return maps.Keys(clusterNames), maps.Keys(nodeNames), nil
}

-func (h *HostsRepo) GetHostList(ctx context.Context, req model.HostListRequest) (model.HostListResponse, error) {
+func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req model.HostListRequest) (model.HostListResponse, error) {
    resp := model.HostListResponse{}

    if req.Limit == 0 {
@ -439,12 +440,12 @@ func (h *HostsRepo) GetHostList(ctx context.Context, req model.HostListRequest)
        return resp, err
    }

-   activeHosts, err := h.getActiveHosts(ctx, req)
+   activeHosts, err := h.getActiveHosts(ctx, orgID, req)
    if err != nil {
        return resp, err
    }

-   topHostGroups, allHostGroups, err := h.getTopHostGroups(ctx, req, query)
+   topHostGroups, allHostGroups, err := h.getTopHostGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -477,7 +478,7 @@ func (h *HostsRepo) GetHostList(ctx context.Context, req model.HostListRequest)
        }
    }

-   queryResponse, _, err := h.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := h.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }
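
getActiveHosts is the one helper in this file that builds its query from scratch rather than receiving one: it assembles a v3.QueryRangeParamsV3 inline, with the step derived via common.MinAllowedStepInterval, and runs it through the same org-scoped QueryRange. A stripped-down sketch of that construction; the function name is hypothetical and the exact params/CompositeQuery field set is assumed from how the fields are used elsewhere in this diff:

// activeHostsParams shows the inline-params pattern of getActiveHosts:
// a builder-type composite query over the requested window, stepped at the
// minimum allowed interval, ready for querierV2.QueryRange(ctx, orgID, &params).
func activeHostsParams(start, end int64, queries map[string]*v3.BuilderQuery) v3.QueryRangeParamsV3 {
    return v3.QueryRangeParamsV3{
        Start: start,
        End:   end,
        Step:  common.MinAllowedStepInterval(start, end),
        CompositeQuery: &v3.CompositeQuery{
            QueryType:      v3.QueryTypeBuilder,
            BuilderQueries: queries,
        },
    }
}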

View File

@ -11,6 +11,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "golang.org/x/exp/slices"
)
@ -242,7 +243,7 @@ func (d *JobsRepo) getMetadataAttributes(ctx context.Context, req model.JobListR
    return jobAttrs, nil
}

-func (d *JobsRepo) getTopJobGroups(ctx context.Context, req model.JobListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (d *JobsRepo) getTopJobGroups(ctx context.Context, orgID valuer.UUID, req model.JobListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopJobs(req)

    queryNames := queryNamesForJobs[req.OrderBy.ColumnName]
@ -273,7 +274,7 @@ func (d *JobsRepo) getTopJobGroups(ctx context.Context, req model.JobListRequest
        topJobGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := d.querierV2.QueryRange(ctx, topJobGroupsQueryRangeParams)
+   queryResponse, _, err := d.querierV2.QueryRange(ctx, orgID, topJobGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -312,7 +313,7 @@ func (d *JobsRepo) getTopJobGroups(ctx context.Context, req model.JobListRequest
    return topJobGroups, allJobGroups, nil
}

-func (d *JobsRepo) GetJobList(ctx context.Context, req model.JobListRequest) (model.JobListResponse, error) {
+func (d *JobsRepo) GetJobList(ctx context.Context, orgID valuer.UUID, req model.JobListRequest) (model.JobListResponse, error) {
    resp := model.JobListResponse{}

    if req.Limit == 0 {
@ -364,7 +365,7 @@ func (d *JobsRepo) GetJobList(ctx context.Context, req model.JobListRequest) (mo
        return resp, err
    }

-   topJobGroups, allJobGroups, err := d.getTopJobGroups(ctx, req, query)
+   topJobGroups, allJobGroups, err := d.getTopJobGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -398,7 +399,7 @@ func (d *JobsRepo) GetJobList(ctx context.Context, req model.JobListRequest) (mo
        }
    }

-   queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := d.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }

View File

@ -11,6 +11,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "golang.org/x/exp/slices"
)
@ -125,7 +126,7 @@ func (p *NamespacesRepo) getMetadataAttributes(ctx context.Context, req model.Na
    return namespaceAttrs, nil
}

-func (p *NamespacesRepo) getTopNamespaceGroups(ctx context.Context, req model.NamespaceListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (p *NamespacesRepo) getTopNamespaceGroups(ctx context.Context, orgID valuer.UUID, req model.NamespaceListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopNamespaces(req)

    queryNames := queryNamesForNamespaces[req.OrderBy.ColumnName]
@ -156,7 +157,7 @@ func (p *NamespacesRepo) getTopNamespaceGroups(ctx context.Context, req model.Na
        topNamespaceGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, topNamespaceGroupsQueryRangeParams)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, topNamespaceGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -195,7 +196,7 @@ func (p *NamespacesRepo) getTopNamespaceGroups(ctx context.Context, req model.Na
    return topNamespaceGroups, allNamespaceGroups, nil
}

-func (p *NamespacesRepo) GetNamespaceList(ctx context.Context, req model.NamespaceListRequest) (model.NamespaceListResponse, error) {
+func (p *NamespacesRepo) GetNamespaceList(ctx context.Context, orgID valuer.UUID, req model.NamespaceListRequest) (model.NamespaceListResponse, error) {
    resp := model.NamespaceListResponse{}

    if req.Limit == 0 {
@ -242,7 +243,7 @@ func (p *NamespacesRepo) GetNamespaceList(ctx context.Context, req model.Namespa
        return resp, err
    }

-   topNamespaceGroups, allNamespaceGroups, err := p.getTopNamespaceGroups(ctx, req, query)
+   topNamespaceGroups, allNamespaceGroups, err := p.getTopNamespaceGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -276,7 +277,7 @@ func (p *NamespacesRepo) GetNamespaceList(ctx context.Context, req model.Namespa
        }
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }

View File

@ -14,6 +14,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "golang.org/x/exp/slices"
)
@ -155,7 +156,7 @@ func (p *NodesRepo) getMetadataAttributes(ctx context.Context, req model.NodeLis
    return nodeAttrs, nil
}

-func (p *NodesRepo) getTopNodeGroups(ctx context.Context, req model.NodeListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (p *NodesRepo) getTopNodeGroups(ctx context.Context, orgID valuer.UUID, req model.NodeListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopNodes(req)

    queryNames := queryNamesForNodes[req.OrderBy.ColumnName]
@ -186,7 +187,7 @@ func (p *NodesRepo) getTopNodeGroups(ctx context.Context, req model.NodeListRequ
        topNodeGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, topNodeGroupsQueryRangeParams)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, topNodeGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -225,7 +226,7 @@ func (p *NodesRepo) getTopNodeGroups(ctx context.Context, req model.NodeListRequ
    return topNodeGroups, allNodeGroups, nil
}

-func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest) (model.NodeListResponse, error) {
+func (p *NodesRepo) GetNodeList(ctx context.Context, orgID valuer.UUID, req model.NodeListRequest) (model.NodeListResponse, error) {
    resp := model.NodeListResponse{}

    if req.Limit == 0 {
@ -267,7 +268,7 @@ func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest)
        return resp, err
    }

-   topNodeGroups, allNodeGroups, err := p.getTopNodeGroups(ctx, req, query)
+   topNodeGroups, allNodeGroups, err := p.getTopNodeGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -301,7 +302,7 @@ func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest)
        }
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }

View File

@ -14,6 +14,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "golang.org/x/exp/slices"
)
@ -300,7 +301,7 @@ func (p *PodsRepo) getMetadataAttributes(ctx context.Context, req model.PodListR
    return podAttrs, nil
}

-func (p *PodsRepo) getTopPodGroups(ctx context.Context, req model.PodListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (p *PodsRepo) getTopPodGroups(ctx context.Context, orgID valuer.UUID, req model.PodListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopPods(req)

    queryNames := queryNamesForPods[req.OrderBy.ColumnName]
@ -331,7 +332,7 @@ func (p *PodsRepo) getTopPodGroups(ctx context.Context, req model.PodListRequest
        topPodGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, topPodGroupsQueryRangeParams)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, topPodGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -370,7 +371,7 @@ func (p *PodsRepo) getTopPodGroups(ctx context.Context, req model.PodListRequest
    return topPodGroups, allPodGroups, nil
}

-func (p *PodsRepo) GetPodList(ctx context.Context, req model.PodListRequest) (model.PodListResponse, error) {
+func (p *PodsRepo) GetPodList(ctx context.Context, orgID valuer.UUID, req model.PodListRequest) (model.PodListResponse, error) {
    resp := model.PodListResponse{}

    if req.Limit == 0 {
@ -412,7 +413,7 @@ func (p *PodsRepo) GetPodList(ctx context.Context, req model.PodListRequest) (mo
        return resp, err
    }

-   topPodGroups, allPodGroups, err := p.getTopPodGroups(ctx, req, query)
+   topPodGroups, allPodGroups, err := p.getTopPodGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -446,7 +447,7 @@ func (p *PodsRepo) GetPodList(ctx context.Context, req model.PodListRequest) (mo
        }
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }

View File

@ -11,6 +11,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "golang.org/x/exp/slices"
)
@ -142,7 +143,7 @@ func (p *ProcessesRepo) getMetadataAttributes(ctx context.Context,
    return processAttrs, nil
}

-func (p *ProcessesRepo) getTopProcessGroups(ctx context.Context, req model.ProcessListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (p *ProcessesRepo) getTopProcessGroups(ctx context.Context, orgID valuer.UUID, req model.ProcessListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopProcesses(req)

    queryNames := queryNamesForTopProcesses[req.OrderBy.ColumnName]
@ -170,7 +171,7 @@ func (p *ProcessesRepo) getTopProcessGroups(ctx context.Context, req model.Proce
        topProcessGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, topProcessGroupsQueryRangeParams)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, topProcessGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -209,7 +210,7 @@ func (p *ProcessesRepo) getTopProcessGroups(ctx context.Context, req model.Proce
    return topProcessGroups, allProcessGroups, nil
}

-func (p *ProcessesRepo) GetProcessList(ctx context.Context, req model.ProcessListRequest) (model.ProcessListResponse, error) {
+func (p *ProcessesRepo) GetProcessList(ctx context.Context, orgID valuer.UUID, req model.ProcessListRequest) (model.ProcessListResponse, error) {
    resp := model.ProcessListResponse{}
    if req.Limit == 0 {
        req.Limit = 10
@ -249,7 +250,7 @@ func (p *ProcessesRepo) GetProcessList(ctx context.Context, req model.ProcessLis
        return resp, err
    }

-   topProcessGroups, allProcessGroups, err := p.getTopProcessGroups(ctx, req, query)
+   topProcessGroups, allProcessGroups, err := p.getTopProcessGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -283,7 +284,7 @@ func (p *ProcessesRepo) GetProcessList(ctx context.Context, req model.ProcessLis
        }
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }

View File

@ -11,6 +11,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "golang.org/x/exp/slices"
)
@ -158,7 +159,7 @@ func (p *PvcsRepo) getMetadataAttributes(ctx context.Context, req model.VolumeLi
    return volumeAttrs, nil
}

-func (p *PvcsRepo) getTopVolumeGroups(ctx context.Context, req model.VolumeListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (p *PvcsRepo) getTopVolumeGroups(ctx context.Context, orgID valuer.UUID, req model.VolumeListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopVolumes(req)

    queryNames := queryNamesForVolumes[req.OrderBy.ColumnName]
@ -189,7 +190,7 @@ func (p *PvcsRepo) getTopVolumeGroups(ctx context.Context, req model.VolumeListR
        topVolumeGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, topVolumeGroupsQueryRangeParams)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, topVolumeGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -228,7 +229,7 @@ func (p *PvcsRepo) getTopVolumeGroups(ctx context.Context, req model.VolumeListR
    return topVolumeGroups, allVolumeGroups, nil
}

-func (p *PvcsRepo) GetPvcList(ctx context.Context, req model.VolumeListRequest) (model.VolumeListResponse, error) {
+func (p *PvcsRepo) GetPvcList(ctx context.Context, orgID valuer.UUID, req model.VolumeListRequest) (model.VolumeListResponse, error) {
    resp := model.VolumeListResponse{}

    if req.Limit == 0 {
@ -270,7 +271,7 @@ func (p *PvcsRepo) GetPvcList(ctx context.Context, req model.VolumeListRequest)
        return resp, err
    }

-   topVolumeGroups, allVolumeGroups, err := p.getTopVolumeGroups(ctx, req, query)
+   topVolumeGroups, allVolumeGroups, err := p.getTopVolumeGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -304,7 +305,7 @@ func (p *PvcsRepo) GetPvcList(ctx context.Context, req model.VolumeListRequest)
        }
    }

-   queryResponse, _, err := p.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := p.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }

View File

@ -11,6 +11,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/model"
    v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
    "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+   "github.com/SigNoz/signoz/pkg/valuer"
    "golang.org/x/exp/slices"
)
@ -198,7 +199,7 @@ func (d *StatefulSetsRepo) getMetadataAttributes(ctx context.Context, req model.
    return statefulSetAttrs, nil
}

-func (d *StatefulSetsRepo) getTopStatefulSetGroups(ctx context.Context, req model.StatefulSetListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
+func (d *StatefulSetsRepo) getTopStatefulSetGroups(ctx context.Context, orgID valuer.UUID, req model.StatefulSetListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopStatefulSets(req)

    queryNames := queryNamesForStatefulSets[req.OrderBy.ColumnName]
@ -229,7 +230,7 @@ func (d *StatefulSetsRepo) getTopStatefulSetGroups(ctx context.Context, req mode
        topStatefulSetGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }

-   queryResponse, _, err := d.querierV2.QueryRange(ctx, topStatefulSetGroupsQueryRangeParams)
+   queryResponse, _, err := d.querierV2.QueryRange(ctx, orgID, topStatefulSetGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
@ -268,7 +269,7 @@ func (d *StatefulSetsRepo) getTopStatefulSetGroups(ctx context.Context, req mode
    return topStatefulSetGroups, allStatefulSetGroups, nil
}

-func (d *StatefulSetsRepo) GetStatefulSetList(ctx context.Context, req model.StatefulSetListRequest) (model.StatefulSetListResponse, error) {
+func (d *StatefulSetsRepo) GetStatefulSetList(ctx context.Context, orgID valuer.UUID, req model.StatefulSetListRequest) (model.StatefulSetListResponse, error) {
    resp := model.StatefulSetListResponse{}

    if req.Limit == 0 {
@ -320,7 +321,7 @@ func (d *StatefulSetsRepo) GetStatefulSetList(ctx context.Context, req model.Sta
        return resp, err
    }

-   topStatefulSetGroups, allStatefulSetGroups, err := d.getTopStatefulSetGroups(ctx, req, query)
+   topStatefulSetGroups, allStatefulSetGroups, err := d.getTopStatefulSetGroups(ctx, orgID, req, query)
    if err != nil {
        return resp, err
    }
@ -354,7 +355,7 @@ func (d *StatefulSetsRepo) GetStatefulSetList(ctx context.Context, req model.Sta
        }
    }

-   queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
+   queryResponse, _, err := d.querierV2.QueryRange(ctx, orgID, query)
    if err != nil {
        return resp, err
    }

View File

@ -17,6 +17,7 @@ import (
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3" v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/rules" "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/types/authtypes" "github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@ -48,13 +49,13 @@ func (receiver *SummaryService) FilterKeys(ctx context.Context, params *metrics_
return &response, nil return &response, nil
} }
func (receiver *SummaryService) FilterValues(ctx context.Context, params *metrics_explorer.FilterValueRequest) (*metrics_explorer.FilterValueResponse, *model.ApiError) { func (receiver *SummaryService) FilterValues(ctx context.Context, orgID valuer.UUID, params *metrics_explorer.FilterValueRequest) (*metrics_explorer.FilterValueResponse, *model.ApiError) {
var response metrics_explorer.FilterValueResponse var response metrics_explorer.FilterValueResponse
switch params.FilterKey { switch params.FilterKey {
case "metric_name": case "metric_name":
var filterValues []string var filterValues []string
request := v3.AggregateAttributeRequest{DataSource: v3.DataSourceMetrics, SearchText: params.SearchText, Limit: params.Limit} request := v3.AggregateAttributeRequest{DataSource: v3.DataSourceMetrics, SearchText: params.SearchText, Limit: params.Limit}
attributes, err := receiver.reader.GetMetricAggregateAttributes(ctx, &request, true, true) attributes, err := receiver.reader.GetMetricAggregateAttributes(ctx, orgID, &request, true, true)
if err != nil { if err != nil {
return nil, model.InternalError(err) return nil, model.InternalError(err)
} }
@ -87,13 +88,13 @@ func (receiver *SummaryService) FilterValues(ctx context.Context, params *metric
} }
} }
func (receiver *SummaryService) GetMetricsSummary(ctx context.Context, metricName string) (metrics_explorer.MetricDetailsDTO, *model.ApiError) { func (receiver *SummaryService) GetMetricsSummary(ctx context.Context, orgID valuer.UUID, metricName string) (metrics_explorer.MetricDetailsDTO, *model.ApiError) {
var metricDetailsDTO metrics_explorer.MetricDetailsDTO var metricDetailsDTO metrics_explorer.MetricDetailsDTO
g, ctx := errgroup.WithContext(ctx) g, ctx := errgroup.WithContext(ctx)
// Call 1: GetMetricMetadata // Call 1: GetMetricMetadata
g.Go(func() error { g.Go(func() error {
metadata, err := receiver.reader.GetMetricMetadata(ctx, metricName, metricName) metadata, err := receiver.reader.GetMetricMetadata(ctx, orgID, metricName, metricName)
if err != nil { if err != nil {
return &model.ApiError{Typ: "ClickHouseError", Err: err} return &model.ApiError{Typ: "ClickHouseError", Err: err}
} }
@ -217,8 +218,8 @@ func (receiver *SummaryService) GetMetricsSummary(ctx context.Context, metricNam
return metricDetailsDTO, nil return metricDetailsDTO, nil
} }
func (receiver *SummaryService) ListMetricsWithSummary(ctx context.Context, params *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError) { func (receiver *SummaryService) ListMetricsWithSummary(ctx context.Context, orgID valuer.UUID, params *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError) {
return receiver.reader.ListSummaryMetrics(ctx, params) return receiver.reader.ListSummaryMetrics(ctx, orgID, params)
} }
func (receiver *SummaryService) GetMetricsTreemap(ctx context.Context, params *metrics_explorer.TreeMapMetricsRequest) (*metrics_explorer.TreeMap, *model.ApiError) { func (receiver *SummaryService) GetMetricsTreemap(ctx context.Context, params *metrics_explorer.TreeMapMetricsRequest) (*metrics_explorer.TreeMap, *model.ApiError) {
@ -544,7 +545,7 @@ func (receiver *SummaryService) GetInspectMetrics(ctx context.Context, params *m
return baseResponse, nil return baseResponse, nil
} }
func (receiver *SummaryService) UpdateMetricsMetadata(ctx context.Context, params *metrics_explorer.UpdateMetricsMetadataRequest) *model.ApiError { func (receiver *SummaryService) UpdateMetricsMetadata(ctx context.Context, orgID valuer.UUID, params *metrics_explorer.UpdateMetricsMetadataRequest) *model.ApiError {
if params.MetricType == v3.MetricTypeSum && !params.IsMonotonic && params.Temporality == v3.Cumulative { if params.MetricType == v3.MetricTypeSum && !params.IsMonotonic && params.Temporality == v3.Cumulative {
params.MetricType = v3.MetricTypeGauge params.MetricType = v3.MetricTypeGauge
} }
@ -557,7 +558,7 @@ func (receiver *SummaryService) UpdateMetricsMetadata(ctx context.Context, param
IsMonotonic: params.IsMonotonic, IsMonotonic: params.IsMonotonic,
CreatedAt: time.Now(), CreatedAt: time.Now(),
} }
apiError := receiver.reader.UpdateMetricsMetadata(ctx, &metadata) apiError := receiver.reader.UpdateMetricsMetadata(ctx, orgID, &metadata)
if apiError != nil { if apiError != nil {
return apiError return apiError
} }
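
GetMetricsSummary fans its reader calls out with errgroup, and each goroutine now closes over the same orgID. A runnable sketch of that shape, with fetchMetadata and countSamples as hypothetical stand-ins for the reader calls (GetMetricMetadata and friends):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

type dto struct {
	Metadata string
	Samples  int
}

// Hypothetical stand-ins for the reader calls; each receives the tenant's orgID.
func fetchMetadata(ctx context.Context, orgID, metric string) (string, error) {
	return "gauge/" + metric + "@" + orgID, nil
}

func countSamples(ctx context.Context, orgID, metric string) (int, error) {
	return 42, nil
}

func metricsSummary(ctx context.Context, orgID, metric string) (dto, error) {
	var out dto
	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error { // Call 1: metadata, scoped by orgID
		md, err := fetchMetadata(ctx, orgID, metric)
		out.Metadata = md
		return err
	})
	g.Go(func() error { // Call 2: sample count, same orgID
		n, err := countSamples(ctx, orgID, metric)
		out.Samples = n
		return err
	})
	// The writes above are race-free only because each goroutine touches a
	// distinct field of out, which is the same discipline the real code uses.
	return out, g.Wait()
}

func main() {
	res, err := metricsSummary(context.Background(), "org-123", "http_requests_total")
	fmt.Println(res, err)
}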

View File

@ -14,6 +14,7 @@ import (
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3" v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/postprocess" "github.com/SigNoz/signoz/pkg/query-service/postprocess"
"github.com/SigNoz/signoz/pkg/query-service/querycache" "github.com/SigNoz/signoz/pkg/query-service/querycache"
"github.com/SigNoz/signoz/pkg/valuer"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -75,6 +76,7 @@ func prepareLogsQuery(
func (q *querier) runBuilderQuery( func (q *querier) runBuilderQuery(
ctx context.Context, ctx context.Context,
orgID valuer.UUID,
builderQuery *v3.BuilderQuery, builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3, params *v3.QueryRangeParamsV3,
cacheKeys map[string]string, cacheKeys map[string]string,
@ -106,7 +108,7 @@ func (q *querier) runBuilderQuery(
return return
} }
misses := q.queryCache.FindMissingTimeRanges(start, end, builderQuery.StepInterval, cacheKeys[queryName]) misses := q.queryCache.FindMissingTimeRanges(orgID, start, end, builderQuery.StepInterval, cacheKeys[queryName])
zap.L().Info("cache misses for logs query", zap.Any("misses", misses)) zap.L().Info("cache misses for logs query", zap.Any("misses", misses))
missedSeries := make([]querycache.CachedSeriesData, 0) missedSeries := make([]querycache.CachedSeriesData, 0)
filteredMissedSeries := make([]querycache.CachedSeriesData, 0) filteredMissedSeries := make([]querycache.CachedSeriesData, 0)
@ -146,10 +148,10 @@ func (q *querier) runBuilderQuery(
}) })
} }
filteredMergedSeries := q.queryCache.MergeWithCachedSeriesDataV2(cacheKeys[queryName], filteredMissedSeries) filteredMergedSeries := q.queryCache.MergeWithCachedSeriesDataV2(orgID, cacheKeys[queryName], filteredMissedSeries)
q.queryCache.StoreSeriesInCache(cacheKeys[queryName], filteredMergedSeries) q.queryCache.StoreSeriesInCache(orgID, cacheKeys[queryName], filteredMergedSeries)
mergedSeries := q.queryCache.MergeWithCachedSeriesDataV2(cacheKeys[queryName], missedSeries) mergedSeries := q.queryCache.MergeWithCachedSeriesDataV2(orgID, cacheKeys[queryName], missedSeries)
resultSeries := common.GetSeriesFromCachedDataV2(mergedSeries, start, end, builderQuery.StepInterval) resultSeries := common.GetSeriesFromCachedDataV2(mergedSeries, start, end, builderQuery.StepInterval)
@ -212,7 +214,7 @@ func (q *querier) runBuilderQuery(
} }
if builderQuery.DataSource == v3.DataSourceMetrics && !q.testingMode { if builderQuery.DataSource == v3.DataSourceMetrics && !q.testingMode {
metadata, apiError := q.reader.GetUpdatedMetricsMetadata(ctx, builderQuery.AggregateAttribute.Key) metadata, apiError := q.reader.GetUpdatedMetricsMetadata(ctx, orgID, builderQuery.AggregateAttribute.Key)
if apiError != nil { if apiError != nil {
zap.L().Error("Error in getting metrics cached metadata", zap.Error(apiError)) zap.L().Error("Error in getting metrics cached metadata", zap.Error(apiError))
} }
@ -238,7 +240,7 @@ func (q *querier) runBuilderQuery(
} }
cacheKey := cacheKeys[queryName] cacheKey := cacheKeys[queryName]
misses := q.queryCache.FindMissingTimeRanges(start, end, builderQuery.StepInterval, cacheKey) misses := q.queryCache.FindMissingTimeRanges(orgID, start, end, builderQuery.StepInterval, cacheKey)
zap.L().Info("cache misses for metrics query", zap.Any("misses", misses)) zap.L().Info("cache misses for metrics query", zap.Any("misses", misses))
missedSeries := make([]querycache.CachedSeriesData, 0) missedSeries := make([]querycache.CachedSeriesData, 0)
for _, miss := range misses { for _, miss := range misses {
@ -275,7 +277,7 @@ func (q *querier) runBuilderQuery(
Data: series, Data: series,
}) })
} }
mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKey, missedSeries) mergedSeries := q.queryCache.MergeWithCachedSeriesData(orgID, cacheKey, missedSeries)
resultSeries := common.GetSeriesFromCachedData(mergedSeries, start, end) resultSeries := common.GetSeriesFromCachedData(mergedSeries, start, end)
@ -288,6 +290,7 @@ func (q *querier) runBuilderQuery(
func (q *querier) runBuilderExpression( func (q *querier) runBuilderExpression(
ctx context.Context, ctx context.Context,
orgID valuer.UUID,
builderQuery *v3.BuilderQuery, builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3, params *v3.QueryRangeParamsV3,
cacheKeys map[string]string, cacheKeys map[string]string,
@ -314,7 +317,7 @@ func (q *querier) runBuilderExpression(
cacheKey := cacheKeys[queryName] cacheKey := cacheKeys[queryName]
step := postprocess.StepIntervalForFunction(params, queryName) step := postprocess.StepIntervalForFunction(params, queryName)
misses := q.queryCache.FindMissingTimeRanges(params.Start, params.End, step, cacheKey) misses := q.queryCache.FindMissingTimeRanges(orgID, params.Start, params.End, step, cacheKey)
zap.L().Info("cache misses for expression query", zap.Any("misses", misses)) zap.L().Info("cache misses for expression query", zap.Any("misses", misses))
missedSeries := make([]querycache.CachedSeriesData, 0) missedSeries := make([]querycache.CachedSeriesData, 0)
for _, miss := range misses { for _, miss := range misses {
@ -338,7 +341,7 @@ func (q *querier) runBuilderExpression(
Data: series, Data: series,
}) })
} }
mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKey, missedSeries) mergedSeries := q.queryCache.MergeWithCachedSeriesData(orgID, cacheKey, missedSeries)
resultSeries := common.GetSeriesFromCachedData(mergedSeries, params.Start, params.End) resultSeries := common.GetSeriesFromCachedData(mergedSeries, params.Start, params.End)
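
Every cache path above starts with FindMissingTimeRanges, now keyed by orgID as well as the cache key. A simplified version of the gap computation it performs, ignoring the step alignment and flux-interval trimming that the real querycache also applies:

package main

import "fmt"

type interval struct{ Start, End int64 } // unix ms, treated as [Start, End)

// missingRanges: cached holds the intervals already stored under an
// org-scoped key; the result is the gaps inside [start, end) that still need
// to be queried. Assumes cached is sorted and non-overlapping (the real
// querycache merges on store, which maintains that invariant).
func missingRanges(start, end int64, cached []interval) []interval {
	var misses []interval
	cursor := start
	for _, c := range cached {
		if c.End <= cursor || c.Start >= end {
			continue // entirely outside the requested window
		}
		if c.Start > cursor {
			misses = append(misses, interval{cursor, c.Start})
		}
		if c.End > cursor {
			cursor = c.End
		}
	}
	if cursor < end {
		misses = append(misses, interval{cursor, end})
	}
	return misses
}

func main() {
	cached := []interval{{100, 200}, {250, 300}}
	fmt.Println(missingRanges(50, 350, cached)) // [{50 100} {200 250} {300 350}]
}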

View File

@ -15,8 +15,9 @@ import (
chErrors "github.com/SigNoz/signoz/pkg/query-service/errors" chErrors "github.com/SigNoz/signoz/pkg/query-service/errors"
"github.com/SigNoz/signoz/pkg/query-service/querycache" "github.com/SigNoz/signoz/pkg/query-service/querycache"
"github.com/SigNoz/signoz/pkg/query-service/utils" "github.com/SigNoz/signoz/pkg/query-service/utils"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/query-service/cache" "github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/query-service/interfaces" "github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/query-service/model" "github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3" v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
@ -143,7 +144,7 @@ func (q *querier) execPromQuery(ctx context.Context, params *model.QueryRangePar
return seriesList, nil return seriesList, nil
} }
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { func (q *querier) runBuilderQueries(ctx context.Context, orgID valuer.UUID, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
cacheKeys := q.keyGenerator.GenerateKeys(params) cacheKeys := q.keyGenerator.GenerateKeys(params)
@ -156,9 +157,9 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
} }
wg.Add(1) wg.Add(1)
if queryName == builderQuery.Expression { if queryName == builderQuery.Expression {
go q.runBuilderQuery(ctx, builderQuery, params, cacheKeys, ch, &wg) go q.runBuilderQuery(ctx, orgID, builderQuery, params, cacheKeys, ch, &wg)
} else { } else {
go q.runBuilderExpression(ctx, builderQuery, params, cacheKeys, ch, &wg) go q.runBuilderExpression(ctx, orgID, builderQuery, params, cacheKeys, ch, &wg)
} }
} }
@ -189,7 +190,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
return results, errQueriesByName, err return results, errQueriesByName, err
} }
func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { func (q *querier) runPromQueries(ctx context.Context, orgID valuer.UUID, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
channelResults := make(chan channelResult, len(params.CompositeQuery.PromQueries)) channelResults := make(chan channelResult, len(params.CompositeQuery.PromQueries))
var wg sync.WaitGroup var wg sync.WaitGroup
cacheKeys := q.keyGenerator.GenerateKeys(params) cacheKeys := q.keyGenerator.GenerateKeys(params)
@ -210,7 +211,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
channelResults <- channelResult{Err: err, Name: queryName, Query: query.Query, Series: series} channelResults <- channelResult{Err: err, Name: queryName, Query: query.Query, Series: series}
return return
} }
misses := q.queryCache.FindMissingTimeRanges(params.Start, params.End, params.Step, cacheKey) misses := q.queryCache.FindMissingTimeRanges(orgID, params.Start, params.End, params.Step, cacheKey)
zap.L().Info("cache misses for metrics prom query", zap.Any("misses", misses)) zap.L().Info("cache misses for metrics prom query", zap.Any("misses", misses))
missedSeries := make([]querycache.CachedSeriesData, 0) missedSeries := make([]querycache.CachedSeriesData, 0)
for _, miss := range misses { for _, miss := range misses {
@ -226,7 +227,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
End: miss.End, End: miss.End,
}) })
} }
mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKey, missedSeries) mergedSeries := q.queryCache.MergeWithCachedSeriesData(orgID, cacheKey, missedSeries)
resultSeries := common.GetSeriesFromCachedData(mergedSeries, params.Start, params.End) resultSeries := common.GetSeriesFromCachedData(mergedSeries, params.Start, params.End)
channelResults <- channelResult{Err: nil, Name: queryName, Query: promQuery.Query, Series: resultSeries} channelResults <- channelResult{Err: nil, Name: queryName, Query: promQuery.Query, Series: resultSeries}
@ -497,7 +498,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
return res, nil, nil return res, nil, nil
} }
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
var results []*v3.Result var results []*v3.Result
var err error var err error
var errQueriesByName map[string]error var errQueriesByName map[string]error
@ -507,7 +508,7 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3)
if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace { if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params) results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
} else { } else {
results, errQueriesByName, err = q.runBuilderQueries(ctx, params) results, errQueriesByName, err = q.runBuilderQueries(ctx, orgID, params)
} }
// in builder query, the only errors we expose are the ones that exceed the resource limits // in builder query, the only errors we expose are the ones that exceed the resource limits
// everything else is internal error as they are not actionable by the user // everything else is internal error as they are not actionable by the user
@ -517,7 +518,7 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3)
} }
} }
case v3.QueryTypePromQL: case v3.QueryTypePromQL:
results, errQueriesByName, err = q.runPromQueries(ctx, params) results, errQueriesByName, err = q.runPromQueries(ctx, orgID, params)
case v3.QueryTypeClickHouseSQL: case v3.QueryTypeClickHouseSQL:
ctx = context.WithValue(ctx, "enforce_max_result_rows", true) ctx = context.WithValue(ctx, "enforce_max_result_rows", true)
if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace { if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
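
QueryRange now threads orgID into the builder and PromQL branches because the cache underneath is partitioned per organization. A toy illustration of that contract, where an entry written by one org is invisible to another; the real providers add context, TTLs, and typed Cacheable values on top of this idea:

package main

import (
	"fmt"
	"sync"
)

// tenantCache namespaces every entry by orgID. This is a sketch of the
// multi-tenant contract only, not the actual pkg/cache implementation.
type tenantCache struct {
	mu   sync.RWMutex
	data map[string]map[string][]byte // orgID -> key -> value
}

func newTenantCache() *tenantCache {
	return &tenantCache{data: map[string]map[string][]byte{}}
}

func (c *tenantCache) Set(orgID, key string, v []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.data[orgID] == nil {
		c.data[orgID] = map[string][]byte{}
	}
	c.data[orgID][key] = v
}

func (c *tenantCache) Get(orgID, key string) ([]byte, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.data[orgID][key]
	return v, ok
}

func main() {
	c := newTenantCache()
	c.Set("org-a", "k", []byte("series"))
	_, visible := c.Get("org-b", "k")
	fmt.Println(visible) // false: org-b cannot see org-a's entry
}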

View File

@ -2,7 +2,6 @@ package querier
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"math" "math"
"strings" "strings"
@ -10,19 +9,22 @@ import (
"time" "time"
"github.com/DATA-DOG/go-sqlmock" "github.com/DATA-DOG/go-sqlmock"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/cache/cachetest"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest" "github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/prometheus" "github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/prometheus/prometheustest" "github.com/SigNoz/signoz/pkg/prometheus/prometheustest"
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader" "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder" "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3" tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
"github.com/SigNoz/signoz/pkg/query-service/cache/inmemory"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3" v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/querycache" "github.com/SigNoz/signoz/pkg/query-service/querycache"
"github.com/SigNoz/signoz/pkg/query-service/utils" "github.com/SigNoz/signoz/pkg/query-service/utils"
"github.com/SigNoz/signoz/pkg/telemetrystore" "github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest" "github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
"github.com/SigNoz/signoz/pkg/valuer"
cmock "github.com/srikanthccv/ClickHouse-go-mock" cmock "github.com/srikanthccv/ClickHouse-go-mock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -233,28 +235,28 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) {
}, },
} }
c := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) opts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
qc := querycache.NewQueryCache(querycache.WithCache(c)) qc := querycache.NewQueryCache(querycache.WithCache(c))
for idx, tc := range testCases { for idx, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
cacheKey := fmt.Sprintf("test-cache-key-%d", idx) cacheKey := fmt.Sprintf("test-cache-key-%d", idx)
cachedData := &querycache.CachedSeriesData{ cachedData := querycache.CachedSeriesData{
Start: minTimestamp(tc.cachedSeries), Start: minTimestamp(tc.cachedSeries),
End: maxTimestamp(tc.cachedSeries), End: maxTimestamp(tc.cachedSeries),
Data: tc.cachedSeries, Data: tc.cachedSeries,
} }
jsonData, err := json.Marshal([]*querycache.CachedSeriesData{cachedData}) orgID := valuer.GenerateUUID()
if err != nil { cacheableData := querycache.CacheableSeriesData{Series: []querycache.CachedSeriesData{cachedData}}
t.Errorf("error marshalling cached data: %v", err) err = c.Set(context.Background(), orgID, cacheKey, &cacheableData, 0)
} assert.NoError(t, err)
err = c.Store(cacheKey, jsonData, 5*time.Minute)
if err != nil {
t.Errorf("error storing cached data: %v", err)
}
misses := qc.FindMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey) misses := qc.FindMissingTimeRanges(orgID, tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey)
if len(misses) != len(tc.expectedMiss) { if len(misses) != len(tc.expectedMiss) {
t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses)) t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses))
} }
@ -453,27 +455,28 @@ func TestFindMissingTimeRangesWithFluxInterval(t *testing.T) {
}, },
} }
c := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) opts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
qc := querycache.NewQueryCache(querycache.WithCache(c)) qc := querycache.NewQueryCache(querycache.WithCache(c))
for idx, tc := range testCases { for idx, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
cacheKey := fmt.Sprintf("test-cache-key-%d", idx) cacheKey := fmt.Sprintf("test-cache-key-%d", idx)
cachedData := &querycache.CachedSeriesData{ cachedData := querycache.CachedSeriesData{
Start: minTimestamp(tc.cachedSeries), Start: minTimestamp(tc.cachedSeries),
End: maxTimestamp(tc.cachedSeries), End: maxTimestamp(tc.cachedSeries),
Data: tc.cachedSeries, Data: tc.cachedSeries,
} }
jsonData, err := json.Marshal([]*querycache.CachedSeriesData{cachedData}) orgID := valuer.GenerateUUID()
if err != nil { cacheableData := querycache.CacheableSeriesData{Series: []querycache.CachedSeriesData{cachedData}}
t.Errorf("error marshalling cached data: %v", err) err = c.Set(context.Background(), orgID, cacheKey, &cacheableData, 0)
} assert.NoError(t, err)
err = c.Store(cacheKey, jsonData, 5*time.Minute)
if err != nil { misses := qc.FindMissingTimeRanges(orgID, tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey)
t.Errorf("error storing cached data: %v", err)
}
misses := qc.FindMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey)
if len(misses) != len(tc.expectedMiss) { if len(misses) != len(tc.expectedMiss) {
t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses)) t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses))
} }
@ -625,9 +628,14 @@ func TestQueryRange(t *testing.T) {
}, },
}, },
} }
cache := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) cacheOpts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: cacheOpts})
require.NoError(t, err)
opts := QuerierOptions{ opts := QuerierOptions{
Cache: cache, Cache: c,
Reader: nil, Reader: nil,
FluxInterval: 5 * time.Minute, FluxInterval: 5 * time.Minute,
KeyGenerator: queryBuilder.NewKeyGenerator(), KeyGenerator: queryBuilder.NewKeyGenerator(),
@ -656,9 +664,10 @@ func TestQueryRange(t *testing.T) {
fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)), fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)),
} }
orgID := valuer.GenerateUUID()
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), orgID, param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
@ -736,9 +745,14 @@ func TestQueryRangeValueType(t *testing.T) {
}, },
}, },
} }
cache := inmemory.New(&inmemory.Options{TTL: 60 * time.Minute, CleanupInterval: 10 * time.Minute}) cacheOpts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: cacheOpts})
require.NoError(t, err)
opts := QuerierOptions{ opts := QuerierOptions{
Cache: cache, Cache: c,
Reader: nil, Reader: nil,
FluxInterval: 5 * time.Minute, FluxInterval: 5 * time.Minute,
KeyGenerator: queryBuilder.NewKeyGenerator(), KeyGenerator: queryBuilder.NewKeyGenerator(),
@ -768,7 +782,7 @@ func TestQueryRangeValueType(t *testing.T) {
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), valuer.GenerateUUID(), param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
@ -822,7 +836,7 @@ func TestQueryRangeTimeShift(t *testing.T) {
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), valuer.GenerateUUID(), param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
@ -894,9 +908,14 @@ func TestQueryRangeTimeShiftWithCache(t *testing.T) {
}, },
}, },
} }
cache := inmemory.New(&inmemory.Options{TTL: 60 * time.Minute, CleanupInterval: 10 * time.Minute}) cacheOpts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: cacheOpts})
require.NoError(t, err)
opts := QuerierOptions{ opts := QuerierOptions{
Cache: cache, Cache: c,
Reader: nil, Reader: nil,
FluxInterval: 5 * time.Minute, FluxInterval: 5 * time.Minute,
KeyGenerator: queryBuilder.NewKeyGenerator(), KeyGenerator: queryBuilder.NewKeyGenerator(),
@ -921,7 +940,7 @@ func TestQueryRangeTimeShiftWithCache(t *testing.T) {
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), valuer.GenerateUUID(), param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
@ -995,9 +1014,14 @@ func TestQueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
}, },
}, },
} }
cache := inmemory.New(&inmemory.Options{TTL: 60 * time.Minute, CleanupInterval: 10 * time.Minute}) cacheOpts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: cacheOpts})
require.NoError(t, err)
opts := QuerierOptions{ opts := QuerierOptions{
Cache: cache, Cache: c,
Reader: nil, Reader: nil,
FluxInterval: 5 * time.Minute, FluxInterval: 5 * time.Minute,
KeyGenerator: queryBuilder.NewKeyGenerator(), KeyGenerator: queryBuilder.NewKeyGenerator(),
@ -1022,7 +1046,7 @@ func TestQueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), valuer.GenerateUUID(), param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
@ -1067,9 +1091,14 @@ func TestQueryRangeValueTypePromQL(t *testing.T) {
}, },
}, },
} }
cache := inmemory.New(&inmemory.Options{TTL: 60 * time.Minute, CleanupInterval: 10 * time.Minute}) cacheOpts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: cacheOpts})
require.NoError(t, err)
opts := QuerierOptions{ opts := QuerierOptions{
Cache: cache, Cache: c,
Reader: nil, Reader: nil,
FluxInterval: 5 * time.Minute, FluxInterval: 5 * time.Minute,
KeyGenerator: queryBuilder.NewKeyGenerator(), KeyGenerator: queryBuilder.NewKeyGenerator(),
@ -1112,7 +1141,7 @@ func TestQueryRangeValueTypePromQL(t *testing.T) {
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), valuer.GenerateUUID(), param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
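
The test migration repeated in the hunks above swaps the old inmemory package for the new cache.Memory config plus the cachetest constructor, and every Set and FindMissingTimeRanges call becomes org-scoped. A condensed, self-contained version of the new fixture, using only APIs introduced in this diff; treating ttl=0 as "use the configured default" is an assumption:

package querycache_test

import (
	"context"
	"testing"
	"time"

	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/cache/cachetest"
	"github.com/SigNoz/signoz/pkg/query-service/querycache"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/stretchr/testify/require"
)

func TestOrgScopedCacheSetup(t *testing.T) {
	// Memory-backed cache built through cachetest, as the updated tests do.
	opts := cache.Memory{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}
	c, err := cachetest.New(cache.Config{Provider: "memory", Memory: opts})
	require.NoError(t, err)

	// Each test generates its own org, so fixtures are isolated per tenant.
	orgID := valuer.GenerateUUID()
	data := querycache.CacheableSeriesData{Series: []querycache.CachedSeriesData{{Start: 1000, End: 2000}}}
	// ttl=0 is taken to mean "fall back to the configured TTL" (assumption).
	require.NoError(t, c.Set(context.Background(), orgID, "my-key", &data, 0))

	qc := querycache.NewQueryCache(querycache.WithCache(c))
	misses := qc.FindMissingTimeRanges(orgID, 0, 3000, 60, "my-key")
	t.Logf("misses: %v", misses)
}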

View File

@ -14,6 +14,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/constants" "github.com/SigNoz/signoz/pkg/query-service/constants"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3" v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/querycache" "github.com/SigNoz/signoz/pkg/query-service/querycache"
"github.com/SigNoz/signoz/pkg/valuer"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -75,6 +76,7 @@ func prepareLogsQuery(
func (q *querier) runBuilderQuery( func (q *querier) runBuilderQuery(
ctx context.Context, ctx context.Context,
orgID valuer.UUID,
builderQuery *v3.BuilderQuery, builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3, params *v3.QueryRangeParamsV3,
cacheKeys map[string]string, cacheKeys map[string]string,
@ -106,7 +108,7 @@ func (q *querier) runBuilderQuery(
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: series} ch <- channelResult{Err: err, Name: queryName, Query: query, Series: series}
return return
} }
misses := q.queryCache.FindMissingTimeRangesV2(start, end, builderQuery.StepInterval, cacheKeys[queryName]) misses := q.queryCache.FindMissingTimeRangesV2(orgID, start, end, builderQuery.StepInterval, cacheKeys[queryName])
zap.L().Info("cache misses for logs query", zap.Any("misses", misses)) zap.L().Info("cache misses for logs query", zap.Any("misses", misses))
missedSeries := make([]querycache.CachedSeriesData, 0) missedSeries := make([]querycache.CachedSeriesData, 0)
filteredMissedSeries := make([]querycache.CachedSeriesData, 0) filteredMissedSeries := make([]querycache.CachedSeriesData, 0)
@ -147,10 +149,10 @@ func (q *querier) runBuilderQuery(
}) })
} }
filteredMergedSeries := q.queryCache.MergeWithCachedSeriesDataV2(cacheKeys[queryName], filteredMissedSeries) filteredMergedSeries := q.queryCache.MergeWithCachedSeriesDataV2(orgID, cacheKeys[queryName], filteredMissedSeries)
q.queryCache.StoreSeriesInCache(cacheKeys[queryName], filteredMergedSeries) q.queryCache.StoreSeriesInCache(orgID, cacheKeys[queryName], filteredMergedSeries)
mergedSeries := q.queryCache.MergeWithCachedSeriesDataV2(cacheKeys[queryName], missedSeries) mergedSeries := q.queryCache.MergeWithCachedSeriesDataV2(orgID, cacheKeys[queryName], missedSeries)
resultSeries := common.GetSeriesFromCachedDataV2(mergedSeries, start, end, builderQuery.StepInterval) resultSeries := common.GetSeriesFromCachedDataV2(mergedSeries, start, end, builderQuery.StepInterval)
@ -213,7 +215,7 @@ func (q *querier) runBuilderQuery(
} }
if builderQuery.DataSource == v3.DataSourceMetrics && !q.testingMode { if builderQuery.DataSource == v3.DataSourceMetrics && !q.testingMode {
metadata, apiError := q.reader.GetUpdatedMetricsMetadata(ctx, builderQuery.AggregateAttribute.Key) metadata, apiError := q.reader.GetUpdatedMetricsMetadata(ctx, orgID, builderQuery.AggregateAttribute.Key)
if apiError != nil { if apiError != nil {
zap.L().Error("Error in getting metrics cached metadata", zap.Error(apiError)) zap.L().Error("Error in getting metrics cached metadata", zap.Error(apiError))
} }
@ -238,7 +240,7 @@ func (q *querier) runBuilderQuery(
return return
} }
misses := q.queryCache.FindMissingTimeRanges(start, end, builderQuery.StepInterval, cacheKeys[queryName]) misses := q.queryCache.FindMissingTimeRanges(orgID, start, end, builderQuery.StepInterval, cacheKeys[queryName])
zap.L().Info("cache misses for metrics query", zap.Any("misses", misses)) zap.L().Info("cache misses for metrics query", zap.Any("misses", misses))
missedSeries := make([]querycache.CachedSeriesData, 0) missedSeries := make([]querycache.CachedSeriesData, 0)
for _, miss := range misses { for _, miss := range misses {
@ -275,7 +277,7 @@ func (q *querier) runBuilderQuery(
End: miss.End, End: miss.End,
}) })
} }
mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKeys[queryName], missedSeries) mergedSeries := q.queryCache.MergeWithCachedSeriesData(orgID, cacheKeys[queryName], missedSeries)
resultSeries := common.GetSeriesFromCachedData(mergedSeries, start, end) resultSeries := common.GetSeriesFromCachedData(mergedSeries, start, end)
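
In the V2 path above, two merges happen per query: the filtered series (trimmed to complete step intervals) are merged and written back via StoreSeriesInCache, while the response is built from the full merge that still includes partial trailing buckets. A small sketch of that split, with descriptive stand-in types rather than the real querycache API:

package main

import "fmt"

type bucket struct {
	Start, End int64
	Complete   bool // true when the bucket covers a full step interval
}

// splitForCache mirrors the v2 querier's discipline: everything is returned
// to the caller, but only complete buckets are written back to the
// org-scoped cache, so a half-filled trailing interval is never served as
// settled data on a later hit.
func splitForCache(series []bucket) (toCache, toReturn []bucket) {
	toReturn = series
	for _, b := range series {
		if b.Complete {
			toCache = append(toCache, b)
		}
	}
	return toCache, toReturn
}

func main() {
	series := []bucket{{0, 60, true}, {60, 120, true}, {120, 150, false}}
	c, r := splitForCache(series)
	fmt.Println(len(c), len(r)) // 2 3
}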

View File

@ -15,8 +15,9 @@ import (
chErrors "github.com/SigNoz/signoz/pkg/query-service/errors" chErrors "github.com/SigNoz/signoz/pkg/query-service/errors"
"github.com/SigNoz/signoz/pkg/query-service/querycache" "github.com/SigNoz/signoz/pkg/query-service/querycache"
"github.com/SigNoz/signoz/pkg/query-service/utils" "github.com/SigNoz/signoz/pkg/query-service/utils"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/query-service/cache" "github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/query-service/interfaces" "github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/query-service/model" "github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3" v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
@ -147,7 +148,7 @@ func (q *querier) execPromQuery(ctx context.Context, params *model.QueryRangePar
return seriesList, nil return seriesList, nil
} }
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { func (q *querier) runBuilderQueries(ctx context.Context, orgID valuer.UUID, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
cacheKeys := q.keyGenerator.GenerateKeys(params) cacheKeys := q.keyGenerator.GenerateKeys(params)
@ -159,7 +160,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
for queryName, builderQuery := range params.CompositeQuery.BuilderQueries { for queryName, builderQuery := range params.CompositeQuery.BuilderQueries {
if queryName == builderQuery.Expression { if queryName == builderQuery.Expression {
wg.Add(1) wg.Add(1)
go q.runBuilderQuery(ctx, builderQuery, params, cacheKeys, ch, &wg) go q.runBuilderQuery(ctx, orgID, builderQuery, params, cacheKeys, ch, &wg)
} }
} }
@ -191,7 +192,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
return results, errQueriesByName, err return results, errQueriesByName, err
} }
func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { func (q *querier) runPromQueries(ctx context.Context, orgID valuer.UUID, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
channelResults := make(chan channelResult, len(params.CompositeQuery.PromQueries)) channelResults := make(chan channelResult, len(params.CompositeQuery.PromQueries))
var wg sync.WaitGroup var wg sync.WaitGroup
cacheKeys := q.keyGenerator.GenerateKeys(params) cacheKeys := q.keyGenerator.GenerateKeys(params)
@ -212,7 +213,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
channelResults <- channelResult{Err: err, Name: queryName, Query: query.Query, Series: series} channelResults <- channelResult{Err: err, Name: queryName, Query: query.Query, Series: series}
return return
} }
misses := q.queryCache.FindMissingTimeRanges(params.Start, params.End, params.Step, cacheKey) misses := q.queryCache.FindMissingTimeRanges(orgID, params.Start, params.End, params.Step, cacheKey)
zap.L().Info("cache misses for metrics prom query", zap.Any("misses", misses)) zap.L().Info("cache misses for metrics prom query", zap.Any("misses", misses))
missedSeries := make([]querycache.CachedSeriesData, 0) missedSeries := make([]querycache.CachedSeriesData, 0)
for _, miss := range misses { for _, miss := range misses {
@ -228,7 +229,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
End: miss.End, End: miss.End,
}) })
} }
mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKey, missedSeries) mergedSeries := q.queryCache.MergeWithCachedSeriesData(orgID, cacheKey, missedSeries)
resultSeries := common.GetSeriesFromCachedData(mergedSeries, params.Start, params.End) resultSeries := common.GetSeriesFromCachedData(mergedSeries, params.Start, params.End)
channelResults <- channelResult{Err: nil, Name: queryName, Query: promQuery.Query, Series: resultSeries} channelResults <- channelResult{Err: nil, Name: queryName, Query: promQuery.Query, Series: resultSeries}
}(queryName, promQuery) }(queryName, promQuery)
@ -500,7 +501,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
// QueryRange is the main function that runs the queries // QueryRange is the main function that runs the queries
// and returns the results // and returns the results
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
var results []*v3.Result var results []*v3.Result
var err error var err error
var errQueriesByName map[string]error var errQueriesByName map[string]error
@ -510,7 +511,7 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3)
if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace { if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params) results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
} else { } else {
results, errQueriesByName, err = q.runBuilderQueries(ctx, params) results, errQueriesByName, err = q.runBuilderQueries(ctx, orgID, params)
} }
// in builder query, the only errors we expose are the ones that exceed the resource limits // in builder query, the only errors we expose are the ones that exceed the resource limits
// everything else is internal error as they are not actionable by the user // everything else is internal error as they are not actionable by the user
@ -520,7 +521,7 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3)
} }
} }
case v3.QueryTypePromQL: case v3.QueryTypePromQL:
results, errQueriesByName, err = q.runPromQueries(ctx, params) results, errQueriesByName, err = q.runPromQueries(ctx, orgID, params)
case v3.QueryTypeClickHouseSQL: case v3.QueryTypeClickHouseSQL:
if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace { if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params) results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
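
runPromQueries keeps its fan-out shape: a buffered channel sized to the number of PromQL queries, one goroutine per query, each now carrying orgID into its cache merge. A self-contained sketch of that concurrency pattern, with the exec-and-merge step replaced by a stand-in:

package main

import (
	"context"
	"fmt"
	"sync"
)

type channelResult struct {
	Name string
	Err  error
	Data string
}

// runAll launches one worker per query; the buffered channel means no worker
// blocks on send, and close-after-Wait lets the collector drain safely.
func runAll(ctx context.Context, orgID string, queries map[string]string) []channelResult {
	ch := make(chan channelResult, len(queries))
	var wg sync.WaitGroup
	for name, q := range queries {
		wg.Add(1)
		go func(name, q string) {
			defer wg.Done()
			// Stand-in for exec + org-scoped cache merge.
			ch <- channelResult{Name: name, Data: fmt.Sprintf("%s:%s:%s", orgID, name, q)}
		}(name, q)
	}
	wg.Wait()
	close(ch)
	var out []channelResult
	for r := range ch {
		out = append(out, r)
	}
	return out
}

func main() {
	res := runAll(context.Background(), "org-1", map[string]string{"A": "up", "B": "rate(x[5m])"})
	fmt.Println(len(res)) // 2
}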

View File

@ -2,7 +2,6 @@ package v2
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"math" "math"
"strings" "strings"
@ -10,19 +9,22 @@ import (
"time" "time"
"github.com/DATA-DOG/go-sqlmock" "github.com/DATA-DOG/go-sqlmock"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/cache/cachetest"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest" "github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/prometheus" "github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/prometheus/prometheustest" "github.com/SigNoz/signoz/pkg/prometheus/prometheustest"
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader" "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder" "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3" tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
"github.com/SigNoz/signoz/pkg/query-service/cache/inmemory"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3" v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/querycache" "github.com/SigNoz/signoz/pkg/query-service/querycache"
"github.com/SigNoz/signoz/pkg/query-service/utils" "github.com/SigNoz/signoz/pkg/query-service/utils"
"github.com/SigNoz/signoz/pkg/telemetrystore" "github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest" "github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
"github.com/SigNoz/signoz/pkg/valuer"
cmock "github.com/srikanthccv/ClickHouse-go-mock" cmock "github.com/srikanthccv/ClickHouse-go-mock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -233,28 +235,28 @@ func TestV2FindMissingTimeRangesZeroFreshNess(t *testing.T) {
}, },
} }
c := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) opts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
qc := querycache.NewQueryCache(querycache.WithCache(c)) qc := querycache.NewQueryCache(querycache.WithCache(c))
for idx, tc := range testCases { for idx, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
cacheKey := fmt.Sprintf("test-cache-key-%d", idx) cacheKey := fmt.Sprintf("test-cache-key-%d", idx)
cachedData := &querycache.CachedSeriesData{ cachedData := querycache.CachedSeriesData{
Start: minTimestamp(tc.cachedSeries), Start: minTimestamp(tc.cachedSeries),
End: maxTimestamp(tc.cachedSeries), End: maxTimestamp(tc.cachedSeries),
Data: tc.cachedSeries, Data: tc.cachedSeries,
} }
jsonData, err := json.Marshal([]*querycache.CachedSeriesData{cachedData}) orgID := valuer.GenerateUUID()
if err != nil { cacheableData := querycache.CacheableSeriesData{Series: []querycache.CachedSeriesData{cachedData}}
t.Errorf("error marshalling cached data: %v", err) err = c.Set(context.Background(), orgID, cacheKey, &cacheableData, 0)
} assert.NoError(t, err)
err = c.Store(cacheKey, jsonData, 5*time.Minute)
if err != nil {
t.Errorf("error storing cached data: %v", err)
}
misses := qc.FindMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey) misses := qc.FindMissingTimeRanges(orgID, tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey)
if len(misses) != len(tc.expectedMiss) { if len(misses) != len(tc.expectedMiss) {
t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses)) t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses))
} }
@ -453,29 +455,28 @@ func TestV2FindMissingTimeRangesWithFluxInterval(t *testing.T) {
}, },
} }
c := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) opts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: opts})
require.NoError(t, err)
qc := querycache.NewQueryCache(querycache.WithCache(c)) qc := querycache.NewQueryCache(querycache.WithCache(c))
for idx, tc := range testCases { for idx, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
cacheKey := fmt.Sprintf("test-cache-key-%d", idx) cacheKey := fmt.Sprintf("test-cache-key-%d", idx)
cachedData := &querycache.CachedSeriesData{ cachedData := querycache.CachedSeriesData{
Start: minTimestamp(tc.cachedSeries), Start: minTimestamp(tc.cachedSeries),
End: maxTimestamp(tc.cachedSeries), End: maxTimestamp(tc.cachedSeries),
Data: tc.cachedSeries, Data: tc.cachedSeries,
} }
jsonData, err := json.Marshal([]*querycache.CachedSeriesData{cachedData}) orgID := valuer.GenerateUUID()
if err != nil { cacheableData := querycache.CacheableSeriesData{Series: []querycache.CachedSeriesData{cachedData}}
t.Errorf("error marshalling cached data: %v", err) err = c.Set(context.Background(), orgID, cacheKey, &cacheableData, 0)
return assert.NoError(t, err)
}
err = c.Store(cacheKey, jsonData, 5*time.Minute) misses := qc.FindMissingTimeRanges(orgID, tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey)
if err != nil {
t.Errorf("error storing cached data: %v", err)
return
}
misses := qc.FindMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey)
if len(misses) != len(tc.expectedMiss) { if len(misses) != len(tc.expectedMiss) {
t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses)) t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses))
} }
@ -634,9 +635,14 @@ func TestV2QueryRangePanelGraph(t *testing.T) {
}, },
}, },
} }
cache := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) cacheOpts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: cacheOpts})
require.NoError(t, err)
opts := QuerierOptions{ opts := QuerierOptions{
Cache: cache, Cache: c,
Reader: nil, Reader: nil,
FluxInterval: 5 * time.Minute, FluxInterval: 5 * time.Minute,
KeyGenerator: queryBuilder.NewKeyGenerator(), KeyGenerator: queryBuilder.NewKeyGenerator(),
@ -667,9 +673,10 @@ func TestV2QueryRangePanelGraph(t *testing.T) {
fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)), // 31st Jan, 04:23:00 to 31st Jan, 06:23:00 fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)), // 31st Jan, 04:23:00 to 31st Jan, 06:23:00
} }
orgID := valuer.GenerateUUID()
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), orgID, param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
@ -783,9 +790,14 @@ func TestV2QueryRangeValueType(t *testing.T) {
}, },
}, },
} }
cache := inmemory.New(&inmemory.Options{TTL: 60 * time.Minute, CleanupInterval: 10 * time.Minute}) cacheOpts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: cacheOpts})
require.NoError(t, err)
opts := QuerierOptions{ opts := QuerierOptions{
Cache: cache, Cache: c,
Reader: nil, Reader: nil,
FluxInterval: 5 * time.Minute, FluxInterval: 5 * time.Minute,
KeyGenerator: queryBuilder.NewKeyGenerator(), KeyGenerator: queryBuilder.NewKeyGenerator(),
@ -813,9 +825,10 @@ func TestV2QueryRangeValueType(t *testing.T) {
fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675119196722)*int64(1000000), (1675126396722)*int64(1000000)), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00 fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675119196722)*int64(1000000), (1675126396722)*int64(1000000)), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00
} }
orgID := valuer.GenerateUUID()
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), orgID, param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
@ -870,7 +883,7 @@ func TestV2QueryRangeTimeShift(t *testing.T) {
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), valuer.GenerateUUID(), param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
@ -944,9 +957,14 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
}, },
}, },
} }
cache := inmemory.New(&inmemory.Options{TTL: 60 * time.Minute, CleanupInterval: 10 * time.Minute}) cacheOpts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: cacheOpts})
require.NoError(t, err)
opts := QuerierOptions{ opts := QuerierOptions{
Cache: cache, Cache: c,
Reader: nil, Reader: nil,
FluxInterval: 5 * time.Minute, FluxInterval: 5 * time.Minute,
KeyGenerator: queryBuilder.NewKeyGenerator(), KeyGenerator: queryBuilder.NewKeyGenerator(),
@ -971,7 +989,7 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), valuer.GenerateUUID(), param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
@ -1047,9 +1065,14 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
}, },
}, },
} }
cache := inmemory.New(&inmemory.Options{TTL: 60 * time.Minute, CleanupInterval: 10 * time.Minute}) cacheOpts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: cacheOpts})
require.NoError(t, err)
opts := QuerierOptions{ opts := QuerierOptions{
Cache: cache, Cache: c,
Reader: nil, Reader: nil,
FluxInterval: 5 * time.Minute, FluxInterval: 5 * time.Minute,
KeyGenerator: queryBuilder.NewKeyGenerator(), KeyGenerator: queryBuilder.NewKeyGenerator(),
@ -1074,7 +1097,7 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), valuer.GenerateUUID(), param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }
@ -1121,9 +1144,14 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) {
}, },
}, },
} }
cache := inmemory.New(&inmemory.Options{TTL: 60 * time.Minute, CleanupInterval: 10 * time.Minute}) cacheOpts := cache.Memory{
TTL: 5 * time.Minute,
CleanupInterval: 10 * time.Minute,
}
c, err := cachetest.New(cache.Config{Provider: "memory", Memory: cacheOpts})
require.NoError(t, err)
opts := QuerierOptions{ opts := QuerierOptions{
Cache: cache, Cache: c,
Reader: nil, Reader: nil,
FluxInterval: 5 * time.Minute, FluxInterval: 5 * time.Minute,
KeyGenerator: queryBuilder.NewKeyGenerator(), KeyGenerator: queryBuilder.NewKeyGenerator(),
@ -1166,7 +1194,7 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) {
for i, param := range params { for i, param := range params {
tracesV3.Enrich(param, map[string]v3.AttributeKey{}) tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param) _, errByName, err := q.QueryRange(context.Background(), valuer.GenerateUUID(), param)
if err != nil { if err != nil {
t.Errorf("expected no error, got %s", err) t.Errorf("expected no error, got %s", err)
} }

View File

@ -5,8 +5,8 @@ import (
"strings" "strings"
"github.com/SigNoz/govaluate" "github.com/SigNoz/govaluate"
"github.com/SigNoz/signoz/pkg/cache"
metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3" metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3"
"github.com/SigNoz/signoz/pkg/query-service/cache"
"github.com/SigNoz/signoz/pkg/query-service/constants" "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/interfaces" "github.com/SigNoz/signoz/pkg/query-service/interfaces"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3" v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"

View File

@ -33,8 +33,8 @@ import (
"github.com/rs/cors" "github.com/rs/cors"
"github.com/soheilhy/cmux" "github.com/soheilhy/cmux"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/query-service/app/explorer" "github.com/SigNoz/signoz/pkg/query-service/app/explorer"
"github.com/SigNoz/signoz/pkg/query-service/cache"
"github.com/SigNoz/signoz/pkg/query-service/constants" "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/dao" "github.com/SigNoz/signoz/pkg/query-service/dao"
"github.com/SigNoz/signoz/pkg/query-service/featureManager" "github.com/SigNoz/signoz/pkg/query-service/featureManager"
@ -114,19 +114,10 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.SigNoz.Cache, serverOptions.SigNoz.Cache,
) )
var c cache.Cache
if serverOptions.CacheConfigPath != "" {
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
if err != nil {
return nil, err
}
c = cache.NewCache(cacheOpts)
}
rm, err := makeRulesManager( rm, err := makeRulesManager(
serverOptions.SigNoz.SQLStore.SQLxDB(), serverOptions.SigNoz.SQLStore.SQLxDB(),
reader, reader,
c, serverOptions.SigNoz.Cache,
serverOptions.SigNoz.SQLStore, serverOptions.SigNoz.SQLStore,
serverOptions.SigNoz.TelemetryStore, serverOptions.SigNoz.TelemetryStore,
serverOptions.SigNoz.Prometheus, serverOptions.SigNoz.Prometheus,
@ -168,7 +159,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
IntegrationsController: integrationsController, IntegrationsController: integrationsController,
CloudIntegrationsController: cloudIntegrationsController, CloudIntegrationsController: cloudIntegrationsController,
LogsParsingPipelineController: logParsingPipelineController, LogsParsingPipelineController: logParsingPipelineController,
Cache: c,
FluxInterval: fluxInterval, FluxInterval: fluxInterval,
JWT: serverOptions.Jwt, JWT: serverOptions.Jwt,
AlertmanagerAPI: alertmanager.NewAPI(serverOptions.SigNoz.Alertmanager), AlertmanagerAPI: alertmanager.NewAPI(serverOptions.SigNoz.Alertmanager),
@ -220,9 +210,15 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
&opAmpModel.AllAgents, agentConfMgr, &opAmpModel.AllAgents, agentConfMgr,
) )
errorList := reader.PreloadMetricsMetadata(context.Background()) orgs, err := apiHandler.Signoz.Modules.Organization.GetAll(context.Background())
for _, er := range errorList { if err != nil {
zap.L().Error("preload metrics updated metadata failed", zap.Error(er)) return nil, err
}
for _, org := range orgs {
errorList := reader.PreloadMetricsMetadata(context.Background(), org.ID)
for _, er := range errorList {
zap.L().Error("failed to preload metrics metadata", zap.Error(er))
}
} }
return s, nil return s, nil
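
Startup preloading changes from one global pass to one pass per organization: enumerate the orgs, warm each tenant's metrics metadata independently, and log the per-metric errors without aborting the remaining tenants. A sketch of that loop, with preload standing in for reader.PreloadMetricsMetadata:

package main

import (
	"context"
	"fmt"
)

type org struct{ ID string }

// preload stands in for reader.PreloadMetricsMetadata, which returns a list
// of per-metric errors rather than failing fast on the first one.
func preload(ctx context.Context, orgID string) []error {
	return nil
}

// preloadAll mirrors the new startup path: each org is warmed on its own,
// and an error in one tenant never blocks the others.
func preloadAll(ctx context.Context, orgs []org) {
	for _, o := range orgs {
		for _, err := range preload(ctx, o.ID) {
			fmt.Printf("failed to preload metrics metadata for org %s: %v\n", o.ID, err)
		}
	}
}

func main() {
	preloadAll(context.Background(), []org{{"org-a"}, {"org-b"}})
}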

View File

@@ -5,7 +5,10 @@ import (
 	"io"
 	"net/http"
 
+	"github.com/SigNoz/signoz/pkg/http/render"
 	"github.com/SigNoz/signoz/pkg/query-service/model"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/gorilla/mux"
 
 	explorer "github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
@@ -13,9 +16,10 @@ import (
 )
 
 func (aH *APIHandler) FilterKeysSuggestion(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 	bodyBytes, _ := io.ReadAll(r.Body)
 	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
-	ctx := r.Context()
+
 	params, apiError := explorer.ParseFilterKeySuggestions(r)
 	if apiError != nil {
 		zap.L().Error("error parsing summary filter keys request", zap.Error(apiError.Err))
@@ -32,9 +36,20 @@ func (aH *APIHandler) FilterKeysSuggestion(w http.ResponseWriter, r *http.Reques
 }
 
 func (aH *APIHandler) FilterValuesSuggestion(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	bodyBytes, _ := io.ReadAll(r.Body)
 	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
-	ctx := r.Context()
+
 	params, apiError := explorer.ParseFilterValueSuggestions(r)
 	if apiError != nil {
 		zap.L().Error("error parsing summary filter values request", zap.Error(apiError.Err))
@@ -42,7 +57,7 @@ func (aH *APIHandler) FilterValuesSuggestion(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	values, apiError := aH.SummaryService.FilterValues(ctx, params)
+	values, apiError := aH.SummaryService.FilterValues(ctx, orgID, params)
 	if apiError != nil {
 		zap.L().Error("error getting filter values", zap.Error(apiError.Err))
 		RespondError(w, apiError, nil)
@@ -52,9 +67,20 @@ func (aH *APIHandler) FilterValuesSuggestion(w http.ResponseWriter, r *http.Requ
 }
 
 func (aH *APIHandler) GetMetricsDetails(w http.ResponseWriter, r *http.Request) {
-	metricName := mux.Vars(r)["metric_name"]
 	ctx := r.Context()
-	metricsDetail, apiError := aH.SummaryService.GetMetricsSummary(ctx, metricName)
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+
+	metricName := mux.Vars(r)["metric_name"]
+	metricsDetail, apiError := aH.SummaryService.GetMetricsSummary(ctx, orgID, metricName)
 	if apiError != nil {
 		zap.L().Error("error getting metrics summary error", zap.Error(apiError.Err))
 		RespondError(w, apiError, nil)
@@ -64,9 +90,20 @@ func (aH *APIHandler) GetMetricsDetails(w http.ResponseWriter, r *http.Request)
 }
 
 func (aH *APIHandler) ListMetrics(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	bodyBytes, _ := io.ReadAll(r.Body)
 	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
-	ctx := r.Context()
+
 	params, apiErr := explorer.ParseSummaryListMetricsParams(r)
 	if apiErr != nil {
 		zap.L().Error("error parsing metric list metric summary api request", zap.Error(apiErr.Err))
@@ -74,7 +111,7 @@ func (aH *APIHandler) ListMetrics(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	slmr, apiErr := aH.SummaryService.ListMetricsWithSummary(ctx, params)
+	slmr, apiErr := aH.SummaryService.ListMetricsWithSummary(ctx, orgID, params)
 	if apiErr != nil {
 		zap.L().Error("error in getting list metrics summary", zap.Error(apiErr.Err))
 		RespondError(w, apiErr, nil)
@@ -144,16 +181,27 @@ func (aH *APIHandler) GetInspectMetricsData(w http.ResponseWriter, r *http.Reque
 }
 
 func (aH *APIHandler) UpdateMetricsMetadata(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	claims, err := authtypes.ClaimsFromContext(r.Context())
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(w, err)
+		return
+	}
 	bodyBytes, _ := io.ReadAll(r.Body)
 	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
-	ctx := r.Context()
+
 	params, apiError := explorer.ParseUpdateMetricsMetadataParams(r)
 	if apiError != nil {
 		zap.L().Error("error parsing update metrics metadata params", zap.Error(apiError.Err))
 		RespondError(w, apiError, nil)
		return
 	}
-	apiError = aH.SummaryService.UpdateMetricsMetadata(ctx, params)
+	apiError = aH.SummaryService.UpdateMetricsMetadata(ctx, orgID, params)
 	if apiError != nil {
 		zap.L().Error("error updating metrics metadata", zap.Error(apiError.Err))
 		RespondError(w, apiError, nil)
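Every handler touched above repeats the same two-step tenant resolution before calling into the summary service. A minimal sketch of that pattern, using only the authtypes and valuer calls visible in this diff; the orgIDFromRequest helper (and the zero-value valuer.UUID{} return) is hypothetical and not part of the change:

package handlers // hypothetical package, for illustration only

import (
	"net/http"

	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// orgIDFromRequest is a hypothetical helper; the handlers above inline this logic.
func orgIDFromRequest(r *http.Request) (valuer.UUID, error) {
	// the auth middleware stores JWT claims on the request context
	claims, err := authtypes.ClaimsFromContext(r.Context())
	if err != nil {
		return valuer.UUID{}, err
	}
	// claims.OrgID is a plain string; parse it into the strongly typed UUID
	return valuer.NewUUID(claims.OrgID)
}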

View File

@@ -1,69 +0,0 @@
package cache
import (
"os"
"time"
inmemory "github.com/SigNoz/signoz/pkg/query-service/cache/inmemory"
redis "github.com/SigNoz/signoz/pkg/query-service/cache/redis"
"github.com/SigNoz/signoz/pkg/query-service/cache/status"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"gopkg.in/yaml.v2"
)
type Options struct {
Name string `yaml:"-"`
Provider string `yaml:"provider"`
Redis *redis.Options `yaml:"redis,omitempty"`
InMemory *inmemory.Options `yaml:"inmemory,omitempty"`
}
// Cache is the interface for the storage backend
type Cache interface {
Connect() error
Store(cacheKey string, data []byte, ttl time.Duration) error
Retrieve(cacheKey string, allowExpired bool) ([]byte, status.RetrieveStatus, error)
SetTTL(cacheKey string, ttl time.Duration)
Remove(cacheKey string)
BulkRemove(cacheKeys []string)
Close() error
}
// KeyGenerator is the interface for the key generator
// The key generator is used to generate the cache keys for the cache entries
type KeyGenerator interface {
// GenerateKeys generates the cache keys for the given query range params
// The keys are returned as a map where the key is the query name and the value is the cache key
GenerateKeys(*v3.QueryRangeParamsV3) map[string]string
}
// LoadFromYAMLCacheConfig loads the cache options from the given YAML config bytes
func LoadFromYAMLCacheConfig(yamlConfig []byte) (*Options, error) {
var options Options
err := yaml.Unmarshal(yamlConfig, &options)
if err != nil {
return nil, err
}
return &options, nil
}
// LoadFromYAMLCacheConfigFile loads the cache options from the given YAML config file
func LoadFromYAMLCacheConfigFile(configFile string) (*Options, error) {
bytes, err := os.ReadFile(configFile)
if err != nil {
return nil, err
}
return LoadFromYAMLCacheConfig(bytes)
}
// NewCache creates a new cache based on the given options
func NewCache(options *Options) Cache {
switch options.Provider {
case "redis":
return redis.New(options.Redis)
case "inmemory":
return inmemory.New(options.InMemory)
default:
return nil
}
}
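The interface removed here keyed every entry by a bare string, so all tenants shared one keyspace. The replacement package is not itself shown in this diff, but its call sites are; a hypothetical restatement of the surface those call sites imply, not copied from the new package:

package example

import (
	"context"
	"time"

	"github.com/SigNoz/signoz/pkg/types/cachetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// MultiTenantCache is inferred from the Get/Set call sites later in this
// diff: every operation carries the tenant's orgID, and values are typed
// cachetypes.Cacheable rather than raw []byte.
type MultiTenantCache interface {
	Set(ctx context.Context, orgID valuer.UUID, key string, value cachetypes.Cacheable, ttl time.Duration) error
	Get(ctx context.Context, orgID valuer.UUID, key string, dest cachetypes.Cacheable, allowExpired bool) error
}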

View File

@@ -1,52 +0,0 @@
package cache
import "testing"
func TestNewCacheUnKnownProvider(t *testing.T) {
c := NewCache(&Options{
Name: "test",
Provider: "unknown",
})
if c != nil {
t.Fatalf("expected nil, got %v", c)
}
}
func TestNewCacheInMemory(t *testing.T) {
c := NewCache(&Options{
Name: "test",
Provider: "inmemory",
})
if c == nil {
t.Fatalf("expected non-nil, got nil")
}
}
func TestNewCacheRedis(t *testing.T) {
c := NewCache(&Options{
Name: "test",
Provider: "redis",
})
if c == nil {
t.Fatalf("expected non-nil, got nil")
}
}
func TestLoadFromYAMLCacheConfig(t *testing.T) {
_, err := LoadFromYAMLCacheConfig([]byte(`
provider: inmemory
`))
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
}
func TestLoadFromYAMLCacheConfigFile(t *testing.T) {
_, err := LoadFromYAMLCacheConfigFile("testdata/cache.yaml")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
}

View File

@@ -1,73 +0,0 @@
package inmemory
import (
"time"
"github.com/SigNoz/signoz/pkg/query-service/cache/status"
go_cache "github.com/patrickmn/go-cache"
)
// cache implements the Cache interface
type cache struct {
cc *go_cache.Cache
}
// New creates a new in-memory cache
func New(opts *Options) *cache {
if opts == nil {
opts = defaultOptions()
}
return &cache{cc: go_cache.New(opts.TTL, opts.CleanupInterval)}
}
// Connect does nothing
func (c *cache) Connect() error {
return nil
}
// Store stores the data in the cache
func (c *cache) Store(cacheKey string, data []byte, ttl time.Duration) error {
c.cc.Set(cacheKey, data, ttl)
return nil
}
// Retrieve retrieves the data from the cache
func (c *cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.RetrieveStatus, error) {
data, found := c.cc.Get(cacheKey)
if !found {
return nil, status.RetrieveStatusKeyMiss, nil
}
return data.([]byte), status.RetrieveStatusHit, nil
}
// SetTTL sets the TTL for the cache entry
func (c *cache) SetTTL(cacheKey string, ttl time.Duration) {
item, found := c.cc.Get(cacheKey)
if !found {
return
}
_ = c.cc.Replace(cacheKey, item, ttl)
}
// Remove removes the cache entry
func (c *cache) Remove(cacheKey string) {
c.cc.Delete(cacheKey)
}
// BulkRemove removes the cache entries
func (c *cache) BulkRemove(cacheKeys []string) {
for _, cacheKey := range cacheKeys {
c.cc.Delete(cacheKey)
}
}
// Close does nothing
func (c *cache) Close() error {
return nil
}
// Configuration returns the cache configuration
func (c *cache) Configuration() *Options {
return nil
}

View File

@@ -1,102 +0,0 @@
package inmemory
import (
"testing"
"time"
"github.com/SigNoz/signoz/pkg/query-service/cache/status"
"github.com/stretchr/testify/assert"
)
// TestNew tests the New function
func TestNew(t *testing.T) {
opts := &Options{
TTL: 10 * time.Second,
CleanupInterval: 10 * time.Second,
}
c := New(opts)
assert.NotNil(t, c)
assert.NotNil(t, c.cc)
}
// TestConnect tests the Connect function
func TestConnect(t *testing.T) {
c := New(nil)
assert.NoError(t, c.Connect())
}
// TestStore tests the Store function
func TestStore(t *testing.T) {
c := New(nil)
assert.NoError(t, c.Store("key", []byte("value"), 10*time.Second))
}
// TestRetrieve tests the Retrieve function
func TestRetrieve(t *testing.T) {
c := New(nil)
assert.NoError(t, c.Store("key", []byte("value"), 10*time.Second))
data, retrieveStatus, err := c.Retrieve("key", false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, status.RetrieveStatusHit)
assert.Equal(t, data, []byte("value"))
}
// TestSetTTL tests the SetTTL function
func TestSetTTL(t *testing.T) {
c := New(&Options{TTL: 10 * time.Second, CleanupInterval: 1 * time.Second})
assert.NoError(t, c.Store("key", []byte("value"), 2*time.Second))
time.Sleep(3 * time.Second)
data, retrieveStatus, err := c.Retrieve("key", false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, status.RetrieveStatusKeyMiss)
assert.Nil(t, data)
assert.NoError(t, c.Store("key", []byte("value"), 2*time.Second))
c.SetTTL("key", 4*time.Second)
time.Sleep(3 * time.Second)
data, retrieveStatus, err = c.Retrieve("key", false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, status.RetrieveStatusHit)
assert.Equal(t, data, []byte("value"))
}
// TestRemove tests the Remove function
func TestRemove(t *testing.T) {
c := New(nil)
assert.NoError(t, c.Store("key", []byte("value"), 10*time.Second))
c.Remove("key")
data, retrieveStatus, err := c.Retrieve("key", false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, status.RetrieveStatusKeyMiss)
assert.Nil(t, data)
}
// TestBulkRemove tests the BulkRemove function
func TestBulkRemove(t *testing.T) {
c := New(nil)
assert.NoError(t, c.Store("key1", []byte("value"), 10*time.Second))
assert.NoError(t, c.Store("key2", []byte("value"), 10*time.Second))
c.BulkRemove([]string{"key1", "key2"})
data, retrieveStatus, err := c.Retrieve("key1", false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, status.RetrieveStatusKeyMiss)
assert.Nil(t, data)
data, retrieveStatus, err = c.Retrieve("key2", false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, status.RetrieveStatusKeyMiss)
assert.Nil(t, data)
}
// TestCache tests the cache
func TestCache(t *testing.T) {
c := New(nil)
assert.NoError(t, c.Store("key", []byte("value"), 10*time.Second))
data, retrieveStatus, err := c.Retrieve("key", false)
assert.NoError(t, err)
assert.Equal(t, retrieveStatus, status.RetrieveStatusHit)
assert.Equal(t, data, []byte("value"))
c.Remove("key")
}

View File

@@ -1,23 +0,0 @@
package inmemory
import (
"time"
go_cache "github.com/patrickmn/go-cache"
)
const (
defaultTTL = go_cache.NoExpiration
defaultCleanupInterval = 1 * time.Minute
)
// Options holds the options for the in-memory cache
type Options struct {
// TTL is the time to live for the cache entries
TTL time.Duration `yaml:"ttl,omitempty"`
CleanupInterval time.Duration `yaml:"cleanupInterval,omitempty"`
}
func defaultOptions() *Options {
return &Options{TTL: defaultTTL, CleanupInterval: defaultCleanupInterval}
}

View File

@@ -1,24 +0,0 @@
package redis
const (
defaultHost = "localhost"
defaultPort = 6379
defaultPassword = ""
defaultDB = 0
)
type Options struct {
Host string `yaml:"host,omitempty"`
Port int `yaml:"port,omitempty"`
Password string `yaml:"password,omitempty"`
DB int `yaml:"db,omitempty"`
}
func defaultOptions() *Options {
return &Options{
Host: defaultHost,
Port: defaultPort,
Password: defaultPassword,
DB: defaultDB,
}
}

View File

@@ -1,124 +0,0 @@
package redis
import (
"context"
"errors"
"fmt"
"time"
"github.com/SigNoz/signoz/pkg/query-service/cache/status"
"github.com/go-redis/redis/v8"
"go.uber.org/zap"
)
type cache struct {
client *redis.Client
opts *Options
}
// New creates a new cache
func New(opts *Options) *cache {
if opts == nil {
opts = defaultOptions()
}
return &cache{opts: opts}
}
// WithClient creates a new cache with the given client
func WithClient(client *redis.Client) *cache {
return &cache{client: client}
}
// Connect connects to the redis server
func (c *cache) Connect() error {
c.client = redis.NewClient(&redis.Options{
Addr: fmt.Sprintf("%s:%d", c.opts.Host, c.opts.Port),
Password: c.opts.Password,
DB: c.opts.DB,
})
return nil
}
// Store stores the data in the cache
func (c *cache) Store(cacheKey string, data []byte, ttl time.Duration) error {
return c.client.Set(context.Background(), cacheKey, data, ttl).Err()
}
// Retrieve retrieves the data from the cache
func (c *cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.RetrieveStatus, error) {
data, err := c.client.Get(context.Background(), cacheKey).Bytes()
if err != nil {
if errors.Is(err, redis.Nil) {
return nil, status.RetrieveStatusKeyMiss, nil
}
return nil, status.RetrieveStatusError, err
}
return data, status.RetrieveStatusHit, nil
}
// SetTTL sets the TTL for the cache entry
func (c *cache) SetTTL(cacheKey string, ttl time.Duration) {
err := c.client.Expire(context.Background(), cacheKey, ttl).Err()
if err != nil {
zap.L().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err))
}
}
// Remove removes the cache entry
func (c *cache) Remove(cacheKey string) {
c.BulkRemove([]string{cacheKey})
}
// BulkRemove removes the cache entries
func (c *cache) BulkRemove(cacheKeys []string) {
if err := c.client.Del(context.Background(), cacheKeys...).Err(); err != nil {
zap.L().Error("error deleting cache keys", zap.Strings("cacheKeys", cacheKeys), zap.Error(err))
}
}
// Close closes the connection to the redis server
func (c *cache) Close() error {
return c.client.Close()
}
// Ping pings the redis server
func (c *cache) Ping() error {
return c.client.Ping(context.Background()).Err()
}
// GetClient returns the redis client
func (c *cache) GetClient() *redis.Client {
return c.client
}
// GetOptions returns the options
func (c *cache) GetOptions() *Options {
return c.opts
}
// GetTTL returns the TTL for the cache entry
func (c *cache) GetTTL(cacheKey string) time.Duration {
ttl, err := c.client.TTL(context.Background(), cacheKey).Result()
if err != nil {
zap.L().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
}
return ttl
}
// GetKeys returns the keys matching the pattern
func (c *cache) GetKeys(pattern string) ([]string, error) {
return c.client.Keys(context.Background(), pattern).Result()
}
// GetKeysWithTTL returns the keys matching the pattern with their TTL
func (c *cache) GetKeysWithTTL(pattern string) (map[string]time.Duration, error) {
keys, err := c.GetKeys(pattern)
if err != nil {
return nil, err
}
result := make(map[string]time.Duration)
for _, key := range keys {
result[key] = c.GetTTL(key)
}
return result, nil
}

View File

@@ -1,91 +0,0 @@
package redis
import (
"testing"
"time"
"github.com/SigNoz/signoz/pkg/query-service/cache/status"
"github.com/go-redis/redismock/v8"
)
func TestStore(t *testing.T) {
db, mock := redismock.NewClientMock()
c := WithClient(db)
mock.ExpectSet("key", []byte("value"), 10*time.Second).RedisNil()
_ = c.Store("key", []byte("value"), 10*time.Second)
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %s", err)
}
}
func TestRetrieve(t *testing.T) {
db, mock := redismock.NewClientMock()
c := WithClient(db)
mock.ExpectSet("key", []byte("value"), 10*time.Second).RedisNil()
_ = c.Store("key", []byte("value"), 10*time.Second)
mock.ExpectGet("key").SetVal("value")
data, retrieveStatus, err := c.Retrieve("key", false)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if retrieveStatus != status.RetrieveStatusHit {
t.Errorf("expected status %d, got %d", status.RetrieveStatusHit, retrieveStatus)
}
if string(data) != "value" {
t.Errorf("expected value %s, got %s", "value", string(data))
}
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %s", err)
}
}
func TestSetTTL(t *testing.T) {
db, mock := redismock.NewClientMock()
c := WithClient(db)
mock.ExpectSet("key", []byte("value"), 10*time.Second).RedisNil()
_ = c.Store("key", []byte("value"), 10*time.Second)
mock.ExpectExpire("key", 4*time.Second).RedisNil()
c.SetTTL("key", 4*time.Second)
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %s", err)
}
}
func TestRemove(t *testing.T) {
db, mock := redismock.NewClientMock()
c := WithClient(db)
mock.ExpectSet("key", []byte("value"), 10*time.Second).RedisNil()
_ = c.Store("key", []byte("value"), 10*time.Second)
mock.ExpectDel("key").RedisNil()
c.Remove("key")
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %s", err)
}
}
func TestBulkRemove(t *testing.T) {
db, mock := redismock.NewClientMock()
c := WithClient(db)
mock.ExpectSet("key", []byte("value"), 10*time.Second).RedisNil()
_ = c.Store("key", []byte("value"), 10*time.Second)
mock.ExpectSet("key2", []byte("value2"), 10*time.Second).RedisNil()
_ = c.Store("key2", []byte("value2"), 10*time.Second)
mock.ExpectDel("key", "key2").RedisNil()
c.BulkRemove([]string{"key", "key2"})
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %s", err)
}
}

View File

@@ -1,33 +0,0 @@
package status
// RetrieveStatus defines the possible status of a cache lookup
type RetrieveStatus int
const (
RetrieveStatusHit = RetrieveStatus(iota)
RetrieveStatusPartialHit
RetrieveStatusRangeMiss
RetrieveStatusKeyMiss
RetrieveStatusRevalidated
RetrieveStatusError
)
func (s RetrieveStatus) String() string {
switch s {
case RetrieveStatusHit:
return "hit"
case RetrieveStatusPartialHit:
return "partial hit"
case RetrieveStatusRangeMiss:
return "range miss"
case RetrieveStatusKeyMiss:
return "key miss"
case RetrieveStatusRevalidated:
return "revalidated"
case RetrieveStatusError:
return "error"
default:
return "unknown"
}
}

View File

@@ -1,43 +0,0 @@
package status
import (
"testing"
)
func TestRetrieveStatusString(t *testing.T) {
tests := []struct {
status RetrieveStatus
want string
}{
{
status: RetrieveStatusHit,
want: "hit",
},
{
status: RetrieveStatusPartialHit,
want: "partial hit",
},
{
status: RetrieveStatusRangeMiss,
want: "range miss",
},
{
status: RetrieveStatusKeyMiss,
want: "key miss",
},
{
status: RetrieveStatusRevalidated,
want: "revalidated",
},
{
status: RetrieveStatusError,
want: "error",
},
}
for _, tt := range tests {
if got := tt.status.String(); got != tt.want {
t.Errorf("RetrieveStatus.String() = %v, want %v", got, tt.want)
}
}
}

View File

@@ -1,2 +0,0 @@
name: test
provider: inmemory

View File

@@ -8,6 +8,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/query-service/model/metrics_explorer"
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 	"github.com/SigNoz/signoz/pkg/query-service/querycache"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/util/stats"
 )
@@ -40,14 +41,14 @@ type Reader interface {
 	// Search Interfaces
 	SearchTraces(ctx context.Context, params *model.SearchTracesParams) (*[]model.SearchSpansResult, error)
-	GetWaterfallSpansForTraceWithMetadata(ctx context.Context, traceID string, req *model.GetWaterfallSpansForTraceWithMetadataParams) (*model.GetWaterfallSpansForTraceWithMetadataResponse, *model.ApiError)
-	GetFlamegraphSpansForTrace(ctx context.Context, traceID string, req *model.GetFlamegraphSpansForTraceParams) (*model.GetFlamegraphSpansForTraceResponse, *model.ApiError)
+	GetWaterfallSpansForTraceWithMetadata(ctx context.Context, orgID valuer.UUID, traceID string, req *model.GetWaterfallSpansForTraceWithMetadataParams) (*model.GetWaterfallSpansForTraceWithMetadataResponse, *model.ApiError)
+	GetFlamegraphSpansForTrace(ctx context.Context, orgID valuer.UUID, traceID string, req *model.GetFlamegraphSpansForTraceParams) (*model.GetFlamegraphSpansForTraceResponse, *model.ApiError)
 
 	// Setter Interfaces
 	SetTTL(ctx context.Context, orgID string, ttlParams *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError)
 
-	FetchTemporality(ctx context.Context, metricNames []string) (map[string]map[v3.Temporality]bool, error)
-	GetMetricAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest, skipDotNames bool, skipSignozMetrics bool) (*v3.AggregateAttributeResponse, error)
+	FetchTemporality(ctx context.Context, orgID valuer.UUID, metricNames []string) (map[string]map[v3.Temporality]bool, error)
+	GetMetricAggregateAttributes(ctx context.Context, orgID valuer.UUID, req *v3.AggregateAttributeRequest, skipDotNames bool, skipSignozMetrics bool) (*v3.AggregateAttributeResponse, error)
 	GetMetricAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
 	GetMetricAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
@@ -88,7 +89,7 @@ type Reader interface {
 	QueryDashboardVars(ctx context.Context, query string) (*model.DashboardVar, error)
 	CheckClickHouse(ctx context.Context) error
 
-	GetMetricMetadata(context.Context, string, string) (*v3.MetricMetadataResponse, error)
+	GetMetricMetadata(context.Context, valuer.UUID, string, string) (*v3.MetricMetadataResponse, error)
 
 	AddRuleStateHistory(ctx context.Context, ruleStateHistory []model.RuleStateHistory) error
 	GetOverallStateTransitions(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.ReleStateItem, error)
@@ -123,7 +124,7 @@ type Reader interface {
 	GetActiveTimeSeriesForMetricName(ctx context.Context, metricName string, duration time.Duration) (uint64, *model.ApiError)
 	GetAttributesForMetricName(ctx context.Context, metricName string, start, end *int64) (*[]metrics_explorer.Attribute, *model.ApiError)
 
-	ListSummaryMetrics(ctx context.Context, req *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError)
+	ListSummaryMetrics(ctx context.Context, orgID valuer.UUID, req *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError)
 	GetMetricsTimeSeriesPercentage(ctx context.Context, request *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.TreeMapResponseItem, *model.ApiError)
 	GetMetricsSamplesPercentage(ctx context.Context, req *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.TreeMapResponseItem, *model.ApiError)
@@ -135,15 +136,15 @@ type Reader interface {
 	GetInspectMetricsFingerprints(ctx context.Context, attributes []string, req *metrics_explorer.InspectMetricsRequest) ([]string, *model.ApiError)
 	GetInspectMetrics(ctx context.Context, req *metrics_explorer.InspectMetricsRequest, fingerprints []string) (*metrics_explorer.InspectMetricsResponse, *model.ApiError)
 
-	DeleteMetricsMetadata(ctx context.Context, metricName string) *model.ApiError
-	UpdateMetricsMetadata(ctx context.Context, req *model.UpdateMetricsMetadata) *model.ApiError
-	GetUpdatedMetricsMetadata(ctx context.Context, metricNames ...string) (map[string]*model.UpdateMetricsMetadata, *model.ApiError)
+	DeleteMetricsMetadata(ctx context.Context, orgID valuer.UUID, metricName string) *model.ApiError
+	UpdateMetricsMetadata(ctx context.Context, orgID valuer.UUID, req *model.UpdateMetricsMetadata) *model.ApiError
+	GetUpdatedMetricsMetadata(ctx context.Context, orgID valuer.UUID, metricNames ...string) (map[string]*model.UpdateMetricsMetadata, *model.ApiError)
 
 	CheckForLabelsInMetric(ctx context.Context, metricName string, labels []string) (bool, *model.ApiError)
 }
 
 type Querier interface {
-	QueryRange(context.Context, *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error)
+	QueryRange(context.Context, valuer.UUID, *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error)
 
 	// test helpers
 	QueriesExecuted() []string
@@ -151,9 +152,9 @@ type Querier interface {
 }
 
 type QueryCache interface {
-	FindMissingTimeRanges(start, end int64, step int64, cacheKey string) []querycache.MissInterval
-	FindMissingTimeRangesV2(start, end int64, step int64, cacheKey string) []querycache.MissInterval
-	MergeWithCachedSeriesData(cacheKey string, newData []querycache.CachedSeriesData) []querycache.CachedSeriesData
-	StoreSeriesInCache(cacheKey string, series []querycache.CachedSeriesData)
-	MergeWithCachedSeriesDataV2(cacheKey string, series []querycache.CachedSeriesData) []querycache.CachedSeriesData
+	FindMissingTimeRanges(orgID valuer.UUID, start, end int64, step int64, cacheKey string) []querycache.MissInterval
+	FindMissingTimeRangesV2(orgID valuer.UUID, start, end int64, step int64, cacheKey string) []querycache.MissInterval
+	MergeWithCachedSeriesData(orgID valuer.UUID, cacheKey string, newData []querycache.CachedSeriesData) []querycache.CachedSeriesData
+	StoreSeriesInCache(orgID valuer.UUID, cacheKey string, series []querycache.CachedSeriesData)
+	MergeWithCachedSeriesDataV2(orgID valuer.UUID, cacheKey string, series []querycache.CachedSeriesData) []querycache.CachedSeriesData
 }
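For callers, the new QueryCache contract changes only in that every method is tenant-scoped. A sketch of the usual fill-the-gaps flow under these signatures; the real query execution is elided and the fillCache helper is illustrative, not part of this change:

package example

import (
	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
	"github.com/SigNoz/signoz/pkg/query-service/querycache"
	"github.com/SigNoz/signoz/pkg/valuer"
)

func fillCache(qc interfaces.QueryCache, orgID valuer.UUID, start, end, step int64, key string) []querycache.CachedSeriesData {
	// 1. ask which sub-ranges are missing from this org's cached series
	misses := qc.FindMissingTimeRangesV2(orgID, start, end, step, key)

	// 2. run the real queries for the misses (elided) and wrap the results
	fresh := make([]querycache.CachedSeriesData, 0, len(misses))
	for _, m := range misses {
		fresh = append(fresh, querycache.CachedSeriesData{Start: m.Start, End: m.End})
	}

	// 3. merge with whatever was cached for this org and write it back
	return qc.MergeWithCachedSeriesData(orgID, key, fresh)
}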

View File

@@ -60,6 +60,7 @@ func main() {
 	flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
 	// Deprecated
 	flag.StringVar(&ruleRepoURL, "rules.repo-url", constants.AlertHelpPage, "(host address used to build rule link in alert messages)")
+	// Deprecated
 	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
 	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
 	flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")

View File

@@ -1,14 +1,18 @@
 package querycache
 
 import (
+	"context"
 	"encoding/json"
 	"math"
 	"sort"
 	"time"
 
-	"github.com/SigNoz/signoz/pkg/query-service/cache"
+	"github.com/SigNoz/signoz/pkg/cache"
+	"github.com/SigNoz/signoz/pkg/errors"
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
+	"github.com/SigNoz/signoz/pkg/types/cachetypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"go.uber.org/zap"
 )
@@ -27,6 +31,19 @@ type CachedSeriesData struct {
 	Data  []*v3.Series `json:"data"`
 }
 
+var _ cachetypes.Cacheable = (*CacheableSeriesData)(nil)
+
+type CacheableSeriesData struct {
+	Series []CachedSeriesData
+}
+
+func (c *CacheableSeriesData) MarshalBinary() (data []byte, err error) {
+	return json.Marshal(c)
+}
+
+func (c *CacheableSeriesData) UnmarshalBinary(data []byte) error {
+	return json.Unmarshal(data, c)
+}
+
 type QueryCacheOption func(q *queryCache)
 
 func NewQueryCache(opts ...QueryCacheOption) *queryCache {
@@ -51,7 +68,7 @@ func WithFluxInterval(fluxInterval time.Duration) QueryCacheOption {
 
 // FindMissingTimeRangesV2 is a new correct implementation of FindMissingTimeRanges
 // It takes care of any timestamps that were not queried due to rounding in the first version.
-func (q *queryCache) FindMissingTimeRangesV2(start, end int64, step int64, cacheKey string) []MissInterval {
+func (q *queryCache) FindMissingTimeRangesV2(orgID valuer.UUID, start, end int64, step int64, cacheKey string) []MissInterval {
 	if q.cache == nil || cacheKey == "" {
 		return []MissInterval{{Start: start, End: end}}
 	}
@@ -63,7 +80,7 @@ func (q *queryCache) FindMissingTimeRangesV2(start, end int64, step int64, cache
 		return []MissInterval{{Start: start, End: end}}
 	}
 
-	cachedSeriesDataList := q.getCachedSeriesData(cacheKey)
+	cachedSeriesDataList := q.getCachedSeriesData(orgID, cacheKey)
 
 	// Sort the cached data by start time
 	sort.Slice(cachedSeriesDataList, func(i, j int) bool {
@@ -151,12 +168,12 @@ func (q *queryCache) FindMissingTimeRangesV2(start, end int64, step int64, cache
 	return merged
 }
 
-func (q *queryCache) FindMissingTimeRanges(start, end, step int64, cacheKey string) []MissInterval {
+func (q *queryCache) FindMissingTimeRanges(orgID valuer.UUID, start, end, step int64, cacheKey string) []MissInterval {
 	if q.cache == nil || cacheKey == "" {
 		return []MissInterval{{Start: start, End: end}}
 	}
 
-	cachedSeriesDataList := q.getCachedSeriesData(cacheKey)
+	cachedSeriesDataList := q.getCachedSeriesData(orgID, cacheKey)
 
 	// Sort the cached data by start time
 	sort.Slice(cachedSeriesDataList, func(i, j int) bool {
@@ -217,13 +234,17 @@ func (q *queryCache) FindMissingTimeRanges(start, end, step int64, cacheKey stri
 	return missingRanges
 }
 
-func (q *queryCache) getCachedSeriesData(cacheKey string) []*CachedSeriesData {
-	cachedData, _, _ := q.cache.Retrieve(cacheKey, true)
-	var cachedSeriesDataList []*CachedSeriesData
-	if err := json.Unmarshal(cachedData, &cachedSeriesDataList); err != nil {
+func (q *queryCache) getCachedSeriesData(orgID valuer.UUID, cacheKey string) []*CachedSeriesData {
+	cacheableSeriesData := new(CacheableSeriesData)
+	err := q.cache.Get(context.TODO(), orgID, cacheKey, cacheableSeriesData, true)
+	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
 		return nil
 	}
-	return cachedSeriesDataList
+	cachedSeriesData := make([]*CachedSeriesData, 0)
+	for _, cachedSeries := range cacheableSeriesData.Series {
+		cachedSeriesData = append(cachedSeriesData, &cachedSeries)
+	}
+	return cachedSeriesData
 }
 
 func (q *queryCache) mergeSeries(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
@@ -263,34 +284,28 @@ func (q *queryCache) mergeSeries(cachedSeries, missedSeries []*v3.
 	return mergedSeries
 }
 
-func (q *queryCache) storeMergedData(cacheKey string, mergedData []CachedSeriesData) {
+func (q *queryCache) storeMergedData(orgID valuer.UUID, cacheKey string, mergedData []CachedSeriesData) {
 	if q.cache == nil {
 		return
 	}
-	mergedDataJSON, err := json.Marshal(mergedData)
-	if err != nil {
-		zap.L().Error("error marshalling merged data", zap.Error(err))
-		return
-	}
-
-	err = q.cache.Store(cacheKey, mergedDataJSON, 0)
+	cacheableSeriesData := CacheableSeriesData{Series: mergedData}
+	err := q.cache.Set(context.TODO(), orgID, cacheKey, &cacheableSeriesData, 0)
 	if err != nil {
 		zap.L().Error("error storing merged data", zap.Error(err))
 	}
 }
 
-func (q *queryCache) MergeWithCachedSeriesDataV2(cacheKey string, newData []CachedSeriesData) []CachedSeriesData {
+func (q *queryCache) MergeWithCachedSeriesDataV2(orgID valuer.UUID, cacheKey string, newData []CachedSeriesData) []CachedSeriesData {
 	if q.cache == nil {
 		return newData
 	}
 
-	cachedData, _, _ := q.cache.Retrieve(cacheKey, true)
-	var existingData []CachedSeriesData
-	if err := json.Unmarshal(cachedData, &existingData); err != nil {
-		zap.L().Error("error unmarshalling existing data", zap.Error(err))
-		return newData
+	cacheableSeriesData := new(CacheableSeriesData)
+	err := q.cache.Get(context.TODO(), orgID, cacheKey, cacheableSeriesData, true)
+	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
+		return nil
 	}
 
-	allData := append(existingData, newData...)
+	allData := append(cacheableSeriesData.Series, newData...)
 	sort.Slice(allData, func(i, j int) bool {
 		return allData[i].Start < allData[j].Start
@@ -334,13 +349,13 @@ func (q *queryCache) MergeWithCachedSeriesDataV2(cacheKey string, newData []Cach
 	return mergedData
 }
 
-func (q *queryCache) MergeWithCachedSeriesData(cacheKey string, newData []CachedSeriesData) []CachedSeriesData {
-	mergedData := q.MergeWithCachedSeriesDataV2(cacheKey, newData)
-	q.storeMergedData(cacheKey, mergedData)
+func (q *queryCache) MergeWithCachedSeriesData(orgID valuer.UUID, cacheKey string, newData []CachedSeriesData) []CachedSeriesData {
+	mergedData := q.MergeWithCachedSeriesDataV2(orgID, cacheKey, newData)
+	q.storeMergedData(orgID, cacheKey, mergedData)
 	return mergedData
 }
 
-func (q *queryCache) StoreSeriesInCache(cacheKey string, series []CachedSeriesData) {
-	q.storeMergedData(cacheKey, series)
+func (q *queryCache) StoreSeriesInCache(orgID valuer.UUID, cacheKey string, series []CachedSeriesData) {
+	q.storeMergedData(orgID, cacheKey, series)
 }
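CacheableSeriesData is what actually crosses the cache boundary now; its MarshalBinary/UnmarshalBinary are thin JSON wrappers, so a round trip is lossless. A small self-contained sketch, with field names taken from this file:

package main

import (
	"fmt"

	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
	"github.com/SigNoz/signoz/pkg/query-service/querycache"
)

func main() {
	// wrap cached series in the Cacheable envelope used by the new cache
	in := querycache.CacheableSeriesData{
		Series: []querycache.CachedSeriesData{
			{Start: 1000, End: 2000, Data: []*v3.Series{}},
		},
	}
	blob, err := in.MarshalBinary() // json.Marshal under the hood
	if err != nil {
		panic(err)
	}
	out := new(querycache.CacheableSeriesData)
	if err := out.UnmarshalBinary(blob); err != nil { // json.Unmarshal
		panic(err)
	}
	fmt.Println(out.Series[0].Start, out.Series[0].End) // 1000 2000
}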

View File

@@ -1,23 +1,31 @@
 package querycache_test
 
 import (
-	"encoding/json"
+	"context"
 	"testing"
 	"time"
 
-	"github.com/SigNoz/signoz/pkg/query-service/cache/inmemory"
+	"github.com/SigNoz/signoz/pkg/cache"
+	"github.com/SigNoz/signoz/pkg/cache/cachetest"
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 	"github.com/SigNoz/signoz/pkg/query-service/querycache"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestFindMissingTimeRanges(t *testing.T) {
 	// Initialize the mock cache
-	mockCache := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute})
+	opts := cache.Memory{
+		TTL:             5 * time.Minute,
+		CleanupInterval: 10 * time.Minute,
+	}
+	c, err := cachetest.New(cache.Config{Provider: "memory", Memory: opts})
+	require.NoError(t, err)
 
 	// Create a queryCache instance with the mock cache and a fluxInterval
 	q := querycache.NewQueryCache(
-		querycache.WithCache(mockCache),
+		querycache.WithCache(c),
 		querycache.WithFluxInterval(0), // Set to zero for testing purposes
 	)
@@ -216,15 +224,15 @@ func TestFindMissingTimeRanges(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			// Store the cached data in the mock cache
+			orgID := valuer.GenerateUUID()
 			if len(tc.cachedData) > 0 {
-				cachedDataJSON, err := json.Marshal(tc.cachedData)
-				assert.NoError(t, err)
-				err = mockCache.Store(tc.cacheKey, cachedDataJSON, 0)
+				cacheableData := querycache.CacheableSeriesData{Series: tc.cachedData}
+				err = c.Set(context.Background(), orgID, tc.cacheKey, &cacheableData, 0)
 				assert.NoError(t, err)
 			}
 
 			// Call FindMissingTimeRanges
-			missingRanges := q.FindMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.step, tc.cacheKey)
+			missingRanges := q.FindMissingTimeRanges(orgID, tc.requestedStart, tc.requestedEnd, tc.step, tc.cacheKey)
 
 			// Verify the missing ranges
 			assert.Equal(t, tc.expectedMiss, missingRanges)
@@ -234,11 +242,16 @@ func TestFindMissingTimeRanges(t *testing.T) {
 
 func TestFindMissingTimeRangesV2(t *testing.T) {
 	// Initialize the mock cache
-	mockCache := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute})
+	opts := cache.Memory{
+		TTL:             5 * time.Minute,
+		CleanupInterval: 10 * time.Minute,
+	}
+	c, err := cachetest.New(cache.Config{Provider: "memory", Memory: opts})
+	require.NoError(t, err)
 
 	// Create a queryCache instance with the mock cache and a fluxInterval
 	q := querycache.NewQueryCache(
-		querycache.WithCache(mockCache),
+		querycache.WithCache(c),
 		querycache.WithFluxInterval(0), // Set to zero for testing purposes
 	)
@@ -557,16 +570,16 @@ func TestFindMissingTimeRangesV2(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
+			orgID := valuer.GenerateUUID()
 			// Store the cached data in the mock cache
 			if len(tc.cachedData) > 0 {
-				cachedDataJSON, err := json.Marshal(tc.cachedData)
-				assert.NoError(t, err)
-				err = mockCache.Store(tc.cacheKey, cachedDataJSON, 0)
+				cacheableData := querycache.CacheableSeriesData{Series: tc.cachedData}
+				err = c.Set(context.Background(), orgID, tc.cacheKey, &cacheableData, 0)
 				assert.NoError(t, err)
 			}
 
 			// Call FindMissingTimeRanges
-			missingRanges := q.FindMissingTimeRangesV2(tc.requestedStart, tc.requestedEnd, tc.step, tc.cacheKey)
+			missingRanges := q.FindMissingTimeRangesV2(orgID, tc.requestedStart, tc.requestedEnd, tc.step, tc.cacheKey)
 
 			// Verify the missing ranges
 			assert.Equal(t, tc.expectedMiss, missingRanges)
@@ -576,11 +589,16 @@ func TestFindMissingTimeRangesV2(t *testing.T) {
 
 func TestMergeWithCachedSeriesData(t *testing.T) {
 	// Initialize the mock cache
-	mockCache := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute})
+	opts := cache.Memory{
+		TTL:             5 * time.Minute,
+		CleanupInterval: 10 * time.Minute,
+	}
+	c, err := cachetest.New(cache.Config{Provider: "memory", Memory: opts})
+	require.NoError(t, err)
 
 	// Create a queryCache instance with the mock cache and a fluxInterval
 	q := querycache.NewQueryCache(
-		querycache.WithCache(mockCache),
+		querycache.WithCache(c),
 		querycache.WithFluxInterval(0), // Set to zero for testing purposes
 	)
@@ -649,13 +667,14 @@ func TestMergeWithCachedSeriesData(t *testing.T) {
 	}
 
 	// Store existing data in cache
-	cachedDataJSON, err := json.Marshal(existingData)
-	assert.NoError(t, err)
-	err = mockCache.Store(cacheKey, cachedDataJSON, 0)
+	orgID := valuer.GenerateUUID()
+	cacheableData := querycache.CacheableSeriesData{Series: existingData}
+	err = c.Set(context.Background(), orgID, cacheKey, &cacheableData, 0)
 	assert.NoError(t, err)
 
 	// Call MergeWithCachedSeriesData
-	mergedData := q.MergeWithCachedSeriesData(cacheKey, newData)
+	mergedData := q.MergeWithCachedSeriesData(orgID, cacheKey, newData)
 
 	// Verify the merged data
 	assert.Equal(t, len(expectedMergedData), len(mergedData))

View File

@@ -15,6 +15,7 @@ import (
 	qslabels "github.com/SigNoz/signoz/pkg/query-service/utils/labels"
 	"github.com/SigNoz/signoz/pkg/sqlstore"
 	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"go.uber.org/zap"
 )
@@ -22,6 +23,7 @@ import (
 type BaseRule struct {
 	id     string
 	name   string
+	orgID  valuer.UUID
 	source string
 
 	handledRestart bool
@@ -116,13 +118,14 @@ func WithSQLStore(sqlstore sqlstore.SQLStore) RuleOption {
 	}
 }
 
-func NewBaseRule(id string, p *ruletypes.PostableRule, reader interfaces.Reader, opts ...RuleOption) (*BaseRule, error) {
+func NewBaseRule(id string, orgID valuer.UUID, p *ruletypes.PostableRule, reader interfaces.Reader, opts ...RuleOption) (*BaseRule, error) {
 	if p.RuleCondition == nil || !p.RuleCondition.IsValid() {
 		return nil, fmt.Errorf("invalid rule condition")
 	}
 
 	baseRule := &BaseRule{
 		id:     id,
+		orgID:  orgID,
 		name:   p.AlertName,
 		source: p.Source,
 		typ:    p.AlertType,
@@ -218,6 +221,7 @@ func (r *ThresholdRule) hostFromSource() string {
 }
 
 func (r *BaseRule) ID() string { return r.id }
+func (r *BaseRule) OrgID() valuer.UUID { return r.orgID }
 func (r *BaseRule) Name() string { return r.name }
 func (r *BaseRule) Condition() *ruletypes.RuleCondition { return r.ruleCondition }
 func (r *BaseRule) Labels() qslabels.BaseLabels { return r.labels }
@@ -679,7 +683,7 @@ func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, curren
 	return nil
 }
 
-func (r *BaseRule) PopulateTemporality(ctx context.Context, qp *v3.QueryRangeParamsV3) error {
+func (r *BaseRule) PopulateTemporality(ctx context.Context, orgID valuer.UUID, qp *v3.QueryRangeParamsV3) error {
 	missingTemporality := make([]string, 0)
 	metricNameToTemporality := make(map[string]map[v3.Temporality]bool)
@@ -711,7 +715,7 @@ func (r *BaseRule) PopulateTemporality(ctx context.Context, qp *v3.QueryRangePar
 
 	var err error
 	if len(missingTemporality) > 0 {
-		nameToTemporality, err = r.reader.FetchTemporality(ctx, missingTemporality)
+		nameToTemporality, err = r.reader.FetchTemporality(ctx, orgID, missingTemporality)
 		if err != nil {
 			return err
 		}

View File

@@ -18,8 +18,8 @@ import (
 	"github.com/jmoiron/sqlx"
 
 	"github.com/SigNoz/signoz/pkg/alertmanager"
+	"github.com/SigNoz/signoz/pkg/cache"
 	"github.com/SigNoz/signoz/pkg/prometheus"
-	"github.com/SigNoz/signoz/pkg/query-service/cache"
 	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
 	"github.com/SigNoz/signoz/pkg/query-service/model"
 	"github.com/SigNoz/signoz/pkg/query-service/telemetry"
@@ -44,7 +44,7 @@ type PrepareTaskOptions struct {
 	ManagerOpts *ManagerOptions
 	NotifyFunc  NotifyFunc
 	SQLStore    sqlstore.SQLStore
-	OrgID       string
+	OrgID       valuer.UUID
 }
 
 type PrepareTestRuleOptions struct {
@@ -57,6 +57,7 @@ type PrepareTestRuleOptions struct {
 	ManagerOpts *ManagerOptions
 	NotifyFunc  NotifyFunc
 	SQLStore    sqlstore.SQLStore
+	OrgID       valuer.UUID
 }
 
 const taskNamesuffix = "webAppEditor"
@@ -144,6 +145,7 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) {
 		// create a threshold rule
 		tr, err := NewThresholdRule(
 			ruleId,
+			opts.OrgID,
 			opts.Rule,
 			opts.Reader,
 			WithEvalDelay(opts.ManagerOpts.EvalDelay),
@@ -164,6 +166,7 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) {
 		// create promql rule
 		pr, err := NewPromRule(
 			ruleId,
+			opts.OrgID,
 			opts.Rule,
 			opts.Logger,
 			opts.Reader,
@@ -245,7 +248,7 @@ func (m *Manager) initiate(ctx context.Context) error {
 	var loadErrors []error
 
 	for _, orgID := range orgIDs {
-		storedRules, err := m.ruleStore.GetStoredRules(ctx, orgID)
+		storedRules, err := m.ruleStore.GetStoredRules(ctx, orgID.StringValue())
 		if err != nil {
 			return err
 		}
@@ -320,6 +323,10 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, idStr string) er
 	if err != nil {
 		return err
 	}
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		return err
+	}
 
 	ruleUUID, err := valuer.NewUUID(idStr)
 	if err != nil {
@@ -379,7 +386,7 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, idStr string) er
 			return err
 		}
 
-		err = m.syncRuleStateWithTask(ctx, claims.OrgID, prepareTaskName(existingRule.ID.StringValue()), parsedRule)
+		err = m.syncRuleStateWithTask(ctx, orgID, prepareTaskName(existingRule.ID.StringValue()), parsedRule)
 		if err != nil {
 			return err
 		}
@@ -388,7 +395,7 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, idStr string) er
 	})
 }
 
-func (m *Manager) editTask(_ context.Context, orgID string, rule *ruletypes.PostableRule, taskName string) error {
+func (m *Manager) editTask(_ context.Context, orgID valuer.UUID, rule *ruletypes.PostableRule, taskName string) error {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
@@ -506,6 +513,11 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*ruletypes.Ge
 		return nil, err
 	}
 
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		return nil, err
+	}
+
 	parsedRule, err := ruletypes.ParsePostableRule([]byte(ruleStr))
 	if err != nil {
 		return nil, err
@@ -559,7 +571,7 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*ruletypes.Ge
 		}
 
 		taskName := prepareTaskName(id.StringValue())
-		if err := m.addTask(ctx, claims.OrgID, parsedRule, taskName); err != nil {
+		if err := m.addTask(ctx, orgID, parsedRule, taskName); err != nil {
 			return err
 		}
@@ -575,7 +587,7 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*ruletypes.Ge
 	}, nil
 }
 
-func (m *Manager) addTask(_ context.Context, orgID string, rule *ruletypes.PostableRule, taskName string) error {
+func (m *Manager) addTask(_ context.Context, orgID valuer.UUID, rule *ruletypes.PostableRule, taskName string) error {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
@@ -854,7 +866,7 @@ func (m *Manager) GetRule(ctx context.Context, idStr string) (*ruletypes.Gettabl
 // syncRuleStateWithTask ensures that the state of a stored rule matches
 // the task state. For example - if a stored rule is disabled, then
 // there is no task running against it.
-func (m *Manager) syncRuleStateWithTask(ctx context.Context, orgID string, taskName string, rule *ruletypes.PostableRule) error {
+func (m *Manager) syncRuleStateWithTask(ctx context.Context, orgID valuer.UUID, taskName string, rule *ruletypes.PostableRule) error {
 
 	if rule.Disabled {
 		// check if rule has any task running
@@ -891,6 +903,11 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleIdStr strin
 		return nil, err
 	}
 
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		return nil, err
+	}
+
 	ruleID, err := valuer.NewUUID(ruleIdStr)
 	if err != nil {
 		return nil, errors.New(err.Error())
@@ -919,7 +936,7 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleIdStr strin
 	}
 
 	// deploy or un-deploy task according to patched (new) rule state
-	if err := m.syncRuleStateWithTask(ctx, claims.OrgID, taskName, patchedRule); err != nil {
+	if err := m.syncRuleStateWithTask(ctx, orgID, taskName, patchedRule); err != nil {
 		zap.L().Error("failed to sync stored rule state with the task", zap.String("taskName", taskName), zap.Error(err))
 		return nil, err
 	}
@@ -937,7 +954,7 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleIdStr strin
 	err = m.ruleStore.EditRule(ctx, storedJSON, func(ctx context.Context) error { return nil })
 	if err != nil {
-		if err := m.syncRuleStateWithTask(ctx, claims.OrgID, taskName, &storedRule); err != nil {
+		if err := m.syncRuleStateWithTask(ctx, orgID, taskName, &storedRule); err != nil {
 			zap.L().Error("failed to restore rule after patch failure", zap.String("taskName", taskName), zap.Error(err))
 		}
 		return nil, err
@@ -962,7 +979,7 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleIdStr strin
 
 // TestNotification prepares a dummy rule for given rule parameters and
 // sends a test notification. returns alert count and error (if any)
-func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *model.ApiError) {
+func (m *Manager) TestNotification(ctx context.Context, orgID valuer.UUID, ruleStr string) (int, *model.ApiError) {
 
 	parsedRule, err := ruletypes.ParsePostableRule([]byte(ruleStr))
@@ -980,6 +997,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
 		ManagerOpts: m.opts,
 		NotifyFunc:  m.prepareTestNotifyFunc(),
 		SQLStore:    m.sqlstore,
+		OrgID:       orgID,
 	})
 
 	return alertCount, apiErr
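The manager now carries the tenant as a valuer.UUID end to end and only converts at boundaries that are still string-keyed. The three helpers it leans on (GenerateUUID, NewUUID, StringValue) all appear above; a small sketch of how they compose, with the round-trip assertion being an assumption about the type rather than something this diff states:

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/valuer"
)

func main() {
	// fresh tenant ID, as used when constructing rules in tests
	orgID := valuer.GenerateUUID()

	// string form for stores that still key by string (GetStoredRules, maintenance store)
	s := orgID.StringValue()

	// parsing back (e.g. from JWT claims) can fail, hence the error return
	parsed, err := valuer.NewUUID(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.StringValue() == s) // expected: true
}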

View File

@@ -17,6 +17,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/query-service/utils/times"
 	"github.com/SigNoz/signoz/pkg/query-service/utils/timestamp"
 	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/prometheus/prometheus/promql"
 	yaml "gopkg.in/yaml.v2"
 )
@@ -28,6 +29,7 @@ type PromRule struct {
 
 func NewPromRule(
 	id string,
+	orgID valuer.UUID,
 	postableRule *ruletypes.PostableRule,
 	logger *zap.Logger,
 	reader interfaces.Reader,
@@ -35,7 +37,7 @@ func NewPromRule(
 	opts ...RuleOption,
 ) (*PromRule, error) {
 
-	baseRule, err := NewBaseRule(id, postableRule, reader, opts...)
+	baseRule, err := NewBaseRule(id, orgID, postableRule, reader, opts...)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -9,6 +9,7 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/common"
	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
	opentracing "github.com/opentracing/opentracing-go"
	plabels "github.com/prometheus/prometheus/model/labels"
	"go.uber.org/zap"
@@ -37,12 +38,12 @@ type PromRuleTask struct {
	notify           NotifyFunc
	maintenanceStore ruletypes.MaintenanceStore
-	orgID            string
+	orgID            valuer.UUID
}

// newPromRuleTask holds rules that have promql condition
// and evalutes the rule at a given frequency
-func NewPromRuleTask(name, file string, frequency time.Duration, rules []Rule, opts *ManagerOptions, notify NotifyFunc, maintenanceStore ruletypes.MaintenanceStore, orgID string) *PromRuleTask {
+func NewPromRuleTask(name, file string, frequency time.Duration, rules []Rule, opts *ManagerOptions, notify NotifyFunc, maintenanceStore ruletypes.MaintenanceStore, orgID valuer.UUID) *PromRuleTask {
	zap.L().Info("Initiating a new rule group", zap.String("name", name), zap.Duration("frequency", frequency))
	if time.Now() == time.Now().Add(frequency) {
@@ -326,7 +327,7 @@ func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) {
	}()
	zap.L().Info("promql rule task", zap.String("name", g.name), zap.Time("eval started at", ts))
-	maintenance, err := g.maintenanceStore.GetAllPlannedMaintenance(ctx, g.orgID)
+	maintenance, err := g.maintenanceStore.GetAllPlannedMaintenance(ctx, g.orgID.StringValue())
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
	}
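The same pattern repeats across both task types: the org ID is carried as a typed valuer.UUID and unwrapped to a string only where a store API still expects one. A hedged construction sketch (rule set and options elided):

	orgID := valuer.GenerateUUID() // in production this comes from the org record, not a fresh UUID
	task := NewPromRuleTask("my-group", "rules.yaml", time.Minute, rules, opts, notify, maintenanceStore, orgID)
	// Inside Eval, the typed ID crosses the storage boundary as a string:
	// g.maintenanceStore.GetAllPlannedMaintenance(ctx, g.orgID.StringValue())
	_ = task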

View File

@@ -6,6 +6,7 @@ import (
	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
	pql "github.com/prometheus/prometheus/promql"
	"github.com/stretchr/testify/assert"
	"go.uber.org/zap"
@@ -657,7 +658,7 @@ func TestPromRuleShouldAlert(t *testing.T) {
	postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
	postableRule.RuleCondition.Target = &c.target
-	rule, err := NewPromRule("69", &postableRule, zap.NewNop(), nil, nil)
+	rule, err := NewPromRule("69", valuer.GenerateUUID(), &postableRule, zap.NewNop(), nil, nil)
	if err != nil {
		assert.NoError(t, err)
	}

View File

@@ -10,6 +10,7 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/common"
	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
	opentracing "github.com/opentracing/opentracing-go"
	"go.uber.org/zap"
)
@@ -34,13 +35,13 @@ type RuleTask struct {
	notify           NotifyFunc
	maintenanceStore ruletypes.MaintenanceStore
-	orgID            string
+	orgID            valuer.UUID
}

const DefaultFrequency = 1 * time.Minute

// NewRuleTask makes a new RuleTask with the given name, options, and rules.
-func NewRuleTask(name, file string, frequency time.Duration, rules []Rule, opts *ManagerOptions, notify NotifyFunc, maintenanceStore ruletypes.MaintenanceStore, orgID string) *RuleTask {
+func NewRuleTask(name, file string, frequency time.Duration, rules []Rule, opts *ManagerOptions, notify NotifyFunc, maintenanceStore ruletypes.MaintenanceStore, orgID valuer.UUID) *RuleTask {
	if time.Now() == time.Now().Add(frequency) {
		frequency = DefaultFrequency
@@ -308,7 +309,7 @@ func (g *RuleTask) Eval(ctx context.Context, ts time.Time) {
	zap.L().Debug("rule task eval started", zap.String("name", g.name), zap.Time("start time", ts))
-	maintenance, err := g.maintenanceStore.GetAllPlannedMaintenance(ctx, g.orgID)
+	maintenance, err := g.maintenanceStore.GetAllPlannedMaintenance(ctx, g.orgID.StringValue())
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))

View File

@@ -5,6 +5,7 @@ import (
	"time"

	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/valuer"
)

type TaskType string
@@ -31,7 +32,7 @@ type Task interface {
// newTask returns an appropriate group for
// rule type
-func newTask(taskType TaskType, name, file string, frequency time.Duration, rules []Rule, opts *ManagerOptions, notify NotifyFunc, maintenanceStore ruletypes.MaintenanceStore, orgID string) Task {
+func newTask(taskType TaskType, name, file string, frequency time.Duration, rules []Rule, opts *ManagerOptions, notify NotifyFunc, maintenanceStore ruletypes.MaintenanceStore, orgID valuer.UUID) Task {
	if taskType == TaskTypeCh {
		return NewRuleTask(name, file, frequency, rules, opts, notify, maintenanceStore, orgID)
	}
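A hedged sketch of the dispatch, assuming the caller (the manager's sync loop, not shown in this hunk) already holds the tenant's UUID:

	// TaskTypeCh yields a ClickHouse RuleTask; other task types fall through
	// to the promql task constructor, per the hunk above.
	task := newTask(TaskTypeCh, "my-group", "rules.yaml", time.Minute, rules, opts, notify, maintenanceStore, orgID)
	_ = task // scheduling of the returned Task is unchanged and out of scope here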

View File

@@ -45,6 +45,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
		// create a threshold rule
		rule, err = NewThresholdRule(
			alertname,
+			opts.OrgID,
			parsedRule,
			opts.Reader,
			WithSendAlways(),
@@ -53,7 +54,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
		)
		if err != nil {
-			zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err))
+			zap.L().Error("failed to prepare a new threshold rule for test", zap.Error(err))
			return 0, model.BadRequest(err)
		}
@@ -62,6 +63,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
		// create promql rule
		rule, err = NewPromRule(
			alertname,
+			opts.OrgID,
			parsedRule,
			opts.Logger,
			opts.Reader,
@@ -72,7 +74,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
		)
		if err != nil {
-			zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err))
+			zap.L().Error("failed to prepare a new promql rule for test", zap.Error(err))
			return 0, model.BadRequest(err)
		}
	} else {
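A side fix rides along here: the failure-path logs no longer call rule.Name(), which would dereference a nil rule when the constructor errors. Illustration of the hazard (hedged; this assumes the constructors return a nil rule together with a non-nil error, which the new log line no longer relies on):

	rule, err = NewThresholdRule(alertname, opts.OrgID, parsedRule, opts.Reader, WithSendAlways())
	if err != nil {
		// rule is nil on this path; rule.Name() would panic, so log only the error.
		zap.L().Error("failed to prepare a new threshold rule for test", zap.Error(err))
		return 0, model.BadRequest(err)
	}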

View File

@@ -16,6 +16,7 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/query-service/postprocess"
	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/valuer"

	"github.com/SigNoz/signoz/pkg/query-service/app/querier"
	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
@@ -55,6 +56,7 @@ type ThresholdRule struct {
func NewThresholdRule(
	id string,
+	orgID valuer.UUID,
	p *ruletypes.PostableRule,
	reader interfaces.Reader,
	opts ...RuleOption,
@@ -62,7 +64,7 @@ func NewThresholdRule(
	zap.L().Info("creating new ThresholdRule", zap.String("id", id), zap.Any("opts", opts))
-	baseRule, err := NewBaseRule(id, p, reader, opts...)
+	baseRule, err := NewBaseRule(id, orgID, p, reader, opts...)
	if err != nil {
		return nil, err
	}
@@ -249,13 +251,13 @@ func (r *ThresholdRule) GetSelectedQuery() string {
	return r.ruleCondition.GetSelectedQueryName()
}

-func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (ruletypes.Vector, error) {
+func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
	params, err := r.prepareQueryRange(ts)
	if err != nil {
		return nil, err
	}
-	err = r.PopulateTemporality(ctx, params)
+	err = r.PopulateTemporality(ctx, orgID, params)
	if err != nil {
		return nil, fmt.Errorf("internal error while setting temporality")
	}
@@ -299,9 +301,9 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (rul
	var queryErrors map[string]error

	if r.version == "v4" {
-		results, queryErrors, err = r.querierV2.QueryRange(ctx, params)
+		results, queryErrors, err = r.querierV2.QueryRange(ctx, orgID, params)
	} else {
-		results, queryErrors, err = r.querier.QueryRange(ctx, params)
+		results, queryErrors, err = r.querier.QueryRange(ctx, orgID, params)
	}

	if err != nil {
@@ -361,7 +363,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
	prevState := r.State()
	valueFormatter := formatter.FromUnit(r.Unit())
-	res, err := r.buildAndRunQuery(ctx, ts)
+	res, err := r.buildAndRunQuery(ctx, r.orgID, ts)
	if err != nil {
		return nil, err
View File

@@ -8,14 +8,14 @@ import (
	"time"

	"github.com/SigNoz/signoz/pkg/cache"
-	"github.com/SigNoz/signoz/pkg/cache/memorycache"
-	"github.com/SigNoz/signoz/pkg/factory/factorytest"
+	"github.com/SigNoz/signoz/pkg/cache/cachetest"
	"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
	"github.com/SigNoz/signoz/pkg/prometheus"
	"github.com/SigNoz/signoz/pkg/prometheus/prometheustest"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/valuer"

	"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
	"github.com/SigNoz/signoz/pkg/query-service/common"
@@ -801,7 +801,7 @@ func TestThresholdRuleShouldAlert(t *testing.T) {
	postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
	postableRule.RuleCondition.Target = &c.target
-	rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, WithEvalDelay(2*time.Minute))
	if err != nil {
		assert.NoError(t, err)
	}
@@ -889,7 +889,7 @@ func TestPrepareLinksToLogs(t *testing.T) {
		},
	}
-	rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, WithEvalDelay(2*time.Minute))
	if err != nil {
		assert.NoError(t, err)
	}
@@ -930,7 +930,7 @@ func TestPrepareLinksToTraces(t *testing.T) {
		},
	}
-	rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, WithEvalDelay(2*time.Minute))
	if err != nil {
		assert.NoError(t, err)
	}
@@ -1005,7 +1005,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
	postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
	postableRule.RuleCondition.Target = &c.target
-	rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, WithEvalDelay(2*time.Minute))
	if err != nil {
		assert.NoError(t, err)
	}
@@ -1057,7 +1057,7 @@ func TestThresholdRuleEvalDelay(t *testing.T) {
	}

	for idx, c := range cases {
-		rule, err := NewThresholdRule("69", &postableRule, nil) // no eval delay
+		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil) // no eval delay
		if err != nil {
			assert.NoError(t, err)
		}
@@ -1105,7 +1105,7 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) {
	}

	for idx, c := range cases {
-		rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
+		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, WithEvalDelay(2*time.Minute))
		if err != nil {
			assert.NoError(t, err)
		}
@@ -1242,10 +1242,10 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
	}

	options := clickhouseReader.NewOptions("", "", "archiveNamespace")
-	readerCache, err := memorycache.New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
+	readerCache, err := cachetest.New(cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
	require.NoError(t, err)
	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), readerCache)
-	rule, err := NewThresholdRule("69", &postableRule, reader)
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader)
	rule.TemporalityMap = map[string]map[v3.Temporality]bool{
		"signoz_calls_total": {
			v3.Delta: true,
@@ -1338,11 +1338,12 @@ func TestThresholdRuleNoData(t *testing.T) {
		"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
		"summary":     "The rule threshold is set to {{$threshold}}, and the observed metric value is {{$value}}",
	}
-	readerCache, err := memorycache.New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
+	readerCache, err := cachetest.New(cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
+	assert.NoError(t, err)
	options := clickhouseReader.NewOptions("", "", "archiveNamespace")
	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), readerCache)
-	rule, err := NewThresholdRule("69", &postableRule, reader)
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader)
	rule.TemporalityMap = map[string]map[v3.Temporality]bool{
		"signoz_calls_total": {
			v3.Delta: true,
@@ -1446,7 +1447,7 @@ func TestThresholdRuleTracesLink(t *testing.T) {
	options := clickhouseReader.NewOptions("", "", "archiveNamespace")
	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), nil)
-	rule, err := NewThresholdRule("69", &postableRule, reader)
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader)
	rule.TemporalityMap = map[string]map[v3.Temporality]bool{
		"signoz_calls_total": {
			v3.Delta: true,
@@ -1567,7 +1568,7 @@ func TestThresholdRuleLogsLink(t *testing.T) {
	options := clickhouseReader.NewOptions("", "", "archiveNamespace")
	reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), nil)
-	rule, err := NewThresholdRule("69", &postableRule, reader)
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader)
	rule.TemporalityMap = map[string]map[v3.Temporality]bool{
		"signoz_calls_total": {
			v3.Delta: true,
@@ -1643,7 +1644,7 @@ func TestThresholdRuleShiftBy(t *testing.T) {
		},
	}
-	rule, err := NewThresholdRule("69", &postableRule, nil)
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil)
	if err != nil {
		assert.NoError(t, err)
	}
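The test-side change is mechanical: cachetest.New wraps the in-memory provider so tests no longer wire factory settings by hand. Side by side, with the old call kept as a comment for contrast:

	// Before: memorycache.New(context.Background(), factorytest.NewSettings(),
	//         cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
	readerCache, err := cachetest.New(cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
	require.NoError(t, err)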

View File

@@ -128,8 +128,8 @@ func (r *rule) GetRuleUUID(ctx context.Context, ruleID int) (*ruletypes.RuleHist
	return ruleHistory, nil
}

-func (r *rule) ListOrgs(ctx context.Context) ([]string, error) {
-	orgIDs := []string{}
+func (r *rule) ListOrgs(ctx context.Context) ([]valuer.UUID, error) {
+	orgIDs := make([]valuer.UUID, 0)
	err := r.sqlstore.
		BunDB().
		NewSelect().
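With ListOrgs returning typed UUIDs, the per-org preload mentioned in the commit log becomes a plain loop. A hedged sketch; preloadMetricsForOrg is an illustrative name, not a function in this diff:

	orgIDs, err := ruleStore.ListOrgs(ctx)
	if err != nil {
		return err
	}
	for _, orgID := range orgIDs {
		// Each org warms its own cache namespace, keyed by the typed UUID.
		if err := preloadMetricsForOrg(ctx, orgID); err != nil {
			zap.L().Error("metrics preload failed", zap.String("org_id", orgID.StringValue()), zap.Error(err))
		}
	}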

View File

@@ -0,0 +1,33 @@
+package cachetypes
+
+import (
+	"encoding"
+	"reflect"
+
+	"github.com/SigNoz/signoz/pkg/errors"
+)
+
+type Cacheable interface {
+	encoding.BinaryMarshaler
+	encoding.BinaryUnmarshaler
+}
+
+func WrapCacheableErrors(rt reflect.Type, caller string) error {
+	if rt == nil {
+		return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "%s: (nil)", caller)
+	}
+
+	if rt.Kind() != reflect.Pointer {
+		return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "%s: (non-pointer \"%s\")", caller, rt.String())
+	}
+
+	return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "%s: (nil \"%s\")", caller, rt.String())
+}
+
+func ValidatePointer(dest any, caller string) error {
+	rv := reflect.ValueOf(dest)
+	if rv.Kind() != reflect.Pointer || rv.IsNil() {
+		return WrapCacheableErrors(reflect.TypeOf(dest), caller)
+	}
+	return nil
+}
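For anyone adopting the new package: a value becomes Cacheable by implementing the two standard encoding interfaces, and ValidatePointer guards decode destinations. A minimal hedged example; the type, the JSON encoding, and decodeInto are illustrative, not from this diff (assumes "encoding/json" is imported):

	// cachedValue is a hypothetical Cacheable implementation that uses
	// JSON as its binary form.
	type cachedValue struct {
		Name string `json:"name"`
	}

	func (c *cachedValue) MarshalBinary() ([]byte, error) { return json.Marshal(c) }
	func (c *cachedValue) UnmarshalBinary(b []byte) error { return json.Unmarshal(b, c) }

	func decodeInto(dest *cachedValue, raw []byte) error {
		// Nil or non-pointer destinations are rejected with a typed invalid-input error.
		if err := cachetypes.ValidatePointer(dest, "decodeInto"); err != nil {
			return err
		}
		return dest.UnmarshalBinary(raw)
	}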

View File

@@ -33,5 +33,5 @@ type RuleStore interface {
	GetStoredRule(context.Context, valuer.UUID) (*Rule, error)
	GetRuleUUID(context.Context, int) (*RuleHistory, error)
	GetAlertsInfo(context.Context) (*model.AlertsInfo, error)
-	ListOrgs(context.Context) ([]string, error)
+	ListOrgs(context.Context) ([]valuer.UUID, error)
}