diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 8f346bf882..8b9f8ed3a5 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -8,6 +8,21 @@ on:
       - release/v*
 
 jobs:
+  lint-and-test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2.4.0
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: 1.17
+
+      - name: Install tools
+        run: make install-ci
+
+      - name: Run unit tests
+        run: make test-ci
+
   build-frontend:
     runs-on: ubuntu-latest
     steps:
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000000..e61fbca27f
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,37 @@
+run:
+  timeout: 10m
+linters-settings:
+  depguard:
+    list-type: blacklist
+    include-go-root: true
+  gofmt:
+    simplify: true
+  gosimple:
+    go: '1.17'
+linters:
+  enable:
+    - gofmt
+    - goimports
+    - misspell
+  disable:
+    - staticcheck
+    - typecheck
+    - gosec
+    - govet
+    - errcheck
+    - gocritic
+    - revive
+    - deadcode
+    - gosimple
+    - ineffassign
+    - depguard
+    - errorlint
+    - structcheck
+    - varcheck
+    - unused
+
+issues:
+  exclude-rules:
+    - path: _test\.go
+      linters:
+        - gosec
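Of the three linters enabled above, goimports mainly regroups imports, so a minimal standalone sketch (not part of the patch; the function names are made up) of what the other two flag:

    package main

    import "fmt"

    // gofmt -s (simplify: true above) flags the redundant slice expression
    // here and should rewrite xs[0:len(xs)] to xs[0:].
    func simplifyMe(xs []int) []int {
    	return xs[0:len(xs)]
    }

    // misspell checks comments and strings against a list of common
    // misspellings and should flag "reponse" -> "response" -- the same
    // typo this patch fixes in reader.go below.
    func typoDemo() string {
    	return "reponse"
    }

    func main() {
    	fmt.Println(simplifyMe([]int{1, 2})[0], typoDemo())
    }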
diff --git a/Makefile b/Makefile
index 7aaa3a41d6..4852df372d 100644
--- a/Makefile
+++ b/Makefile
@@ -14,6 +14,14 @@ QUERY_SERVICE_DIRECTORY ?= pkg/query-service
 STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
 SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
 
+GOOS ?= $(shell go env GOOS)
+GOARCH ?= $(shell go env GOARCH)
+GOPATH ?= $(shell go env GOPATH)
+GOTEST=go test -v $(RACE)
+GOFMT=gofmt
+FMT_LOG=.fmt.log
+IMPORT_LOG=.import.log
+
 REPONAME ?= signoz
 DOCKER_TAG ?= latest
 
@@ -30,6 +38,12 @@ gitBranch=${PACKAGE}/version.gitBranch
 LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"
 
 all: build-push-frontend build-push-query-service
+
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: fmt lint
+
 # Steps to build and push docker image of frontend
 .PHONY: build-frontend-amd64 build-push-frontend
 # Step to build docker image of frontend in amd64 (used in build pipeline)
@@ -92,3 +106,23 @@ clear-standalone-data:
 clear-swarm-data:
 	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
 		sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
+
+.PHONY: install-tools
+install-tools:
+	go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.0
+
+.PHONY: lint
+lint:
+	@cd $(QUERY_SERVICE_DIRECTORY) && \
+	$(GOPATH)/bin/golangci-lint -v run
+
+.PHONY: fmt
+fmt:
+	@echo Running go fmt on query service ...
+	@$(GOFMT) -e -s -l -w $(QUERY_SERVICE_DIRECTORY)
+
+.PHONY: install-ci
+install-ci: install-tools
+
+.PHONY: test-ci
+test-ci: lint
diff --git a/pkg/query-service/app/clickhouseReader/options.go b/pkg/query-service/app/clickhouseReader/options.go
index 99fe5080ae..96766fc674 100644
--- a/pkg/query-service/app/clickhouseReader/options.go
+++ b/pkg/query-service/app/clickhouseReader/options.go
@@ -23,7 +23,7 @@ const (
 	defaultOperationsTable    string        = "signoz_operations"
 	defaultIndexTable         string        = "signoz_index_v2"
 	defaultErrorTable         string        = "signoz_error_index_v2"
-	defaulDurationTable       string        = "durationSortMV"
+	defaultDurationTable      string        = "durationSortMV"
 	defaultSpansTable         string        = "signoz_spans"
 	defaultWriteBatchDelay    time.Duration = 5 * time.Second
 	defaultWriteBatchSize     int           = 10000
@@ -58,12 +58,15 @@ type namespaceConfig struct {
 	Connector Connector
 }
 
-// Connecto defines how to connect to the database
+// Connector defines how to connect to the database
 type Connector func(cfg *namespaceConfig) (clickhouse.Conn, error)
 
 func defaultConnector(cfg *namespaceConfig) (clickhouse.Conn, error) {
 	ctx := context.Background()
 	dsnURL, err := url.Parse(cfg.Datasource)
+	if err != nil {
+		return nil, err
+	}
 	options := &clickhouse.Options{
 		Addr: []string{dsnURL.Host},
 	}
@@ -109,7 +112,7 @@ func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...s
 			OperationsTable: defaultOperationsTable,
 			IndexTable:      defaultIndexTable,
 			ErrorTable:      defaultErrorTable,
-			DurationTable:   defaulDurationTable,
+			DurationTable:   defaultDurationTable,
 			SpansTable:      defaultSpansTable,
 			WriteBatchDelay: defaultWriteBatchDelay,
 			WriteBatchSize:  defaultWriteBatchSize,
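The options.go hunk above closes a real gap: url.Parse returns a nil *url.URL alongside its error, so reading dsnURL.Host without the new check panics instead of surfacing the bad datasource. A minimal standalone sketch of the failure mode (the DSN value is made up; control characters are among the few inputs url.Parse rejects):

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	// A control character in the DSN makes url.Parse fail.
    	dsnURL, err := url.Parse("tcp://localhost:9000/\x7f")
    	if err != nil {
    		// Without the added check, execution would continue with
    		// dsnURL == nil and panic on the dsnURL.Host field access.
    		fmt.Println("parse error:", err)
    		return
    	}
    	fmt.Println("host:", dsnURL.Host)
    }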
diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go
index 42182a8e81..8657632ddf 100644
--- a/pkg/query-service/app/clickhouseReader/reader.go
+++ b/pkg/query-service/app/clickhouseReader/reader.go
@@ -30,7 +30,7 @@ import (
 	"github.com/prometheus/common/promlog"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
-	sd_config "github.com/prometheus/prometheus/discovery/config"
+	sdconfig "github.com/prometheus/prometheus/discovery/config"
 	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/rules"
@@ -73,14 +73,14 @@ const (
 )
 
 var (
-	ErrNoOperationsTable = errors.New("no operations table supplied")
-	ErrNoIndexTable      = errors.New("no index table supplied")
-	ErrStartTimeRequired = errors.New("start time is required for search queries")
-	seededRand *rand.Rand = rand.New(
+	ErrNoOperationsTable = errors.New("no operations table supplied")
+	ErrNoIndexTable      = errors.New("no index table supplied")
+	ErrStartTimeRequired = errors.New("start time is required for search queries")
+	seededRand           = rand.New(
 		rand.NewSource(time.Now().UnixNano()))
 )
 
-// SpanWriter for reading spans from ClickHouse
+// ClickHouseReader reads the data from ClickHouse
 type ClickHouseReader struct {
 	db      clickhouse.Conn
 	localDB *sqlx.DB
@@ -97,7 +97,7 @@ type ClickHouseReader struct {
 	alertManager am.Manager
 }
 
-// NewTraceReader returns a TraceReader for the database
+// NewReader returns a reader for the database
 func NewReader(localDB *sqlx.DB) *ClickHouseReader {
 
 	datasource := os.Getenv("ClickHouseUrl")
@@ -142,7 +142,7 @@ func (r *ClickHouseReader) Start() {
 
 	}
 
-	remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), startTime, time.Duration(1*time.Minute))
+	remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), startTime, 1*time.Minute)
 
 	// conf, err := config.LoadFile(*filename)
 	// if err != nil {
@@ -188,7 +188,7 @@ func (r *ClickHouseReader) Start() {
 	cfg.notifier.QueueCapacity = 10000
 	cfg.notifierTimeout = promModel.Duration(time.Duration.Seconds(10))
 
-	notifier := notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier"))
+	notifierManager := notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier"))
 	// notifier.ApplyConfig(conf)
 
 	ExternalURL, err := computeExternalURL("", "0.0.0.0:3301")
@@ -214,7 +214,7 @@ func (r *ClickHouseReader) Start() {
 		Reg:            nil,
 		MaxConcurrent:  20,
 		MaxSamples:     50000000,
-		Timeout:        time.Duration(2 * time.Minute),
+		Timeout:        2 * time.Minute,
 	}
 
 	queryEngine := promql.NewEngine(opts)
@@ -223,7 +223,7 @@ func (r *ClickHouseReader) Start() {
 		Appendable:  fanoutStorage,
 		TSDB:        localStorage,
 		QueryFunc:   rules.EngineQueryFunc(queryEngine, fanoutStorage),
-		NotifyFunc:  sendAlerts(notifier, ExternalURL.String()),
+		NotifyFunc:  sendAlerts(notifierManager, ExternalURL.String()),
 		Context:     context.Background(),
 		ExternalURL: ExternalURL,
 		Registerer:  prometheus.DefaultRegisterer,
@@ -237,17 +237,17 @@ func (r *ClickHouseReader) Start() {
 		remoteStorage.ApplyConfig,
 		// The Scrape and notifier managers need to reload before the Discovery manager as
 		// they need to read the most updated config when receiving the new targets list.
-		notifier.ApplyConfig,
+		notifierManager.ApplyConfig,
 		scrapeManager.ApplyConfig,
 		func(cfg *config.Config) error {
-			c := make(map[string]sd_config.ServiceDiscoveryConfig)
+			c := make(map[string]sdconfig.ServiceDiscoveryConfig)
 			for _, v := range cfg.ScrapeConfigs {
 				c[v.JobName] = v.ServiceDiscoveryConfig
 			}
 			return discoveryManagerScrape.ApplyConfig(c)
 		},
 		func(cfg *config.Config) error {
-			c := make(map[string]sd_config.ServiceDiscoveryConfig)
+			c := make(map[string]sdconfig.ServiceDiscoveryConfig)
 			for _, v := range cfg.AlertingConfig.AlertmanagerConfigs {
 				// AlertmanagerConfigs doesn't hold an unique identifier so we use the config hash as the identifier.
 				b, err := json.Marshal(v)
@@ -361,15 +361,15 @@ func (r *ClickHouseReader) Start() {
 				reloadReady.Close()
 
-				rules, apiErrorObj := r.GetRulesFromDB()
+				rulesFromDB, apiErrorObj := r.GetRulesFromDB()
 
 				if apiErrorObj != nil {
 					zap.S().Errorf("Not able to read rules from DB")
 				}
-				for _, rule := range *rules {
+				for _, rule := range *rulesFromDB {
 					apiErrorObj = r.LoadRule(rule)
 					if apiErrorObj != nil {
-						zap.S().Errorf("Not able to load rule with id=%d loaded from DB", rule.Id, rule.Data)
+						zap.S().Errorf("Not able to load rule with id=%d loaded from DB", rule.Id)
 					}
 				}
 
@@ -381,7 +381,7 @@ func (r *ClickHouseReader) Start() {
 				for _, channel := range *channels {
 					apiErrorObj = r.LoadChannel(&channel)
 					if apiErrorObj != nil {
-						zap.S().Errorf("Not able to load channel with id=%d loaded from DB", channel.Id, channel.Data)
+						zap.S().Errorf("Not able to load channel with id=%d loaded from DB", channel.Id)
 					}
 				}
 
@@ -424,12 +424,12 @@ func (r *ClickHouseReader) Start() {
 				// so we wait until the config is fully loaded.
 				<-reloadReady.C
 
-				notifier.Run(discoveryManagerNotify.SyncCh())
+				notifierManager.Run(discoveryManagerNotify.SyncCh())
 				level.Info(logger).Log("msg", "Notifier manager stopped")
 				return nil
 			},
 			func(err error) {
-				notifier.Stop()
+				notifierManager.Stop()
 			},
 		)
 	}
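The notifier -> notifierManager rename in the hunks above is more than style: the original variable shadowed the imported notifier package, so the package identifier could no longer be referenced later in Start(). A minimal standalone sketch of the same shadowing pattern (the types here are stand-ins, not the Prometheus API):

    package main

    import "fmt"

    type manager struct{ name string }

    func newManager(name string) *manager { return &manager{name} }

    func main() {
    	// Before the rename, reader.go did the equivalent of:
    	//   notifier := notifier.NewManager(...)
    	// From this line on, the identifier "notifier" names the variable,
    	// and a package of the same name would be unreachable in this
    	// scope -- hence the rename to notifierManager.
    	notifier := newManager("notifier")
    	fmt.Println(notifier.name)
    }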
@@ -573,11 +573,11 @@ type AlertingRuleWithGroup struct {
 
 func (r *ClickHouseReader) GetRulesFromDB() (*[]model.RuleResponseItem, *model.ApiError) {
 
-	rules := []model.RuleResponseItem{}
+	var ruleResponseItems []model.RuleResponseItem
 
 	query := fmt.Sprintf("SELECT id, updated_at, data FROM rules")
 
-	err := r.localDB.Select(&rules, query)
+	err := r.localDB.Select(&ruleResponseItems, query)
 
 	zap.S().Info(query)
 
@@ -586,7 +586,7 @@ func (r *ClickHouseReader) GetRulesFromDB() (*[]model.RuleResponseItem, *model.A
 		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 
-	return &rules, nil
+	return &ruleResponseItems, nil
 }
 
 func (r *ClickHouseReader) GetRule(id string) (*model.RuleResponseItem, *model.ApiError) {
@@ -637,15 +637,15 @@ func (r *ClickHouseReader) ListRulesFromProm() (*model.AlertDiscovery, *model.Ap
 
 	groups := r.ruleManager.RuleGroups()
 
-	alertingRulesWithGroupObjects := []*AlertingRuleWithGroup{}
+	var alertingRulesWithGroupObjects []*AlertingRuleWithGroup
 
-	for _, group := range groups {
-		groupNameParts := strings.Split(group.Name(), "-groupname")
+	for _, groupItem := range groups {
+		groupNameParts := strings.Split(groupItem.Name(), "-groupname")
 		if len(groupNameParts) < 2 {
 			continue
 		}
 		id, _ := strconv.Atoi(groupNameParts[0])
-		for _, rule := range group.Rules() {
+		for _, rule := range groupItem.Rules() {
 			if alertingRule, ok := rule.(*rules.AlertingRule); ok {
 				alertingRulesWithGroupObject := AlertingRuleWithGroup{
 					*alertingRule,
@@ -660,7 +660,7 @@ func (r *ClickHouseReader) ListRulesFromProm() (*model.AlertDiscovery, *model.Ap
 	alertsSorter := byAlertStateAndNameSorter{alerts: alertingRulesWithGroupObjects}
 	sort.Sort(alertsSorter)
 
-	alerts := []*model.AlertingRuleResponse{}
+	var alerts []*model.AlertingRuleResponse
 
 	for _, alertingRule := range alertsSorter.alerts {
 
@@ -710,7 +710,7 @@ func (r *ClickHouseReader) LoadChannel(channel *model.ChannelItem) *model.ApiErr
 
 	response, err := http.Post(constants.GetAlertManagerApiPrefix()+"v1/receivers", "application/json", bytes.NewBuffer([]byte(channel.Data)))
 	if err != nil {
-		zap.S().Errorf("Error in getting response of API call to alertmanager/v1/receivers\n", err)
+		zap.S().Error("Error in getting response of API call to alertmanager/v1/receivers\n", err)
 		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 	if response.StatusCode > 299 {
@@ -763,14 +763,14 @@ func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError {
 	{
 		stmt, err := tx.Prepare(`DELETE FROM notification_channels WHERE id=$1;`)
 		if err != nil {
-			zap.S().Errorf("Error in preparing statement for INSERT to notification_channels\n", err)
+			zap.S().Error("Error in preparing statement for INSERT to notification_channels\n", err)
 			tx.Rollback()
 			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
 		defer stmt.Close()
 
 		if _, err := stmt.Exec(idInt); err != nil {
-			zap.S().Errorf("Error in Executing prepared statement for INSERT to notification_channels\n", err)
+			zap.S().Error("Error in Executing prepared statement for INSERT to notification_channels\n", err)
 			tx.Rollback() // return an error too, we may want to wrap them
 			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
@@ -784,7 +784,7 @@ func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError {
 
 	err = tx.Commit()
 	if err != nil {
-		zap.S().Errorf("Error in committing transaction for DELETE command to notification_channels\n", err)
+		zap.S().Error("Error in committing transaction for DELETE command to notification_channels\n", err)
 		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 
@@ -794,7 +794,7 @@ func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError {
 
 func (r *ClickHouseReader) GetChannels() (*[]model.ChannelItem, *model.ApiError) {
 
-	channels := []model.ChannelItem{}
+	var channels []model.ChannelItem
 
 	query := fmt.Sprintf("SELECT id, created_at, updated_at, name, type, data data FROM notification_channels")
 
@@ -862,21 +862,21 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re
 		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 
-	channel_type := getChannelType(receiver)
+	channelType := getChannelType(receiver)
 	receiverString, _ := json.Marshal(receiver)
 
 	{
 		stmt, err := tx.Prepare(`UPDATE notification_channels SET updated_at=$1, type=$2, data=$3 WHERE id=$4;`)
 		if err != nil {
-			zap.S().Errorf("Error in preparing statement for UPDATE to notification_channels\n", err)
+			zap.S().Error("Error in preparing statement for UPDATE to notification_channels\n", err)
 			tx.Rollback()
 			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
 		defer stmt.Close()
 
-		if _, err := stmt.Exec(time.Now(), channel_type, string(receiverString), idInt); err != nil {
-			zap.S().Errorf("Error in Executing prepared statement for UPDATE to notification_channels\n", err)
+		if _, err := stmt.Exec(time.Now(), channelType, string(receiverString), idInt); err != nil {
+			zap.S().Error("Error in Executing prepared statement for UPDATE to notification_channels\n", err)
 			tx.Rollback() // return an error too, we may want to wrap them
 			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
@@ -890,7 +890,7 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re
 
 	err = tx.Commit()
 	if err != nil {
-		zap.S().Errorf("Error in committing transaction for INSERT to notification_channels\n", err)
+		zap.S().Error("Error in committing transaction for INSERT to notification_channels\n", err)
 		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 
@@ -905,7 +905,7 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *
 		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 
-	channel_type := getChannelType(receiver)
+	channelType := getChannelType(receiver)
 	receiverString, _ := json.Marshal(receiver)
 
 	// todo: check if the channel name already exists, raise an error if so
@@ -913,14 +913,14 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *
 	{
 		stmt, err := tx.Prepare(`INSERT INTO notification_channels (created_at, updated_at, name, type, data) VALUES($1,$2,$3,$4,$5);`)
 		if err != nil {
-			zap.S().Errorf("Error in preparing statement for INSERT to notification_channels\n", err)
+			zap.S().Error("Error in preparing statement for INSERT to notification_channels\n", err)
 			tx.Rollback()
 			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
 		defer stmt.Close()
 
-		if _, err := stmt.Exec(time.Now(), time.Now(), receiver.Name, channel_type, string(receiverString)); err != nil {
-			zap.S().Errorf("Error in Executing prepared statement for INSERT to notification_channels\n", err)
+		if _, err := stmt.Exec(time.Now(), time.Now(), receiver.Name, channelType, string(receiverString)); err != nil {
+			zap.S().Error("Error in Executing prepared statement for INSERT to notification_channels\n", err)
 			tx.Rollback() // return an error too, we may want to wrap them
 			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
@@ -934,7 +934,7 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *
 
 	err = tx.Commit()
 	if err != nil {
-		zap.S().Errorf("Error in committing transaction for INSERT to notification_channels\n", err)
+		zap.S().Error("Error in committing transaction for INSERT to notification_channels\n", err)
 		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 
@@ -954,7 +954,7 @@ func (r *ClickHouseReader) CreateRule(rule string) *model.ApiError {
 	{
 		stmt, err := tx.Prepare(`INSERT into rules (updated_at, data) VALUES($1,$2);`)
 		if err != nil {
-			zap.S().Errorf("Error in preparing statement for INSERT to rules\n", err)
+			zap.S().Error("Error in preparing statement for INSERT to rules\n", err)
 			tx.Rollback()
 			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
@@ -962,7 +962,7 @@ func (r *ClickHouseReader) CreateRule(rule string) *model.ApiError {
 
 		result, err := stmt.Exec(time.Now(), rule)
 		if err != nil {
-			zap.S().Errorf("Error in Executing prepared statement for INSERT to rules\n", err)
+			zap.S().Error("Error in Executing prepared statement for INSERT to rules\n", err)
 			tx.Rollback() // return an error too, we may want to wrap them
 			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
@@ -979,7 +979,7 @@ func (r *ClickHouseReader) CreateRule(rule string) *model.ApiError {
 	}
 	err = tx.Commit()
 	if err != nil {
-		zap.S().Errorf("Error in committing transaction for INSERT to rules\n", err)
+		zap.S().Error("Error in committing transaction for INSERT to rules\n", err)
 		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 	return nil
@@ -997,14 +997,14 @@ func (r *ClickHouseReader) EditRule(rule string, id string) *model.ApiError {
 	{
 		stmt, err := tx.Prepare(`UPDATE rules SET updated_at=$1, data=$2 WHERE id=$3;`)
 		if err != nil {
-			zap.S().Errorf("Error in preparing statement for UPDATE to rules\n", err)
+			zap.S().Error("Error in preparing statement for UPDATE to rules\n", err)
 			tx.Rollback()
 			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
 		defer stmt.Close()
 
 		if _, err := stmt.Exec(time.Now(), rule, idInt); err != nil {
-			zap.S().Errorf("Error in Executing prepared statement for UPDATE to rules\n", err)
+			zap.S().Error("Error in Executing prepared statement for UPDATE to rules\n", err)
 			tx.Rollback() // return an error too, we may want to wrap them
 			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
@@ -1021,7 +1021,7 @@ func (r *ClickHouseReader) EditRule(rule string, id string) *model.ApiError {
 
 	err = tx.Commit()
 	if err != nil {
-		zap.S().Errorf("Error in committing transaction for UPDATE to rules\n", err)
+		zap.S().Error("Error in committing transaction for UPDATE to rules\n", err)
 		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 
@@ -1046,7 +1046,7 @@ func (r *ClickHouseReader) DeleteRule(id string) *model.ApiError {
 		defer stmt.Close()
 
 		if _, err := stmt.Exec(idInt); err != nil {
-			zap.S().Errorf("Error in Executing prepared statement for DELETE to rules\n", err)
+			zap.S().Error("Error in Executing prepared statement for DELETE to rules\n", err)
 			tx.Rollback() // return an error too, we may want to wrap them
 			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 		}
@@ -1059,7 +1059,7 @@ func (r *ClickHouseReader) DeleteRule(id string) *model.ApiError {
 
 	if err != nil {
 		tx.Rollback()
-		zap.S().Errorf("Error in deleting rule from rulemanager...\n", err)
+		zap.S().Error("Error in deleting rule from rulemanager...\n", err)
 		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 
@@ -1067,7 +1067,7 @@ func (r *ClickHouseReader) DeleteRule(id string) *model.ApiError {
 
 	err = tx.Commit()
 	if err != nil {
-		zap.S().Errorf("Error in committing transaction for deleting rules\n", err)
+		zap.S().Error("Error in committing transaction for deleting rules\n", err)
 		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
 	}
 
@@ -1115,7 +1115,7 @@ func (r *ClickHouseReader) GetQueryRangeResult(ctx context.Context, query *model
 
 func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, error) {
 
-	services := []string{}
+	var services []string
 
 	query := fmt.Sprintf(`SELECT DISTINCT serviceName FROM %s.%s WHERE toDate(timestamp) > now() - INTERVAL 1 DAY`, r.traceDB, r.indexTable)
 	rows, err := r.db.Query(ctx, query)
@@ -1124,7 +1124,7 @@ func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, erro
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, fmt.Errorf("Error in processing sql query")
+		return nil, fmt.Errorf("error in processing sql query")
 	}
 
 	defer rows.Close()
@@ -1144,10 +1144,10 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
 		return nil, &model.ApiError{Typ: model.ErrorExec, Err: ErrNoIndexTable}
 	}
 
-	serviceItems := []model.ServiceItem{}
+	var serviceItems []model.ServiceItem
 
 	query := fmt.Sprintf("SELECT serviceName, quantile(0.99)(durationNano) as p99, avg(durationNano) as avgDuration, count(*) as numCalls FROM %s.%s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2'", r.traceDB, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
-	args := []interface{}{}
+	var args []interface{}
 	args, errStatus := buildQueryWithTagParams(ctx, queryParams.Tags, &query, args)
 	if errStatus != nil {
 		return nil, errStatus
@@ -1159,11 +1159,11 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
 	////////////////// Below block gets 5xx of services
-	serviceErrorItems := []model.ServiceItem{}
+	var serviceErrorItems []model.ServiceItem
 
 	query = fmt.Sprintf("SELECT serviceName, count(*) as numErrors FROM %s.%s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND (statusCode>=500 OR statusCode=2)", r.traceDB, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
 	args = []interface{}{}
@@ -1178,7 +1178,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
 	m5xx := make(map[string]uint64)
@@ -1190,7 +1190,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
 
 	////////////////// Below block gets 4xx of services
 
-	service4xxItems := []model.ServiceItem{}
+	var service4xxItems []model.ServiceItem
 
 	query = fmt.Sprintf("SELECT serviceName, count(*) as num4xx FROM %s.%s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND statusCode>=400 AND statusCode<500", r.traceDB, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
 	args = []interface{}{}
@@ -1205,7 +1205,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
 	m4xx := make(map[string]uint64)
@@ -1231,10 +1231,10 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
 
 func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, *model.ApiError) {
 
-	serviceOverviewItems := []model.ServiceOverviewItem{}
+	var serviceOverviewItems []model.ServiceOverviewItem
 
-	query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, quantile(0.99)(durationNano) as p99, quantile(0.95)(durationNano) as p95,quantile(0.50)(durationNano) as p50, count(*) as numCalls FROM %s.%s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND serviceName='%s'", strconv.Itoa(int(queryParams.StepSeconds/60)), r.traceDB, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)
-	args := []interface{}{}
+	query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, quantile(0.99)(durationNano) as p99, quantile(0.95)(durationNano) as p95,quantile(0.50)(durationNano) as p50, count(*) as numCalls FROM %s.%s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND serviceName='%s'", strconv.Itoa(queryParams.StepSeconds/60), r.traceDB, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)
+	var args []interface{}
 	args, errStatus := buildQueryWithTagParams(ctx, queryParams.Tags, &query, args)
 	if errStatus != nil {
 		return nil, errStatus
@@ -1246,12 +1246,12 @@ func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
-	serviceErrorItems := []model.ServiceErrorItem{}
+	var serviceErrorItems []model.ServiceErrorItem
 
-	query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, count(*) as numErrors FROM %s.%s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND serviceName='%s' AND hasError=true", strconv.Itoa(int(queryParams.StepSeconds/60)), r.traceDB, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)
+	query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, count(*) as numErrors FROM %s.%s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND serviceName='%s' AND hasError=true", strconv.Itoa(queryParams.StepSeconds/60), r.traceDB, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)
 	args = []interface{}{}
 	args, errStatus = buildQueryWithTagParams(ctx, queryParams.Tags, &query, args)
 	if errStatus != nil {
@@ -1264,17 +1264,17 @@ func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
 	m := make(map[int64]int)
 
 	for j := range serviceErrorItems {
-		m[int64(serviceErrorItems[j].Time.UnixNano())] = int(serviceErrorItems[j].NumErrors)
+		m[serviceErrorItems[j].Time.UnixNano()] = int(serviceErrorItems[j].NumErrors)
 	}
 
 	for i := range serviceOverviewItems {
-		serviceOverviewItems[i].Timestamp = int64(serviceOverviewItems[i].Time.UnixNano())
+		serviceOverviewItems[i].Timestamp = serviceOverviewItems[i].Time.UnixNano()
 
 		if val, ok := m[serviceOverviewItems[i].Timestamp]; ok {
 			serviceOverviewItems[i].NumErrors = uint64(val)
@@ -1360,7 +1360,7 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 	query = getStatusFilters(query, queryParams.Status, excludeMap)
 
-	traceFilterReponse := model.SpanFiltersResponse{
+	traceFilterResponse := model.SpanFiltersResponse{
 		Status:      map[string]uint64{},
 		Duration:    map[string]uint64{},
 		ServiceName: map[string]uint64{},
@@ -1385,11 +1385,11 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			for _, service := range dBResponse {
 				if service.ServiceName != "" {
-					traceFilterReponse.ServiceName[service.ServiceName] = service.Count
+					traceFilterResponse.ServiceName[service.ServiceName] = service.Count
 				}
 			}
 		case constants.HttpCode:
@@ -1402,11 +1402,11 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			for _, service := range dBResponse {
 				if service.HttpCode != "" {
-					traceFilterReponse.HttpCode[service.HttpCode] = service.Count
+					traceFilterResponse.HttpCode[service.HttpCode] = service.Count
 				}
 			}
 		case constants.HttpRoute:
@@ -1419,11 +1419,11 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			for _, service := range dBResponse {
 				if service.HttpRoute != "" {
-					traceFilterReponse.HttpRoute[service.HttpRoute] = service.Count
+					traceFilterResponse.HttpRoute[service.HttpRoute] = service.Count
 				}
 			}
 		case constants.HttpUrl:
@@ -1436,11 +1436,11 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			for _, service := range dBResponse {
 				if service.HttpUrl != "" {
-					traceFilterReponse.HttpUrl[service.HttpUrl] = service.Count
+					traceFilterResponse.HttpUrl[service.HttpUrl] = service.Count
 				}
 			}
 		case constants.HttpMethod:
@@ -1453,11 +1453,11 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			for _, service := range dBResponse {
 				if service.HttpMethod != "" {
-					traceFilterReponse.HttpMethod[service.HttpMethod] = service.Count
+					traceFilterResponse.HttpMethod[service.HttpMethod] = service.Count
 				}
 			}
 		case constants.HttpHost:
@@ -1470,11 +1470,11 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			for _, service := range dBResponse {
 				if service.HttpHost != "" {
-					traceFilterReponse.HttpHost[service.HttpHost] = service.Count
+					traceFilterResponse.HttpHost[service.HttpHost] = service.Count
 				}
 			}
 		case constants.OperationRequest:
@@ -1487,11 +1487,11 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			for _, service := range dBResponse {
 				if service.Operation != "" {
-					traceFilterReponse.Operation[service.Operation] = service.Count
+					traceFilterResponse.Operation[service.Operation] = service.Count
 				}
 			}
 		case constants.Component:
@@ -1504,11 +1504,11 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			for _, service := range dBResponse {
 				if service.Component != "" {
-					traceFilterReponse.Component[service.Component] = service.Count
+					traceFilterResponse.Component[service.Component] = service.Count
 				}
 			}
 		case constants.Status:
@@ -1520,7 +1520,7 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 
 			finalQuery2 := fmt.Sprintf("SELECT COUNT(*) as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU AND hasError = false", r.traceDB, r.indexTable)
@@ -1531,16 +1531,16 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			if len(dBResponse) > 0 && len(dBResponse2) > 0 {
-				traceFilterReponse.Status = map[string]uint64{"ok": dBResponse2[0].NumTotal, "error": dBResponse[0].NumTotal}
+				traceFilterResponse.Status = map[string]uint64{"ok": dBResponse2[0].NumTotal, "error": dBResponse[0].NumTotal}
 			} else if len(dBResponse) > 0 {
-				traceFilterReponse.Status = map[string]uint64{"ok": 0, "error": dBResponse[0].NumTotal}
+				traceFilterResponse.Status = map[string]uint64{"ok": 0, "error": dBResponse[0].NumTotal}
 			} else if len(dBResponse2) > 0 {
-				traceFilterReponse.Status = map[string]uint64{"ok": dBResponse2[0].NumTotal, "error": 0}
+				traceFilterResponse.Status = map[string]uint64{"ok": dBResponse2[0].NumTotal, "error": 0}
 			} else {
-				traceFilterReponse.Status = map[string]uint64{"ok": 0, "error": 0}
+				traceFilterResponse.Status = map[string]uint64{"ok": 0, "error": 0}
 			}
 		case constants.Duration:
 			finalQuery := fmt.Sprintf("SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.traceDB, r.durationTable)
@@ -1552,7 +1552,7 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			finalQuery = fmt.Sprintf("SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.traceDB, r.durationTable)
 			finalQuery += query
@@ -1563,20 +1563,20 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode
 
 			if err != nil {
 				zap.S().Debug("Error in processing sql query: ", err)
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
 			}
 			if len(dBResponse) > 0 {
-				traceFilterReponse.Duration["minDuration"] = dBResponse[0].NumTotal
+				traceFilterResponse.Duration["minDuration"] = dBResponse[0].NumTotal
 			}
 			if len(dBResponse2) > 0 {
-				traceFilterReponse.Duration["maxDuration"] = dBResponse2[0].NumTotal
+				traceFilterResponse.Duration["maxDuration"] = dBResponse2[0].NumTotal
 			}
 		default:
 			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("filter type: %s not supported", e)}
 		}
 	}
 
-	return &traceFilterReponse, nil
+	return &traceFilterResponse, nil
 }
 
 func getStatusFilters(query string, statusParams []string, excludeMap map[string]struct{}) string {
@@ -1676,7 +1676,7 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 	if queryParams.Order == constants.Descending {
 		query = query + " ORDER BY timestamp DESC"
@@ -1719,7 +1719,7 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
 	getFilterSpansResponse := model.GetFilterSpansResponse{
@@ -1778,7 +1778,7 @@ func buildQueryWithTagParams(ctx context.Context, tags []model.TagQuery, query *
 				args = append(args, clickhouse.Named(tagValue, value))
 			}
 		} else {
-			return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Tag Operator %s not supported", item.Operator)}
+			return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("tag operator %s not supported", item.Operator)}
 		}
 	}
 	return args, nil
@@ -1832,7 +1832,7 @@ func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model
 
 	query = getStatusFilters(query, queryParams.Status, excludeMap)
 
-	tagFilters := []model.TagFilters{}
+	var tagFilters []model.TagFilters
 
 	finalQuery := fmt.Sprintf(`SELECT DISTINCT arrayJoin(tagMap.keys) as tagKeys FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU`, r.traceDB, r.indexTable)
 	// Alternative query: SELECT groupUniqArrayArray(mapKeys(tagMap)) as tagKeys FROM signoz_index_v2
@@ -1843,7 +1843,7 @@ func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
 	tagFilters = excludeTags(ctx, tagFilters)
@@ -1922,7 +1922,7 @@ func (r *ClickHouseReader) GetTagValues(ctx context.Context, queryParams *model.
 
 	query = getStatusFilters(query, queryParams.Status, excludeMap)
 
-	tagValues := []model.TagValues{}
+	var tagValues []model.TagValues
 
 	finalQuery := fmt.Sprintf(`SELECT tagMap[@key] as tagValues FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU`, r.traceDB, r.indexTable)
 	finalQuery += query
@@ -1934,10 +1934,10 @@ func (r *ClickHouseReader) GetTagValues(ctx context.Context, queryParams *model.
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
-	cleanedTagValues := []model.TagValues{}
+	var cleanedTagValues []model.TagValues
 	for _, e := range tagValues {
 		if e.TagValues != "" {
 			cleanedTagValues = append(cleanedTagValues, e)
@@ -1951,7 +1951,7 @@ func (r *ClickHouseReader) GetTopEndpoints(ctx context.Context, queryParams *mod
 	var topEndpointsItems []model.TopEndpointsItem
 
 	query := fmt.Sprintf("SELECT quantile(0.5)(durationNano) as p50, quantile(0.95)(durationNano) as p95, quantile(0.99)(durationNano) as p99, COUNT(1) as numCalls, name FROM %s.%s WHERE timestamp >= '%s' AND timestamp <= '%s' AND kind='2' and serviceName='%s'", r.traceDB, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)
-	args := []interface{}{}
+	var args []interface{}
 	args, errStatus := buildQueryWithTagParams(ctx, queryParams.Tags, &query, args)
 	if errStatus != nil {
 		return nil, errStatus
@@ -1963,7 +1963,7 @@ func (r *ClickHouseReader) GetTopEndpoints(ctx context.Context, queryParams *mod
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
 	if topEndpointsItems == nil {
@@ -1990,7 +1990,7 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetU
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, fmt.Errorf("Error in processing sql query")
+		return nil, fmt.Errorf("error in processing sql query")
 	}
 
 	for i := range usageItems {
@@ -2006,27 +2006,27 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetU
 
 func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string) (*[]model.SearchSpansResult, error) {
 
-	var searchScanReponses []model.SearchSpanDBReponseItem
+	var searchScanResponses []model.SearchSpanDBResponseItem
 
 	query := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.traceDB, r.spansTable)
 
-	err := r.db.Select(ctx, &searchScanReponses, query, traceId)
+	err := r.db.Select(ctx, &searchScanResponses, query, traceId)
 
 	zap.S().Info(query)
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, fmt.Errorf("Error in processing sql query")
+		return nil, fmt.Errorf("error in processing sql query")
 	}
 
 	searchSpansResult := []model.SearchSpansResult{{
 		Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
-		Events:  make([][]interface{}, len(searchScanReponses)),
+		Events:  make([][]interface{}, len(searchScanResponses)),
 	},
 	}
 
-	for i, item := range searchScanReponses {
-		var jsonItem model.SearchSpanReponseItem
+	for i, item := range searchScanResponses {
+		var jsonItem model.SearchSpanResponseItem
 		json.Unmarshal([]byte(item.Model), &jsonItem)
 		jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano() / 1000000)
 		spanEvents := jsonItem.GetValues()
@@ -2045,7 +2045,7 @@ func interfaceArrayToStringArray(array []interface{}) []string {
 }
 
 func (r *ClickHouseReader) GetServiceMapDependencies(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) {
-	serviceMapDependencyItems := []model.ServiceMapDependencyItem{}
+	var serviceMapDependencyItems []model.ServiceMapDependencyItem
 
 	query := fmt.Sprintf(`SELECT spanID, parentSpanID, serviceName FROM %s.%s WHERE timestamp>='%s' AND timestamp<='%s'`, r.traceDB, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
 
@@ -2055,7 +2055,7 @@ func (r *ClickHouseReader) GetServiceMapDependencies(ctx context.Context, queryP
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, fmt.Errorf("Error in processing sql query")
+		return nil, fmt.Errorf("error in processing sql query")
	}
 
 	serviceMap := make(map[string]*model.ServiceMapDependencyResponseItem)
@@ -2099,32 +2099,32 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query
 		excludeMap[e] = struct{}{}
 	}
 
-	SpanAggregatesDBResponseItems := []model.SpanAggregatesDBResponseItem{}
+	var SpanAggregatesDBResponseItems []model.SpanAggregatesDBResponseItem
 
-	aggregation_query := ""
+	aggregationQuery := ""
 	if queryParams.Dimension == "duration" {
 		switch queryParams.AggregationOption {
 		case "p50":
-			aggregation_query = " quantile(0.50)(durationNano) as float64Value "
+			aggregationQuery = " quantile(0.50)(durationNano) as float64Value "
 		case "p95":
-			aggregation_query = " quantile(0.95)(durationNano) as float64Value "
+			aggregationQuery = " quantile(0.95)(durationNano) as float64Value "
 		case "p90":
-			aggregation_query = " quantile(0.90)(durationNano) as float64Value "
+			aggregationQuery = " quantile(0.90)(durationNano) as float64Value "
 		case "p99":
-			aggregation_query = " quantile(0.99)(durationNano) as float64Value "
+			aggregationQuery = " quantile(0.99)(durationNano) as float64Value "
 		case "max":
-			aggregation_query = " max(durationNano) as value "
+			aggregationQuery = " max(durationNano) as value "
 		case "min":
-			aggregation_query = " min(durationNano) as value "
+			aggregationQuery = " min(durationNano) as value "
 		case "avg":
-			aggregation_query = " avg(durationNano) as float64Value "
+			aggregationQuery = " avg(durationNano) as float64Value "
 		case "sum":
-			aggregation_query = " sum(durationNano) as value "
+			aggregationQuery = " sum(durationNano) as value "
 		default:
-			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("Aggregate type: %s not supported", queryParams.AggregationOption)}
+			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("aggregate type: %s not supported", queryParams.AggregationOption)}
 		}
 	} else if queryParams.Dimension == "calls" {
-		aggregation_query = " count(*) as value "
+		aggregationQuery = " count(*) as value "
 	}
 
 	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
@@ -2133,36 +2133,36 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query
 	if queryParams.GroupBy != "" {
 		switch queryParams.GroupBy {
 		case constants.ServiceName:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, serviceName as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, serviceName as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.HttpCode:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpCode as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpCode as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.HttpMethod:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpMethod as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpMethod as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.HttpUrl:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpUrl as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpUrl as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.HttpRoute:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpRoute as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpRoute as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.HttpHost:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpHost as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpHost as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.DBName:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, dbName as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, dbName as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.DBOperation:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, dbOperation as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, dbOperation as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.OperationRequest:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, name as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, name as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.MsgSystem:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, msgSystem as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, msgSystem as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.MsgOperation:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, msgOperation as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, msgOperation as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.DBSystem:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, dbSystem as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, dbSystem as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		case constants.Component:
-			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, component as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, component as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 		default:
 			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("groupBy type: %s not supported", queryParams.GroupBy)}
 		}
 	} else {
-		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.traceDB, r.indexTable)
+		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregationQuery, r.traceDB, r.indexTable)
 	}
 
 	if len(queryParams.ServiceName) > 0 {
@@ -2250,7 +2250,7 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
 	GetFilteredSpansAggregatesResponse := model.GetFilteredSpansAggregatesResponse{
@@ -2261,7 +2261,7 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query
 		if SpanAggregatesDBResponseItems[i].Value == 0 {
 			SpanAggregatesDBResponseItems[i].Value = uint64(SpanAggregatesDBResponseItems[i].Float64Value)
 		}
-		SpanAggregatesDBResponseItems[i].Timestamp = int64(SpanAggregatesDBResponseItems[i].Time.UnixNano())
+		SpanAggregatesDBResponseItems[i].Timestamp = SpanAggregatesDBResponseItems[i].Time.UnixNano()
 		SpanAggregatesDBResponseItems[i].FloatValue = float32(SpanAggregatesDBResponseItems[i].Value)
 		if queryParams.AggregationOption == "rate_per_sec" {
 			SpanAggregatesDBResponseItems[i].FloatValue = float32(SpanAggregatesDBResponseItems[i].Value) / float32(queryParams.StepSeconds)
@@ -2313,7 +2313,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
 		for _, tableName = range tableNameArray {
 			statusItem, err := r.checkTTLStatusItem(ctx, tableName)
 			if err != nil {
-				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing ttl_status check sql query")}
+				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
 			}
 			if statusItem.Status == constants.StatusPending {
 				return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
@@ -2324,7 +2324,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
 			go func(tableName string) {
 				_, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration)
 				if dbErr != nil {
-					zap.S().Error(fmt.Errorf("Error in inserting to ttl_status table: %s", dbErr.Error()))
+					zap.S().Error(fmt.Errorf("error in inserting to ttl_status table: %s", dbErr.Error()))
 					return
 				}
 				req = fmt.Sprintf(
@@ -2336,7 +2336,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
 				}
 				err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
 				if err != nil {
-					zap.S().Error(fmt.Errorf("Error in setting cold storage: %s", err.Err.Error()))
+					zap.S().Error(fmt.Errorf("error in setting cold storage: %s", err.Err.Error()))
 					statusItem, err := r.checkTTLStatusItem(ctx, tableName)
 					if err == nil {
 						_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
@@ -2350,7 +2350,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
 				zap.S().Debugf("Executing TTL request: %s\n", req)
 				statusItem, _ := r.checkTTLStatusItem(ctx, tableName)
 				if err := r.db.Exec(context.Background(), req); err != nil {
-					zap.S().Error(fmt.Errorf("Error in executing set TTL query: %s", err.Error()))
+					zap.S().Error(fmt.Errorf("error in executing set TTL query: %s", err.Error()))
 					_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
 					if dbErr != nil {
 						zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr)
@@ -2370,7 +2370,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
 		tableName = signozMetricDBName + "." + signozSampleTableName
 		statusItem, err := r.checkTTLStatusItem(ctx, tableName)
 		if err != nil {
-			return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing ttl_status check sql query")}
+			return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
 		}
 		if statusItem.Status == constants.StatusPending {
 			return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
@@ -2378,7 +2378,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
 		go func(tableName string) {
 			_, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration)
 			if dbErr != nil {
-				zap.S().Error(fmt.Errorf("Error in inserting to ttl_status table: %s", dbErr.Error()))
+				zap.S().Error(fmt.Errorf("error in inserting to ttl_status table: %s", dbErr.Error()))
 				return
 			}
 			req = fmt.Sprintf(
@@ -2391,7 +2391,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context,
 			}
 			err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
 			if err != nil {
-				zap.S().Error(fmt.Errorf("Error in setting cold storage: %s", err.Err.Error()))
+				zap.S().Error(fmt.Errorf("error in setting cold storage: %s", err.Err.Error()))
 				statusItem, err := r.checkTTLStatusItem(ctx, tableName)
 				if err == nil {
 					_, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
@@ -2437,7 +2437,7 @@ func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, numberOfTr
 
 // checkTTLStatusItem checks if ttl_status table has an entry for the given table name
 func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, tableName string) (model.TTLStatusItem, *model.ApiError) {
-	statusItem := []model.TTLStatusItem{}
+	var statusItem []model.TTLStatusItem
 
 	query := fmt.Sprintf("SELECT id, status, ttl, cold_storage_ttl FROM ttl_status WHERE table_name = '%s' ORDER BY created_at DESC", tableName)
 
@@ -2450,7 +2450,7 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, tableName str
 	}
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return model.TTLStatusItem{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing ttl_status check sql query")}
+		return model.TTLStatusItem{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
 	}
 	return statusItem[0], nil
 }
@@ -2466,7 +2466,7 @@ func (r *ClickHouseReader) setTTLQueryStatus(ctx context.Context, tableNameArray
 		return "", nil
 	}
 	if err != nil {
-		return "", &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing ttl_status check sql query")}
+		return "", &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
 	}
 	if statusItem.Status == constants.StatusPending && statusItem.UpdatedAt.Unix()-time.Now().Unix() < 3600 {
 		status = constants.StatusPending
@@ -2501,7 +2501,7 @@ func (r *ClickHouseReader) setColdStorage(ctx context.Context, tableName string,
 
 // GetDisks returns a list of disks {name, type} configured in clickhouse DB.
 func (r *ClickHouseReader) GetDisks(ctx context.Context) (*[]model.DiskItem, *model.ApiError) {
-	diskItems := []model.DiskItem{}
+	var diskItems []model.DiskItem
 
 	query := "SELECT name,type FROM system.disks"
 	if err := r.db.Select(ctx, &diskItems, query); err != nil {
@@ -2523,24 +2523,24 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa
 		deleteTTLExp := regexp.MustCompile(`toIntervalSecond\(([0-9]*)\)`)
 		moveTTLExp := regexp.MustCompile(`toIntervalSecond\(([0-9]*)\) TO VOLUME`)
 
-		var delTTL, moveTTL int = -1, -1
+		var delTTL, moveTTL = -1, -1
 
 		m := deleteTTLExp.FindStringSubmatch(queryResp)
 		if len(m) > 1 {
-			seconds_int, err := strconv.Atoi(m[1])
+			secondsInt, err := strconv.Atoi(m[1])
 			if err != nil {
 				return -1, -1
 			}
-			delTTL = seconds_int / 3600
+			delTTL = secondsInt / 3600
 		}
 
 		m = moveTTLExp.FindStringSubmatch(queryResp)
 		if len(m) > 1 {
-			seconds_int, err := strconv.Atoi(m[1])
+			secondsInt, err := strconv.Atoi(m[1])
 			if err != nil {
 				return -1, -1
 			}
-			moveTTL = seconds_int / 3600
+			moveTTL = secondsInt / 3600
 		}
 
 		return delTTL, moveTTL
@@ -2661,7 +2661,7 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li
 
 	if err != nil {
 		zap.S().Debug("Error in processing sql query: ", err)
-		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
 	}
 
 	return &getErrorResponses, nil
@@ -2691,21 +2691,21 @@ func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams
 		zap.S().Debug("errorId missing from params")
 		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("ErrorID missing from params")}
 	}
-	var getErrorWithSpanReponse
[]model.ErrorWithSpan + var getErrorWithSpanResponse []model.ErrorWithSpan query := fmt.Sprintf("SELECT * FROM %s.%s WHERE timestamp = @timestamp AND groupID = @groupID AND errorID = @errorID LIMIT 1", r.traceDB, r.errorTable) args := []interface{}{clickhouse.Named("errorID", queryParams.ErrorID), clickhouse.Named("groupID", queryParams.GroupID), clickhouse.Named("timestamp", strconv.FormatInt(queryParams.Timestamp.UnixNano(), 10))} - err := r.db.Select(ctx, &getErrorWithSpanReponse, query, args...) + err := r.db.Select(ctx, &getErrorWithSpanResponse, query, args...) zap.S().Info(query) if err != nil { zap.S().Debug("Error in processing sql query: ", err) - return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} + return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")} } - if len(getErrorWithSpanReponse) > 0 { - return &getErrorWithSpanReponse[0], nil + if len(getErrorWithSpanResponse) > 0 { + return &getErrorWithSpanResponse[0], nil } else { return nil, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("Error/Exception not found")} } @@ -2714,22 +2714,22 @@ func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams func (r *ClickHouseReader) GetErrorFromGroupID(ctx context.Context, queryParams *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) { - var getErrorWithSpanReponse []model.ErrorWithSpan + var getErrorWithSpanResponse []model.ErrorWithSpan query := fmt.Sprintf("SELECT * FROM %s.%s WHERE timestamp = @timestamp AND groupID = @groupID LIMIT 1", r.traceDB, r.errorTable) args := []interface{}{clickhouse.Named("groupID", queryParams.GroupID), clickhouse.Named("timestamp", strconv.FormatInt(queryParams.Timestamp.UnixNano(), 10))} - err := r.db.Select(ctx, &getErrorWithSpanReponse, query, args...) + err := r.db.Select(ctx, &getErrorWithSpanResponse, query, args...) 
zap.S().Info(query) if err != nil { zap.S().Debug("Error in processing sql query: ", err) - return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} + return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")} } - if len(getErrorWithSpanReponse) > 0 { - return &getErrorWithSpanReponse[0], nil + if len(getErrorWithSpanResponse) > 0 { + return &getErrorWithSpanResponse[0], nil } else { return nil, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("Error/Exception not found")} } @@ -3099,7 +3099,10 @@ func (r *ClickHouseReader) GetTotalSpans(ctx context.Context) (uint64, error) { var totalSpans uint64 queryStr := fmt.Sprintf("SELECT count() from %s.%s;", signozTraceDBName, signozTraceTableName) - r.db.QueryRow(ctx, queryStr).Scan(&totalSpans) + err := r.db.QueryRow(ctx, queryStr).Scan(&totalSpans) + if err != nil { + return 0, err + } return totalSpans, nil } @@ -3110,7 +3113,10 @@ func (r *ClickHouseReader) GetSpansInLastHeartBeatInterval(ctx context.Context) queryStr := fmt.Sprintf("SELECT count() from %s.%s where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d));", signozTraceDBName, signozSpansTable, 30) - r.db.QueryRow(ctx, queryStr).Scan(&spansInLastHeartBeatInterval) + err := r.db.QueryRow(ctx, queryStr).Scan(&spansInLastHeartBeatInterval) + if err != nil { + return 0, err + } return spansInLastHeartBeatInterval, nil } @@ -3142,7 +3148,10 @@ func (r *ClickHouseReader) GetTimeSeriesInfo(ctx context.Context) (map[string]in for rows.Next() { var value uint64 - rows.Scan(&value) + err := rows.Scan(&value) + if err != nil { + return nil, err + } totalTS += value if count == 0 { maxTS = value @@ -3163,7 +3172,10 @@ func (r *ClickHouseReader) GetSamplesInfoInLastHeartBeatInterval(ctx context.Con queryStr := fmt.Sprintf("select count() from %s.%s where timestamp_ms > toUnixTimestamp(now()-toIntervalMinute(%d))*1000;", signozMetricDBName, signozSampleTableName, 30) - r.db.QueryRow(ctx, queryStr).Scan(&totalSamples) + err := r.db.QueryRow(ctx, queryStr).Scan(&totalSamples) + if err != nil { + return 0, err + } return totalSamples, nil } diff --git a/pkg/query-service/app/dashboards/model.go b/pkg/query-service/app/dashboards/model.go index 1b74857d41..ae08171674 100644 --- a/pkg/query-service/app/dashboards/model.go +++ b/pkg/query-service/app/dashboards/model.go @@ -17,7 +17,7 @@ import ( // This time the global variable is unexported. var db *sqlx.DB -// InitDB sets up setting up the connection pool global variable. +// InitDB sets up the connection pool global variable. 
func InitDB(dataSourceName string) (*sqlx.DB, error) { var err error @@ -26,7 +26,7 @@ func InitDB(dataSourceName string) (*sqlx.DB, error) { return nil, err } - table_schema := `CREATE TABLE IF NOT EXISTS dashboards ( + tableSchema := `CREATE TABLE IF NOT EXISTS dashboards ( id INTEGER PRIMARY KEY AUTOINCREMENT, uuid TEXT NOT NULL UNIQUE, created_at datetime NOT NULL, @@ -34,24 +34,24 @@ func InitDB(dataSourceName string) (*sqlx.DB, error) { data TEXT NOT NULL );` - _, err = db.Exec(table_schema) + _, err = db.Exec(tableSchema) if err != nil { - return nil, fmt.Errorf("Error in creating dashboard table: %s", err.Error()) + return nil, fmt.Errorf("error in creating dashboard table: %s", err.Error()) } - table_schema = `CREATE TABLE IF NOT EXISTS rules ( + tableSchema = `CREATE TABLE IF NOT EXISTS rules ( id INTEGER PRIMARY KEY AUTOINCREMENT, updated_at datetime NOT NULL, deleted INTEGER DEFAULT 0, data TEXT NOT NULL );` - _, err = db.Exec(table_schema) + _, err = db.Exec(tableSchema) if err != nil { - return nil, fmt.Errorf("Error in creating rules table: %s", err.Error()) + return nil, fmt.Errorf("error in creating rules table: %s", err.Error()) } - table_schema = `CREATE TABLE IF NOT EXISTS notification_channels ( + tableSchema = `CREATE TABLE IF NOT EXISTS notification_channels ( id INTEGER PRIMARY KEY AUTOINCREMENT, created_at datetime NOT NULL, updated_at datetime NOT NULL, @@ -61,12 +61,12 @@ func InitDB(dataSourceName string) (*sqlx.DB, error) { data TEXT NOT NULL );` - _, err = db.Exec(table_schema) + _, err = db.Exec(tableSchema) if err != nil { - return nil, fmt.Errorf("Error in creating notification_channles table: %s", err.Error()) + return nil, fmt.Errorf("error in creating notification_channels table: %s", err.Error()) } - table_schema = `CREATE TABLE IF NOT EXISTS ttl_status ( + tableSchema = `CREATE TABLE IF NOT EXISTS ttl_status ( id INTEGER PRIMARY KEY AUTOINCREMENT, transaction_id TEXT NOT NULL, created_at datetime NOT NULL, @@ -77,9 +77,9 @@ func InitDB(dataSourceName string) (*sqlx.DB, error) { status TEXT NOT NULL );` - _, err = db.Exec(table_schema) + _, err = db.Exec(tableSchema) if err != nil { - return nil, fmt.Errorf("Error in creating ttl_status table: %s", err.Error()) + return nil, fmt.Errorf("error in creating ttl_status table: %s", err.Error()) } return db, nil @@ -128,17 +128,17 @@ func CreateDashboard(data map[string]interface{}) (*Dashboard, *model.ApiError) dash.UpdateSlug() dash.Uuid = uuid.New().String() - map_data, err := json.Marshal(dash.Data) + mapData, err := json.Marshal(dash.Data) if err != nil { - zap.S().Errorf("Error in marshalling data field in dashboard: ", dash, err) + zap.S().Error("Error in marshalling data field in dashboard: ", dash, err) return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } // db.Prepare("Insert into dashboards where") - result, err := db.Exec("INSERT INTO dashboards (uuid, created_at, updated_at, data) VALUES ($1, $2, $3, $4)", dash.Uuid, dash.CreatedAt, dash.UpdatedAt, map_data) + result, err := db.Exec("INSERT INTO dashboards (uuid, created_at, updated_at, data) VALUES ($1, $2, $3, $4)", dash.Uuid, dash.CreatedAt, dash.UpdatedAt, mapData) if err != nil { - zap.S().Errorf("Error in inserting dashboard data: ", dash, err) + zap.S().Error("Error in inserting dashboard data: ", dash, err) return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } lastInsertId, err := result.LastInsertId() @@ -153,7 +153,7 @@ func CreateDashboard(data map[string]interface{}) (*Dashboard, *model.ApiError) func GetDashboards()
([]Dashboard, *model.ApiError) { - dashboards := []Dashboard{} + var dashboards []Dashboard query := fmt.Sprintf("SELECT * FROM dashboards;") err := db.Select(&dashboards, query) @@ -200,9 +200,9 @@ func GetDashboard(uuid string) (*Dashboard, *model.ApiError) { func UpdateDashboard(uuid string, data map[string]interface{}) (*Dashboard, *model.ApiError) { - map_data, err := json.Marshal(data) + mapData, err := json.Marshal(data) if err != nil { - zap.S().Errorf("Error in marshalling data field in dashboard: ", data, err) + zap.S().Error("Error in marshalling data field in dashboard: ", data, err) return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err} } @@ -215,10 +215,10 @@ func UpdateDashboard(uuid string, data map[string]interface{}) (*Dashboard, *mod dashboard.Data = data // db.Prepare("Insert into dashboards where") - _, err = db.Exec("UPDATE dashboards SET updated_at=$1, data=$2 WHERE uuid=$3 ", dashboard.UpdatedAt, map_data, dashboard.Uuid) + _, err = db.Exec("UPDATE dashboards SET updated_at=$1, data=$2 WHERE uuid=$3 ", dashboard.UpdatedAt, mapData, dashboard.Uuid) if err != nil { - zap.S().Errorf("Error in inserting dashboard data: ", data, err) + zap.S().Error("Error in inserting dashboard data: ", data, err) return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } @@ -249,7 +249,7 @@ func IsPostDataSane(data *map[string]interface{}) error { func SlugifyTitle(title string) string { s := slug.Make(strings.ToLower(title)) if s == "" { - // If the dashboard name is only characters outside of the + // If the dashboard name is only characters outside the // sluggable characters, the slug creation will return an // empty string which will mess up URLs. This failsafe picks // that up and creates the slug as a base64 identifier instead. 
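Reviewer note on the slice-declaration changes in this file (and in GetDisks in the reader above and the dao/sqlite getters further down): the linter-preferred "var dashboards []Dashboard" form yields a nil slice, while the old "dashboards := []Dashboard{}" form yields a non-nil empty slice, and the two serialize differently. A minimal standalone Go sketch of the difference, illustration only and not part of the patch; the Dashboard type here is a trimmed, hypothetical stand-in:

package main

import (
	"encoding/json"
	"fmt"
)

// Dashboard is trimmed to one field for illustration.
type Dashboard struct {
	Uuid string `json:"uuid"`
}

func main() {
	var nilDashboards []Dashboard    // post-change form: nil until appended to
	emptyDashboards := []Dashboard{} // pre-change form: non-nil, length zero

	a, _ := json.Marshal(nilDashboards)
	b, _ := json.Marshal(emptyDashboards)
	fmt.Println(string(a)) // null
	fmt.Println(string(b)) // []
}

Handlers that write such slices straight into a JSON response will therefore emit null instead of [] when no rows match; clients must either tolerate both, or the handlers should normalize before responding.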
diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 6f5af546cd..6215641f42 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -210,7 +210,7 @@ func ViewAccess(f func(http.ResponseWriter, *http.Request)) http.HandlerFunc { if !(auth.IsViewer(user) || auth.IsEditor(user) || auth.IsAdmin(user)) { respondError(w, &model.ApiError{ Typ: model.ErrorForbidden, - Err: errors.New("API is accessible to viewers/editors/admins."), + Err: errors.New("API is accessible to viewers/editors/admins"), }, nil) return } @@ -231,7 +231,7 @@ func EditAccess(f func(http.ResponseWriter, *http.Request)) http.HandlerFunc { if !(auth.IsEditor(user) || auth.IsAdmin(user)) { respondError(w, &model.ApiError{ Typ: model.ErrorForbidden, - Err: errors.New("API is accessible to editors/admins."), + Err: errors.New("API is accessible to editors/admins"), }, nil) return } @@ -253,7 +253,7 @@ func SelfAccess(f func(http.ResponseWriter, *http.Request)) http.HandlerFunc { if !(auth.IsSelfAccessRequest(user, id) || auth.IsAdmin(user)) { respondError(w, &model.ApiError{ Typ: model.ErrorForbidden, - Err: errors.New("API is accessible for self access or to the admins."), + Err: errors.New("API is accessible for self access or to the admins"), }, nil) return } @@ -455,13 +455,13 @@ func (aH *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request } // prometheus instant query needs same timestamp - if metricsQueryRangeParams.CompositeMetricQuery.PanelType == model.QUERY_VALUE && - metricsQueryRangeParams.CompositeMetricQuery.QueryType == model.PROM { + if metricsQueryRangeParams.CompositeMetricQuery.PanelType == model.QueryValue && + metricsQueryRangeParams.CompositeMetricQuery.QueryType == model.Prom { metricsQueryRangeParams.Start = metricsQueryRangeParams.End } - // round up the end to neaerest multiple - if metricsQueryRangeParams.CompositeMetricQuery.QueryType == model.QUERY_BUILDER { + // round down the end to the nearest multiple + if metricsQueryRangeParams.CompositeMetricQuery.QueryType == model.QueryBuilder { end := (metricsQueryRangeParams.End) / 1000 step := metricsQueryRangeParams.Step metricsQueryRangeParams.End = (end / step * step) * 1000 @@ -571,15 +571,15 @@ func (aH *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request var seriesList []*model.Series var err error switch metricsQueryRangeParams.CompositeMetricQuery.QueryType { - case model.QUERY_BUILDER: - runQueries := metrics.PrepareBuilderMetricQueries(metricsQueryRangeParams, constants.SIGNOZ_TIMESERIES_TABLENAME) + case model.QueryBuilder: + runQueries := metrics.PrepareBuilderMetricQueries(metricsQueryRangeParams, constants.SignozTimeSeriesTableName) if runQueries.Err != nil { respondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: runQueries.Err}, nil) return } seriesList, err = execClickHouseQueries(runQueries.Queries) - case model.CLICKHOUSE: + case model.ClickHouse: queries := make(map[string]string) for name, chQuery := range metricsQueryRangeParams.CompositeMetricQuery.ClickHouseQueries { if chQuery.Disabled { @@ -588,7 +588,7 @@ func (aH *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request queries[name] = chQuery.Query } seriesList, err = execClickHouseQueries(queries) - case model.PROM: + case model.Prom: seriesList, err = execPromQueries(metricsQueryRangeParams) default: err = fmt.Errorf("invalid query type") @@ -601,10 +601,10 @@ func (aH *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r 
*http.Request respondError(w, apiErrObj, nil) return } - if metricsQueryRangeParams.CompositeMetricQuery.PanelType == model.QUERY_VALUE && + if metricsQueryRangeParams.CompositeMetricQuery.PanelType == model.QueryValue && len(seriesList) > 1 && - (metricsQueryRangeParams.CompositeMetricQuery.QueryType == model.QUERY_BUILDER || - metricsQueryRangeParams.CompositeMetricQuery.QueryType == model.CLICKHOUSE) { + (metricsQueryRangeParams.CompositeMetricQuery.QueryType == model.QueryBuilder || + metricsQueryRangeParams.CompositeMetricQuery.QueryType == model.ClickHouse) { respondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("invalid: query resulted in more than one series for value type")}, nil) return } @@ -667,7 +667,7 @@ func (aH *APIHandler) getDashboards(w http.ResponseWriter, r *http.Request) { inter = Intersection(inter, tags2Dash[tag]) } - filteredDashboards := []dashboards.Dashboard{} + var filteredDashboards []dashboards.Dashboard for _, val := range inter { dash := (allDashboards)[val] filteredDashboards = append(filteredDashboards, dash) @@ -827,14 +827,14 @@ func (aH *APIHandler) testChannel(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() body, err := ioutil.ReadAll(r.Body) if err != nil { - zap.S().Errorf("Error in getting req body of testChannel API\n", err) + zap.S().Error("Error in getting req body of testChannel API\n", err) respondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } receiver := &am.Receiver{} if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer - zap.S().Errorf("Error in parsing req body of testChannel API\n", err) + zap.S().Error("Error in parsing req body of testChannel API\n", err) respondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } @@ -855,14 +855,14 @@ func (aH *APIHandler) editChannel(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() body, err := ioutil.ReadAll(r.Body) if err != nil { - zap.S().Errorf("Error in getting req body of editChannel API\n", err) + zap.S().Error("Error in getting req body of editChannel API\n", err) respondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } receiver := &am.Receiver{} if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer - zap.S().Errorf("Error in parsing req body of editChannel API\n", err) + zap.S().Error("Error in parsing req body of editChannel API\n", err) respondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } @@ -883,14 +883,14 @@ func (aH *APIHandler) createChannel(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() body, err := ioutil.ReadAll(r.Body) if err != nil { - zap.S().Errorf("Error in getting req body of createChannel API\n", err) + zap.S().Error("Error in getting req body of createChannel API\n", err) respondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } receiver := &am.Receiver{} if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer - zap.S().Errorf("Error in parsing req body of createChannel API\n", err) + zap.S().Error("Error in parsing req body of createChannel API\n", err) respondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } @@ -968,20 +968,20 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request) if res.Err != nil { switch res.Err.(type) { case promql.ErrQueryCanceled: - respondError(w, &model.ApiError{model.ErrorCanceled, res.Err}, nil) 
+ respondError(w, &model.ApiError{Typ: model.ErrorCanceled, Err: res.Err}, nil) case promql.ErrQueryTimeout: - respondError(w, &model.ApiError{model.ErrorTimeout, res.Err}, nil) + respondError(w, &model.ApiError{Typ: model.ErrorTimeout, Err: res.Err}, nil) } - respondError(w, &model.ApiError{model.ErrorExec, res.Err}, nil) + respondError(w, &model.ApiError{Typ: model.ErrorExec, Err: res.Err}, nil) } - response_data := &model.QueryData{ + responseData := &model.QueryData{ ResultType: res.Value.Type(), Result: res.Value, Stats: qs, } - aH.respond(w, response_data) + aH.respond(w, responseData) } @@ -1022,20 +1022,20 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) { if res.Err != nil { switch res.Err.(type) { case promql.ErrQueryCanceled: - respondError(w, &model.ApiError{model.ErrorCanceled, res.Err}, nil) + respondError(w, &model.ApiError{Typ: model.ErrorCanceled, Err: res.Err}, nil) case promql.ErrQueryTimeout: - respondError(w, &model.ApiError{model.ErrorTimeout, res.Err}, nil) + respondError(w, &model.ApiError{Typ: model.ErrorTimeout, Err: res.Err}, nil) } - respondError(w, &model.ApiError{model.ErrorExec, res.Err}, nil) + respondError(w, &model.ApiError{Typ: model.ErrorExec, Err: res.Err}, nil) } - response_data := &model.QueryData{ + responseData := &model.QueryData{ ResultType: res.Value.Type(), Result: res.Value, Stats: qs, } - aH.respond(w, response_data) + aH.respond(w, responseData) } @@ -1065,7 +1065,7 @@ func (aH *APIHandler) submitFeedback(w http.ResponseWriter, r *http.Request) { "email": email, "message": message, } - telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_INPRODUCT_FEEDBACK, data) + telemetry.GetInstance().SendEvent(telemetry.EventInproductFeedback, data) } @@ -1134,7 +1134,7 @@ func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) { "number": len(*result), } - telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_NUMBER_OF_SERVICES, data) + telemetry.GetInstance().SendEvent(telemetry.EventNumberOfServices, data) aH.writeJSON(w, r, result) } @@ -1378,8 +1378,8 @@ func (aH *APIHandler) getDisks(w http.ResponseWriter, r *http.Request) { } func (aH *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) { - version := version.GetVersion() - aH.writeJSON(w, r, map[string]string{"version": version}) + v := version.GetVersion() + aH.writeJSON(w, r, map[string]string{"version": v}) } // inviteUser is used to invite a user. It is used by an admin api. @@ -1411,7 +1411,7 @@ func (aH *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) { aH.writeJSON(w, r, resp) } -// revokeInvite is used to revoke an invite. +// revokeInvite is used to revoke an invitation. func (aH *APIHandler) revokeInvite(w http.ResponseWriter, r *http.Request) { email := mux.Vars(r)["email"] @@ -1529,7 +1529,7 @@ func (aH *APIHandler) getUser(w http.ResponseWriter, r *http.Request) { if user == nil { respondError(w, &model.ApiError{ Typ: model.ErrorInternal, - Err: errors.New("User not found"), + Err: errors.New("user not found"), }, nil) return } @@ -1540,7 +1540,7 @@ func (aH *APIHandler) getUser(w http.ResponseWriter, r *http.Request) { } // editUser only changes the user's Name and ProfilePictureURL. It is intentionally designed -// to not support update of orgId, Password, createdAt for the sucurity reasons. +// to not support update of orgId, Password, createdAt for security reasons.
func (aH *APIHandler) editUser(w http.ResponseWriter, r *http.Request) { id := mux.Vars(r)["id"] @@ -1596,7 +1596,7 @@ func (aH *APIHandler) deleteUser(w http.ResponseWriter, r *http.Request) { if user == nil { respondError(w, &model.ApiError{ Typ: model.ErrorNotFound, - Err: errors.New("User not found"), + Err: errors.New("user not found"), }, nil) return } @@ -1638,7 +1638,7 @@ func (aH *APIHandler) getRole(w http.ResponseWriter, r *http.Request) { if user == nil { respondError(w, &model.ApiError{ Typ: model.ErrorNotFound, - Err: errors.New("No user found"), + Err: errors.New("no user found"), }, nil) return } @@ -1678,8 +1678,8 @@ func (aH *APIHandler) editRole(w http.ResponseWriter, r *http.Request) { } // Make sure that the request is not demoting the last admin user. - if user.GroupId == auth.AuthCacheObj.AdminGroupId { - adminUsers, apiErr := dao.DB().GetUsersByGroup(ctx, auth.AuthCacheObj.AdminGroupId) + if user.GroupId == auth.CacheObj.AdminGroupId { + adminUsers, apiErr := dao.DB().GetUsersByGroup(ctx, auth.CacheObj.AdminGroupId) if apiErr != nil { respondError(w, apiErr, "Failed to fetch adminUsers") return @@ -1687,7 +1687,7 @@ func (aH *APIHandler) editRole(w http.ResponseWriter, r *http.Request) { if len(adminUsers) == 1 { respondError(w, &model.ApiError{ - Err: errors.New("Cannot demote the last admin"), + Err: errors.New("cannot demote the last admin"), Typ: model.ErrorInternal}, nil) return } @@ -1739,7 +1739,7 @@ func (aH *APIHandler) editOrg(w http.ResponseWriter, r *http.Request) { "organizationName": req.Name, } - telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_ORG_SETTINGS, data) + telemetry.GetInstance().SendEvent(telemetry.EventOrgSettings, data) aH.writeJSON(w, r, map[string]string{"data": "org updated successfully"}) } diff --git a/pkg/query-service/app/metrics/query_builder.go b/pkg/query-service/app/metrics/query_builder.go index 26f57261b9..c8ec84e07c 100644 --- a/pkg/query-service/app/metrics/query_builder.go +++ b/pkg/query-service/app/metrics/query_builder.go @@ -28,14 +28,14 @@ var AggregateOperatorToPercentile = map[model.AggregateOperator]float64{ } var AggregateOperatorToSQLFunc = map[model.AggregateOperator]string{ - model.AVG: "avg", - model.MAX: "max", - model.MIN: "min", - model.SUM: "sum", - model.RATE_SUM: "sum", - model.RATE_AVG: "avg", - model.RATE_MAX: "max", - model.RATE_MIN: "min", + model.Avg: "avg", + model.Max: "max", + model.Min: "min", + model.Sum: "sum", + model.RateSum: "sum", + model.RateAvg: "avg", + model.RateMax: "max", + model.RateMin: "min", } var SupportedFunctions = []string{"exp", "log", "ln", "exp2", "log2", "exp10", "log10", "sqrt", "cbrt", "erf", "erfc", "lgamma", "tgamma", "sin", "cos", "tan", "asin", "acos", "atan", "degrees", "radians"} @@ -128,7 +128,7 @@ func BuildMetricsTimeSeriesFilterQuery(fs *model.FilterSet, groupTags []string, queryString := strings.Join(conditions, " AND ") var selectLabels string - if aggregateOperator == model.NOOP || aggregateOperator == model.RATE { + if aggregateOperator == model.NoOp || aggregateOperator == model.Rate { selectLabels = "labels," } else { for _, tag := range groupTags { @@ -136,14 +136,14 @@ func BuildMetricsTimeSeriesFilterQuery(fs *model.FilterSet, groupTags []string, } } - filterSubQuery := fmt.Sprintf("SELECT %s fingerprint FROM %s.%s WHERE %s", selectLabels, constants.SIGNOZ_METRIC_DBNAME, constants.SIGNOZ_TIMESERIES_TABLENAME, queryString) + filterSubQuery := fmt.Sprintf("SELECT %s fingerprint FROM %s.%s WHERE %s", selectLabels, constants.SignozMetricDbname, 
constants.SignozTimeSeriesTableName, queryString) return filterSubQuery, nil } func BuildMetricQuery(qp *model.QueryRangeParamsV2, mq *model.MetricQuery, tableName string) (string, error) { - if qp.CompositeMetricQuery.PanelType == model.QUERY_VALUE && len(mq.GroupingTags) != 0 { + if qp.CompositeMetricQuery.PanelType == model.QueryValue && len(mq.GroupingTags) != 0 { return "", fmt.Errorf("reduce operator cannot be applied for the query") } @@ -159,7 +159,7 @@ func BuildMetricQuery(qp *model.QueryRangeParamsV2, mq *model.MetricQuery, table "SELECT %s" + " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," + " %s as value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." + constants.SIGNOZ_SAMPLES_TABLENAME + + " FROM " + constants.SignozMetricDbname + "." + constants.SignozSamplesTableName + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + @@ -171,7 +171,7 @@ func BuildMetricQuery(qp *model.QueryRangeParamsV2, mq *model.MetricQuery, table groupTags := groupSelect(mq.GroupingTags...) switch mq.AggregateOperator { - case model.RATE: + case model.Rate: // Calculate rate of change of metric for each unique time series groupBy = "fingerprint, ts" groupTags = "fingerprint," @@ -183,7 +183,7 @@ func BuildMetricQuery(qp *model.QueryRangeParamsV2, mq *model.MetricQuery, table query = fmt.Sprintf(query, "labels as fullLabels,", subQuery) return query, nil - case model.SUM_RATE: + case model.SumRate: rateGroupBy := "fingerprint, " + groupBy rateGroupTags := "fingerprint, " + groupTags op := "max(value)" @@ -194,7 +194,7 @@ func BuildMetricQuery(qp *model.QueryRangeParamsV2, mq *model.MetricQuery, table query = fmt.Sprintf(query, groupTags, subQuery) query = fmt.Sprintf(`SELECT %s ts, sum(value) as value FROM (%s) GROUP BY %s ORDER BY %s ts`, groupTags, query, groupBy, groupTags) return query, nil - case model.RATE_SUM, model.RATE_MAX, model.RATE_AVG, model.RATE_MIN: + case model.RateSum, model.RateMax, model.RateAvg, model.RateMin: op := fmt.Sprintf("%s(value)", AggregateOperatorToSQLFunc[mq.AggregateOperator]) subQuery := fmt.Sprintf(queryTmpl, groupTags, qp.Step, op, filterSubQuery, groupBy, groupTags) query := `SELECT %s ts, runningDifference(value)/runningDifference(ts) as value FROM(%s) OFFSET 1` @@ -204,24 +204,24 @@ func BuildMetricQuery(qp *model.QueryRangeParamsV2, mq *model.MetricQuery, table op := fmt.Sprintf("quantile(%v)(value)", AggregateOperatorToPercentile[mq.AggregateOperator]) query := fmt.Sprintf(queryTmpl, groupTags, qp.Step, op, filterSubQuery, groupBy, groupTags) return query, nil - case model.AVG, model.SUM, model.MIN, model.MAX: + case model.Avg, model.Sum, model.Min, model.Max: op := fmt.Sprintf("%s(value)", AggregateOperatorToSQLFunc[mq.AggregateOperator]) query := fmt.Sprintf(queryTmpl, groupTags, qp.Step, op, filterSubQuery, groupBy, groupTags) return query, nil - case model.COUNT: + case model.Count: op := "toFloat64(count(*))" query := fmt.Sprintf(queryTmpl, groupTags, qp.Step, op, filterSubQuery, groupBy, groupTags) return query, nil - case model.COUNT_DISTINCT: + case model.CountDistinct: op := "toFloat64(count(distinct(value)))" query := fmt.Sprintf(queryTmpl, groupTags, qp.Step, op, filterSubQuery, groupBy, groupTags) return query, nil - case model.NOOP: + case model.NoOp: queryTmpl := "SELECT fingerprint, labels as fullLabels," + " toStartOfInterval(toDateTime(intDiv(timestamp_ms, 1000)), INTERVAL %d SECOND) as ts," + " any(value) as value" + - " FROM " + constants.SIGNOZ_METRIC_DBNAME + "." 
+ constants.SIGNOZ_SAMPLES_TABLENAME + + " FROM " + constants.SignozMetricDbname + "." + constants.SignozSamplesTableName + " INNER JOIN" + " (%s) as filtered_time_series" + " USING fingerprint" + @@ -275,24 +275,24 @@ func reduceQuery(query string, reduceTo model.ReduceToOperator, aggregateOperato var groupBy string // NOOP and RATE can possibly return multiple time series and reduce should be applied // for each uniques series. When the final result contains more than one series we throw - // an error post DB fetching. Otherwise just return the single data. This is not known until queried so the - // the query is prepared accordingly. - if aggregateOperator == model.NOOP || aggregateOperator == model.RATE { + // an error post DB fetching. Otherwise, just return the single data. This is not known until queried so the + // query is prepared accordingly. + if aggregateOperator == model.NoOp || aggregateOperator == model.Rate { selectLabels = ", any(fullLabels) as fullLabels" groupBy = "GROUP BY fingerprint" } // the timestamp picked is not relevant here since the final value used is show the single - // chart with just the query value. For the quer + // chart with just the query value. switch reduceTo { - case model.RLAST: + case model.RLast: query = fmt.Sprintf("SELECT anyLast(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy) - case model.RSUM: + case model.RSum: query = fmt.Sprintf("SELECT sum(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy) - case model.RAVG: + case model.RAvg: query = fmt.Sprintf("SELECT avg(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy) - case model.RMAX: + case model.RMax: query = fmt.Sprintf("SELECT max(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy) - case model.RMIN: + case model.RMin: query = fmt.Sprintf("SELECT min(value) as value, any(ts) as ts %s FROM (%s) %s", selectLabels, query, groupBy) default: return "", fmt.Errorf("unsupported reduce operator") @@ -317,7 +317,7 @@ func varToQuery(qp *model.QueryRangeParamsV2, tableName string) (map[string]stri if err != nil { errs = append(errs, err) } else { - if qp.CompositeMetricQuery.PanelType == model.QUERY_VALUE { + if qp.CompositeMetricQuery.PanelType == model.QueryValue { query, err = reduceQuery(query, mq.ReduceTo, mq.AggregateOperator) if err != nil { errs = append(errs, err) diff --git a/pkg/query-service/app/metrics/query_builder_test.go b/pkg/query-service/app/metrics/query_builder_test.go index f6f9944605..a5b68f469d 100644 --- a/pkg/query-service/app/metrics/query_builder_test.go +++ b/pkg/query-service/app/metrics/query_builder_test.go @@ -18,7 +18,7 @@ func TestBuildQuery(t *testing.T) { "a": { QueryName: "a", MetricName: "name", - AggregateOperator: model.RATE_MAX, + AggregateOperator: model.RateMax, Expression: "a", }, }, @@ -46,7 +46,7 @@ func TestBuildQueryWithFilters(t *testing.T) { {Key: "a", Value: "b", Operator: "neq"}, {Key: "code", Value: "ERROR_*", Operator: "nmatch"}, }}, - AggregateOperator: model.RATE_MAX, + AggregateOperator: model.RateMax, Expression: "a", }, }, @@ -75,13 +75,13 @@ func TestBuildQueryWithMultipleQueries(t *testing.T) { TagFilters: &model.FilterSet{Operator: "AND", Items: []model.FilterItem{ {Key: "in", Value: []interface{}{"a", "b", "c"}, Operator: "in"}, }}, - AggregateOperator: model.RATE_AVG, + AggregateOperator: model.RateAvg, Expression: "a", }, "b": { QueryName: "b", MetricName: "name2", - AggregateOperator: model.RATE_MAX, + AggregateOperator: 
model.RateMax, Expression: "b", }, }, @@ -108,12 +108,12 @@ func TestBuildQueryWithMultipleQueriesAndFormula(t *testing.T) { TagFilters: &model.FilterSet{Operator: "AND", Items: []model.FilterItem{ {Key: "in", Value: []interface{}{"a", "b", "c"}, Operator: "in"}, }}, - AggregateOperator: model.RATE_MAX, + AggregateOperator: model.RateMax, Expression: "a", }, "b": { MetricName: "name2", - AggregateOperator: model.RATE_AVG, + AggregateOperator: model.RateAvg, Expression: "b", }, "c": { diff --git a/pkg/query-service/app/parser.go b/pkg/query-service/app/parser.go index e81b986a3d..f09b5f8eb6 100644 --- a/pkg/query-service/app/parser.go +++ b/pkg/query-service/app/parser.go @@ -467,8 +467,8 @@ func parseCountErrorsRequest(r *http.Request) (*model.CountErrorsParams, error) } params := &model.CountErrorsParams{ - Start: startTime, - End: endTime, + Start: startTime, + End: endTime, } return params, nil @@ -597,7 +597,7 @@ func parseTTLParams(r *http.Request) (*model.TTLParams, error) { // Validate the TTL duration. durationParsed, err := time.ParseDuration(delDuration) if err != nil || durationParsed.Seconds() <= 0 { - return nil, fmt.Errorf("Not a valid TTL duration %v", delDuration) + return nil, fmt.Errorf("not a valid TTL duration %v", delDuration) } var toColdParsed time.Duration @@ -606,10 +606,10 @@ func parseTTLParams(r *http.Request) (*model.TTLParams, error) { if len(coldStorage) > 0 { toColdParsed, err = time.ParseDuration(toColdDuration) if err != nil || toColdParsed.Seconds() <= 0 { - return nil, fmt.Errorf("Not a valid toCold TTL duration %v", toColdDuration) + return nil, fmt.Errorf("not a valid toCold TTL duration %v", toColdDuration) } if toColdParsed.Seconds() != 0 && toColdParsed.Seconds() >= durationParsed.Seconds() { - return nil, fmt.Errorf("Delete TTL should be greater than cold storage move TTL.") + return nil, fmt.Errorf("delete TTL should be greater than cold storage move TTL") } } diff --git a/pkg/query-service/app/parser/metrics.go b/pkg/query-service/app/parser/metrics.go index ce4d079fa5..d5ff26451f 100644 --- a/pkg/query-service/app/parser/metrics.go +++ b/pkg/query-service/app/parser/metrics.go @@ -11,13 +11,13 @@ import ( func validateQueryRangeParamsV2(qp *model.QueryRangeParamsV2) error { var errs []error - if !(qp.DataSource >= model.METRICS && qp.DataSource <= model.LOGS) { + if !(qp.DataSource >= model.Metrics && qp.DataSource <= model.Logs) { errs = append(errs, fmt.Errorf("unsupported data source")) } - if !(qp.CompositeMetricQuery.QueryType >= model.QUERY_BUILDER && qp.CompositeMetricQuery.QueryType <= model.PROM) { + if !(qp.CompositeMetricQuery.QueryType >= model.QueryBuilder && qp.CompositeMetricQuery.QueryType <= model.Prom) { errs = append(errs, fmt.Errorf("unsupported query type")) } - if !(qp.CompositeMetricQuery.PanelType >= model.TIME_SERIES && qp.CompositeMetricQuery.PanelType <= model.QUERY_VALUE) { + if !(qp.CompositeMetricQuery.PanelType >= model.TimeSeries && qp.CompositeMetricQuery.PanelType <= model.QueryValue) { errs = append(errs, fmt.Errorf("unsupported panel type")) } if len(errs) != 0 { diff --git a/pkg/query-service/app/parser_test.go b/pkg/query-service/app/parser_test.go index 6fa049a05e..678c6d98eb 100644 --- a/pkg/query-service/app/parser_test.go +++ b/pkg/query-service/app/parser_test.go @@ -22,7 +22,7 @@ func TestParseFilterSingleFilter(t *testing.T) { }`) req, _ := http.NewRequest("POST", "", bytes.NewReader(postBody)) res, _ := parseFilterSet(req) - query, _ := metrics.BuildMetricsTimeSeriesFilterQuery(res, []string{}, 
"table", model.NOOP) + query, _ := metrics.BuildMetricsTimeSeriesFilterQuery(res, []string{}, "table", model.NoOp) So(query, ShouldContainSubstring, "signoz_metrics.time_series_v2 WHERE metric_name = 'table' AND labels_object.namespace = 'a'") }) } @@ -38,7 +38,7 @@ func TestParseFilterMultipleFilter(t *testing.T) { }`) req, _ := http.NewRequest("POST", "", bytes.NewReader(postBody)) res, _ := parseFilterSet(req) - query, _ := metrics.BuildMetricsTimeSeriesFilterQuery(res, []string{}, "table", model.NOOP) + query, _ := metrics.BuildMetricsTimeSeriesFilterQuery(res, []string{}, "table", model.NoOp) So(query, should.ContainSubstring, "labels_object.host IN ['host-1','host-2']") So(query, should.ContainSubstring, "labels_object.namespace = 'a'") }) @@ -54,7 +54,7 @@ func TestParseFilterNotSupportedOp(t *testing.T) { }`) req, _ := http.NewRequest("POST", "", bytes.NewReader(postBody)) res, _ := parseFilterSet(req) - _, err := metrics.BuildMetricsTimeSeriesFilterQuery(res, []string{}, "table", model.NOOP) + _, err := metrics.BuildMetricsTimeSeriesFilterQuery(res, []string{}, "table", model.NoOp) So(err, should.BeError, "unsupported operation") }) } diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go index 5bccea66e2..f3ab5e5d39 100644 --- a/pkg/query-service/app/server.go +++ b/pkg/query-service/app/server.go @@ -55,10 +55,10 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status { // NewServer creates and initializes Server func NewServer(serverOptions *ServerOptions) (*Server, error) { - if err := dao.InitDao("sqlite", constants.RELATIONAL_DATASOURCE_PATH); err != nil { + if err := dao.InitDao("sqlite", constants.RelationalDatasourcePath); err != nil { return nil, err } - localDB, err := dashboards.InitDB(constants.RELATIONAL_DATASOURCE_PATH) + localDB, err := dashboards.InitDB(constants.RelationalDatasourcePath) if err != nil { return nil, err @@ -70,11 +70,11 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { storage := os.Getenv("STORAGE") if storage == "clickhouse" { zap.S().Info("Using ClickHouse as datastore ...") - clickhouseReader := clickhouseReader.NewReader(localDB) - go clickhouseReader.Start() - reader = clickhouseReader + chReader := clickhouseReader.NewReader(localDB) + go chReader.Start() + reader = chReader } else { - return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage) + return nil, fmt.Errorf("storage type: %s is not supported in query service", storage) } telemetry.GetInstance().SetReader(reader) @@ -211,7 +211,7 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler { data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode} if _, ok := telemetry.IgnoredPaths()[path]; !ok { - telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data) + telemetry.GetInstance().SendEvent(telemetry.EventPath, data) } }) diff --git a/pkg/query-service/auth/auth.go b/pkg/query-service/auth/auth.go index 1a5775334a..43e869565b 100644 --- a/pkg/query-service/auth/auth.go +++ b/pkg/query-service/auth/auth.go @@ -21,9 +21,10 @@ const ( ) var ( - ErrorInvalidCreds = fmt.Errorf("Invalid credentials") + ErrorInvalidCreds = fmt.Errorf("invalid credentials") ) +// Invite sends the invitation for users // The root user should be able to invite people to create account on SigNoz cluster. 
func Invite(ctx context.Context, req *model.InviteRequest) (*model.InviteResponse, error) { zap.S().Debugf("Got an invite request for email: %s\n", req.Email) diff --git a/pkg/query-service/auth/rbac.go b/pkg/query-service/auth/rbac.go index d45a06e5d5..a33445defe 100644 --- a/pkg/query-service/auth/rbac.go +++ b/pkg/query-service/auth/rbac.go @@ -17,13 +17,13 @@ type Group struct { GroupName string } -type AuthCache struct { +type Cache struct { AdminGroupId string EditorGroupId string ViewerGroupId string } -var AuthCacheObj AuthCache +var CacheObj Cache // InitAuthCache reads the DB and initialize the auth cache. func InitAuthCache(ctx context.Context) error { @@ -37,13 +37,13 @@ func InitAuthCache(ctx context.Context) error { return nil } - if err := setGroupId(constants.AdminGroup, &AuthCacheObj.AdminGroupId); err != nil { + if err := setGroupId(constants.AdminGroup, &CacheObj.AdminGroupId); err != nil { return err } - if err := setGroupId(constants.EditorGroup, &AuthCacheObj.EditorGroupId); err != nil { + if err := setGroupId(constants.EditorGroup, &CacheObj.EditorGroupId); err != nil { return err } - if err := setGroupId(constants.ViewerGroup, &AuthCacheObj.ViewerGroupId); err != nil { + if err := setGroupId(constants.ViewerGroup, &CacheObj.ViewerGroupId); err != nil { return err } @@ -65,9 +65,9 @@ func GetUserFromRequest(r *http.Request) (*model.UserPayload, error) { func IsSelfAccessRequest(user *model.UserPayload, id string) bool { return user.Id == id } -func IsViewer(user *model.UserPayload) bool { return user.GroupId == AuthCacheObj.ViewerGroupId } -func IsEditor(user *model.UserPayload) bool { return user.GroupId == AuthCacheObj.EditorGroupId } -func IsAdmin(user *model.UserPayload) bool { return user.GroupId == AuthCacheObj.AdminGroupId } +func IsViewer(user *model.UserPayload) bool { return user.GroupId == CacheObj.ViewerGroupId } +func IsEditor(user *model.UserPayload) bool { return user.GroupId == CacheObj.EditorGroupId } +func IsAdmin(user *model.UserPayload) bool { return user.GroupId == CacheObj.AdminGroupId } func ValidatePassword(password string) error { if len(password) < minimumPasswordLength { diff --git a/pkg/query-service/auth/utils.go b/pkg/query-service/auth/utils.go index 4de59ea2c0..7006cde152 100644 --- a/pkg/query-service/auth/utils.go +++ b/pkg/query-service/auth/utils.go @@ -33,7 +33,6 @@ func isValidRole(role string) bool { default: return false } - return false } func validateInviteRequest(req *model.InviteRequest) error { diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index f74a63c7ff..75a1159cf9 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -11,7 +11,7 @@ const ( DebugHttpPort = "0.0.0.0:6060" // Address to serve http (pprof) ) -var DEFAULT_TELEMETRY_ANONYMOUS = false +var DefaultTelemetryAnonymous = false func IsTelemetryEnabled() bool { isTelemetryEnabledStr := os.Getenv("TELEMETRY_ENABLED") @@ -32,10 +32,10 @@ func GetAlertManagerApiPrefix() string { return "http://alertmanager:9093/api/" } -// Alert manager channel subpath +// AmChannelApiPath is a channel subpath for Alert manager var AmChannelApiPath = GetOrDefaultEnv("ALERTMANAGER_API_CHANNEL_PATH", "v1/routes") -var RELATIONAL_DATASOURCE_PATH = GetOrDefaultEnv("SIGNOZ_LOCAL_DB_PATH", "/var/lib/signoz/signoz.db") +var RelationalDatasourcePath = GetOrDefaultEnv("SIGNOZ_LOCAL_DB_PATH", "/var/lib/signoz/signoz.db") const ( ServiceName = "serviceName" @@ -67,9 +67,9 @@ const ( FirstSeen = 
"firstSeen" ) const ( - SIGNOZ_METRIC_DBNAME = "signoz_metrics" - SIGNOZ_SAMPLES_TABLENAME = "samples_v2" - SIGNOZ_TIMESERIES_TABLENAME = "time_series_v2" + SignozMetricDbname = "signoz_metrics" + SignozSamplesTableName = "samples_v2" + SignozTimeSeriesTableName = "time_series_v2" ) func GetOrDefaultEnv(key string, fallback string) string { diff --git a/pkg/query-service/dao/sqlite/connection.go b/pkg/query-service/dao/sqlite/connection.go index b66ddeec12..546d58eaa2 100644 --- a/pkg/query-service/dao/sqlite/connection.go +++ b/pkg/query-service/dao/sqlite/connection.go @@ -26,7 +26,7 @@ func InitDB(dataSourceName string) (*ModelDaoSqlite, error) { } db.SetMaxOpenConns(10) - table_schema := ` + tableSchema := ` PRAGMA foreign_keys = ON; CREATE TABLE IF NOT EXISTS invites ( @@ -70,9 +70,9 @@ func InitDB(dataSourceName string) (*ModelDaoSqlite, error) { ); ` - _, err = db.Exec(table_schema) + _, err = db.Exec(tableSchema) if err != nil { - return nil, fmt.Errorf("Error in creating tables: %v", err.Error()) + return nil, fmt.Errorf("error in creating tables: %v", err.Error()) } mds := &ModelDaoSqlite{db: db} @@ -96,7 +96,7 @@ func InitDB(dataSourceName string) (*ModelDaoSqlite, error) { func (mds *ModelDaoSqlite) initializeOrgPreferences(ctx context.Context) error { // set anonymous setting as default in case of any failures to fetch UserPreference in below section - telemetry.GetInstance().SetTelemetryAnonymous(constants.DEFAULT_TELEMETRY_ANONYMOUS) + telemetry.GetInstance().SetTelemetryAnonymous(constants.DefaultTelemetryAnonymous) orgs, apiError := mds.GetOrgs(ctx) if apiError != nil { diff --git a/pkg/query-service/dao/sqlite/rbac.go b/pkg/query-service/dao/sqlite/rbac.go index 9c74f812c2..c9f99bd52b 100644 --- a/pkg/query-service/dao/sqlite/rbac.go +++ b/pkg/query-service/dao/sqlite/rbac.go @@ -35,7 +35,7 @@ func (mds *ModelDaoSqlite) DeleteInvitation(ctx context.Context, email string) * func (mds *ModelDaoSqlite) GetInviteFromEmail(ctx context.Context, email string, ) (*model.InvitationObject, *model.ApiError) { - invites := []model.InvitationObject{} + var invites []model.InvitationObject err := mds.db.Select(&invites, `SELECT * FROM invites WHERE email=?;`, email) @@ -57,7 +57,7 @@ func (mds *ModelDaoSqlite) GetInviteFromEmail(ctx context.Context, email string, func (mds *ModelDaoSqlite) GetInviteFromToken(ctx context.Context, token string, ) (*model.InvitationObject, *model.ApiError) { - invites := []model.InvitationObject{} + var invites []model.InvitationObject err := mds.db.Select(&invites, `SELECT * FROM invites WHERE token=?;`, token) @@ -77,7 +77,7 @@ func (mds *ModelDaoSqlite) GetInviteFromToken(ctx context.Context, token string, func (mds *ModelDaoSqlite) GetInvites(ctx context.Context, ) ([]model.InvitationObject, *model.ApiError) { - invites := []model.InvitationObject{} + var invites []model.InvitationObject err := mds.db.Select(&invites, "SELECT * FROM invites") if err != nil { return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} @@ -103,7 +103,7 @@ func (mds *ModelDaoSqlite) CreateOrg(ctx context.Context, func (mds *ModelDaoSqlite) GetOrg(ctx context.Context, id string) (*model.Organization, *model.ApiError) { - orgs := []model.Organization{} + var orgs []model.Organization err := mds.db.Select(&orgs, `SELECT * FROM organizations WHERE id=?;`, id) if err != nil { @@ -125,7 +125,7 @@ func (mds *ModelDaoSqlite) GetOrg(ctx context.Context, func (mds *ModelDaoSqlite) GetOrgByName(ctx context.Context, name string) (*model.Organization, *model.ApiError) { - orgs := 
[]model.Organization{} + var orgs []model.Organization if err := mds.db.Select(&orgs, `SELECT * FROM organizations WHERE name=?;`, name); err != nil { return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} @@ -144,7 +144,7 @@ func (mds *ModelDaoSqlite) GetOrgByName(ctx context.Context, } func (mds *ModelDaoSqlite) GetOrgs(ctx context.Context) ([]model.Organization, *model.ApiError) { - orgs := []model.Organization{} + var orgs []model.Organization err := mds.db.Select(&orgs, `SELECT * FROM organizations`) if err != nil { @@ -194,7 +194,7 @@ func (mds *ModelDaoSqlite) CreateUser(ctx context.Context, "email": user.Email, } telemetry.GetInstance().IdentifyUser(user) - telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_USER, data) + telemetry.GetInstance().SendEvent(telemetry.EventUser, data) return user, nil } @@ -254,7 +254,7 @@ func (mds *ModelDaoSqlite) DeleteUser(ctx context.Context, id string) *model.Api func (mds *ModelDaoSqlite) GetUser(ctx context.Context, id string) (*model.UserPayload, *model.ApiError) { - users := []model.UserPayload{} + var users []model.UserPayload query := `select u.id, u.name, @@ -291,7 +291,7 @@ func (mds *ModelDaoSqlite) GetUser(ctx context.Context, func (mds *ModelDaoSqlite) GetUserByEmail(ctx context.Context, email string) (*model.UserPayload, *model.ApiError) { - users := []model.UserPayload{} + var users []model.UserPayload query := `select u.id, u.name, @@ -326,7 +326,7 @@ func (mds *ModelDaoSqlite) GetUserByEmail(ctx context.Context, } func (mds *ModelDaoSqlite) GetUsers(ctx context.Context) ([]model.UserPayload, *model.ApiError) { - users := []model.UserPayload{} + var users []model.UserPayload query := `select u.id, @@ -355,7 +355,7 @@ func (mds *ModelDaoSqlite) GetUsers(ctx context.Context) ([]model.UserPayload, * func (mds *ModelDaoSqlite) GetUsersByOrg(ctx context.Context, orgId string) ([]model.UserPayload, *model.ApiError) { - users := []model.UserPayload{} + var users []model.UserPayload query := `select u.id, u.name, @@ -382,7 +382,7 @@ func (mds *ModelDaoSqlite) GetUsersByOrg(ctx context.Context, func (mds *ModelDaoSqlite) GetUsersByGroup(ctx context.Context, groupId string) ([]model.UserPayload, *model.ApiError) { - users := []model.UserPayload{} + var users []model.UserPayload query := `select u.id, u.name, @@ -430,7 +430,7 @@ func (mds *ModelDaoSqlite) DeleteGroup(ctx context.Context, id string) *model.Ap func (mds *ModelDaoSqlite) GetGroup(ctx context.Context, id string) (*model.Group, *model.ApiError) { - groups := []model.Group{} + var groups []model.Group if err := mds.db.Select(&groups, `SELECT id, name FROM groups WHERE id=?`, id); err != nil { return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -451,7 +451,7 @@ func (mds *ModelDaoSqlite) GetGroup(ctx context.Context, func (mds *ModelDaoSqlite) GetGroupByName(ctx context.Context, name string) (*model.Group, *model.ApiError) { - groups := []model.Group{} + var groups []model.Group if err := mds.db.Select(&groups, `SELECT id, name FROM groups WHERE name=?`, name); err != nil { return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -472,7 +472,7 @@ func (mds *ModelDaoSqlite) GetGroupByName(ctx context.Context, func (mds *ModelDaoSqlite) GetGroups(ctx context.Context) ([]model.Group, *model.ApiError) { - groups := []model.Group{} + var groups []model.Group if err := mds.db.Select(&groups, "SELECT * FROM groups"); err != nil { return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -502,7 +502,7 @@ func (mds *ModelDaoSqlite) 
DeleteResetPasswordEntry(ctx context.Context, func (mds *ModelDaoSqlite) GetResetPasswordEntry(ctx context.Context, token string) (*model.ResetPasswordEntry, *model.ApiError) { - entries := []model.ResetPasswordEntry{} + var entries []model.ResetPasswordEntry q := `SELECT user_id,token FROM reset_password_request WHERE token=?;` if err := mds.db.Select(&entries, q, token); err != nil { diff --git a/pkg/query-service/integrations/alertManager/manager.go b/pkg/query-service/integrations/alertManager/manager.go index 47dc96f366..6742e62675 100644 --- a/pkg/query-service/integrations/alertManager/manager.go +++ b/pkg/query-service/integrations/alertManager/manager.go @@ -40,7 +40,7 @@ func prepareAmChannelApiURL() string { basePath := constants.GetAlertManagerApiPrefix() AmChannelApiPath := constants.AmChannelApiPath - if len(AmChannelApiPath) > 0 && rune(AmChannelApiPath[0]) == rune('/') { + if len(AmChannelApiPath) > 0 && rune(AmChannelApiPath[0]) == '/' { AmChannelApiPath = AmChannelApiPath[1:] } @@ -109,7 +109,7 @@ func (m *manager) DeleteRoute(name string) *model.ApiError { req, err := http.NewRequest(http.MethodDelete, amURL, bytes.NewBuffer(requestData)) if err != nil { - zap.S().Errorf("Error in creating new delete request to alertmanager/v1/receivers\n", err) + zap.S().Error("Error in creating new delete request to alertmanager/v1/receivers\n", err) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -119,7 +119,7 @@ func (m *manager) DeleteRoute(name string) *model.ApiError { response, err := client.Do(req) if err != nil { - zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(DELETE %s)\n", amURL), err) + zap.S().Errorf("Error in getting response of API call to alertmanager(DELETE %s): %v\n", amURL, err) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } diff --git a/pkg/query-service/integrations/alertManager/model.go b/pkg/query-service/integrations/alertManager/model.go index 705b0492fd..a3f90be461 100644 --- a/pkg/query-service/integrations/alertManager/model.go +++ b/pkg/query-service/integrations/alertManager/model.go @@ -19,4 +19,4 @@ type Receiver struct { type ReceiverResponse struct { Status string `json:"status"` Data Receiver `json:"data"` -} \ No newline at end of file +} diff --git a/pkg/query-service/interfaces/interface.go b/pkg/query-service/interfaces/interface.go index 705a77c6a3..690e99710e 100644 --- a/pkg/query-service/interfaces/interface.go +++ b/pkg/query-service/interfaces/interface.go @@ -47,10 +47,8 @@ type Reader interface { GetErrorFromGroupID(ctx context.Context, params *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) GetNextPrevErrorIDs(ctx context.Context, params *model.GetErrorParams) (*model.NextPrevErrorIDs, *model.ApiError) - // Search Interfaces SearchTraces(ctx context.Context, traceID string) (*[]model.SearchSpansResult, error) - // Setter Interfaces SetTTL(ctx context.Context, ttlParams *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) GetMetricAutocompleteMetricNames(ctx context.Context, matchText string, limit int) (*[]string, *model.ApiError) diff --git a/pkg/query-service/model/queryParams.go b/pkg/query-service/model/queryParams.go index 813b62d17f..56ff5c0075 100644 --- a/pkg/query-service/model/queryParams.go +++ b/pkg/query-service/model/queryParams.go @@ -33,20 +33,20 @@ type ReduceToOperator int const ( _ ReduceToOperator = iota - RLAST - RSUM - RAVG - RMAX - RMIN + RLast + RSum + RAvg + RMax + RMin ) type QueryType int const ( _ QueryType = iota -
-	QUERY_BUILDER
-	CLICKHOUSE
-	PROM
+	QueryBuilder
+	ClickHouse
+	Prom
 )
 
 type PromQuery struct {
@@ -64,8 +64,8 @@ type PanelType int
 
 const (
 	_ PanelType = iota
-	TIME_SERIES
-	QUERY_VALUE
+	TimeSeries
+	QueryValue
 )
 
 type CompositeMetricQuery struct {
@@ -80,13 +80,13 @@ type AggregateOperator int
 
 const (
 	_ AggregateOperator = iota
-	NOOP
-	COUNT
-	COUNT_DISTINCT
-	SUM
-	AVG
-	MAX
-	MIN
+	NoOp
+	Count
+	CountDistinct
+	Sum
+	Avg
+	Max
+	Min
 	P05
 	P10
 	P20
@@ -96,25 +96,25 @@ const (
 	P90
 	P95
 	P99
-	RATE
-	SUM_RATE
-	// leave blank space for possily {AVG, X}_RATE
+	Rate
+	SumRate
+	// leave blank space for possibly {AVG, X}_RATE
 	_
 	_
 	_
-	RATE_SUM
-	RATE_AVG
-	RATE_MAX
-	RATE_MIN
+	RateSum
+	RateAvg
+	RateMax
+	RateMin
 )
 
 type DataSource int
 
 const (
 	_ DataSource = iota
-	METRICS
-	TRACES
-	LOGS
+	Metrics
+	Traces
+	Logs
 )
 
 type QueryRangeParamsV2 struct {
diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go
index 8c9dfad572..f30c1c3544 100644
--- a/pkg/query-service/model/response.go
+++ b/pkg/query-service/model/response.go
@@ -73,7 +73,7 @@ type AlertDiscovery struct {
 	Alerts []*AlertingRuleResponse `json:"rules"`
 }
 
-// Alert has info for an alert.
+// AlertingRuleResponse has info for an alert.
 type AlertingRuleResponse struct {
 	Labels      labels.Labels `json:"labels"`
 	Annotations labels.Labels `json:"annotations"`
@@ -137,7 +137,7 @@ type GetFilterSpansResponse struct {
 	TotalSpans uint64 `json:"totalSpans"`
 }
 
-type SearchSpanDBReponseItem struct {
+type SearchSpanDBResponseItem struct {
 	Timestamp time.Time `ch:"timestamp"`
 	TraceID   string    `ch:"traceID"`
 	Model     string    `ch:"model"`
@@ -150,7 +150,7 @@ type Event struct {
 	IsError bool `json:"isError,omitempty"`
 }
 
-type SearchSpanReponseItem struct {
+type SearchSpanResponseItem struct {
 	TimeUnixNano uint64 `json:"timestamp"`
 	SpanID       string `json:"spanID"`
 	TraceID      string `json:"traceID"`
@@ -177,13 +177,13 @@ func (ref *OtelSpanRef) toString() string {
 	return retString
 }
 
-func (item *SearchSpanReponseItem) GetValues() []interface{} {
+func (item *SearchSpanResponseItem) GetValues() []interface{} {
 
-	references := []OtelSpanRef{}
-	jsonbody, _ := json.Marshal(item.References)
-	json.Unmarshal(jsonbody, &references)
+	var references []OtelSpanRef
+	jsonBody, _ := json.Marshal(item.References)
+	json.Unmarshal(jsonBody, &references)
 
-	referencesStringArray := []string{}
+	var referencesStringArray []string
 	for _, item := range references {
 		referencesStringArray = append(referencesStringArray, item.toString())
 	}
diff --git a/pkg/query-service/telemetry/telemetry.go b/pkg/query-service/telemetry/telemetry.go
index ca402e4da8..45fa72f8a5 100644
--- a/pkg/query-service/telemetry/telemetry.go
+++ b/pkg/query-service/telemetry/telemetry.go
@@ -16,18 +16,18 @@ import (
 )
 
 const (
-	TELEMETRY_EVENT_PATH               = "API Call"
-	TELEMETRY_EVENT_USER               = "User"
-	TELEMETRY_EVENT_INPRODUCT_FEEDBACK = "InProduct Feeback Submitted"
-	TELEMETRY_EVENT_NUMBER_OF_SERVICES = "Number of Services"
-	TELEMETRY_EVENT_HEART_BEAT         = "Heart Beat"
-	TELEMETRY_EVENT_ORG_SETTINGS       = "Org Settings"
+	EventPath              = "API Call"
+	EventUser              = "User"
+	EventInproductFeedback = "InProduct Feedback Submitted"
+	EventNumberOfServices  = "Number of Services"
+	EventHeartBeat         = "Heart Beat"
+	EventOrgSettings       = "Org Settings"
 )
 
-const api_key = "4Gmoa4ixJAUHx2BpJxsjwA1bEfnwEeRz"
-const IP_NOT_FOUND_PLACEHOLDER = "NA"
+const writeKey = "4Gmoa4ixJAUHx2BpJxsjwA1bEfnwEeRz"
+const IpNotFoundPlaceholder = "NA"
 
-const HEART_BEAT_DURATION = 6 * time.Hour
+const HeartBeatDuration = 6 * time.Hour
 
 // const HEART_BEAT_DURATION = 10 * time.Second
 
@@ -45,15 +45,15 @@ type Telemetry struct {
 
 func createTelemetry() {
 	telemetry = &Telemetry{
-		operator:  analytics.New(api_key),
+		operator:  analytics.New(writeKey),
 		ipAddress: getOutboundIP(),
 	}
 
 	data := map[string]interface{}{}
 
 	telemetry.SetTelemetryEnabled(constants.IsTelemetryEnabled())
-	telemetry.SendEvent(TELEMETRY_EVENT_HEART_BEAT, data)
-	ticker := time.NewTicker(HEART_BEAT_DURATION)
+	telemetry.SendEvent(EventHeartBeat, data)
+	ticker := time.NewTicker(HeartBeatDuration)
 	go func() {
 		for {
 			select {
@@ -71,7 +71,7 @@ func createTelemetry() {
 				for key, value := range tsInfo {
 					data[key] = value
 				}
-				telemetry.SendEvent(TELEMETRY_EVENT_HEART_BEAT, data)
+				telemetry.SendEvent(EventHeartBeat, data)
 			}
 		}
 	}()
@@ -81,7 +81,7 @@ func createTelemetry() {
 
 // Get preferred outbound ip of this machine
 func getOutboundIP() string {
-	ip := []byte(IP_NOT_FOUND_PLACEHOLDER)
+	ip := []byte(IpNotFoundPlaceholder)
 	resp, err := http.Get("https://api.ipify.org?format=text")
 
 	if err != nil {
@@ -112,7 +112,7 @@ func (a *Telemetry) IdentifyUser(user *model.User) {
 }
 func (a *Telemetry) checkEvents(event string) bool {
 	sendEvent := true
-	if event == TELEMETRY_EVENT_USER && a.isTelemetryAnonymous() {
+	if event == EventUser && a.isTelemetryAnonymous() {
 		sendEvent = false
 	}
 	return sendEvent
@@ -139,7 +139,7 @@ func (a *Telemetry) SendEvent(event string, data map[string]interface{}) {
 	}
 
 	userId := a.ipAddress
-	if a.isTelemetryAnonymous() || userId == IP_NOT_FOUND_PLACEHOLDER {
+	if a.isTelemetryAnonymous() || userId == IpNotFoundPlaceholder {
 		userId = a.GetDistinctId()
 	}
 
diff --git a/pkg/query-service/tests/cold_storage_test.go b/pkg/query-service/tests/cold_storage_test.go
index a72e48e770..6ccacb3c74 100644
--- a/pkg/query-service/tests/cold_storage_test.go
+++ b/pkg/query-service/tests/cold_storage_test.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net/http"
+	"os"
 	"testing"
 	"time"
 
@@ -27,6 +28,9 @@ func setTTL(table, coldStorage, toColdTTL, deleteTTL string, jwtToken string) ([
 	}
 	var bearer = "Bearer " + jwtToken
 	req, err := http.NewRequest("POST", endpoint+"/api/v1/settings/ttl?"+params, nil)
+	if err != nil {
+		return nil, err
+	}
 	req.Header.Add("Authorization", bearer)
 
 	resp, err := client.Do(req)
@@ -128,6 +132,9 @@ func getTTL(t *testing.T, table string, jwtToken string) *model.GetTTLResponseIt
 
 	var bearer = "Bearer " + jwtToken
 	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
 	req.Header.Add("Authorization", bearer)
 
 	resp, err := client.Do(req)
@@ -214,5 +221,5 @@ func TestMain(m *testing.M) {
 	}
 	defer stopCluster()
 
-	m.Run()
+	os.Exit(m.Run())
 }
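A note on the TestMain change in cold_storage_test.go: wrapping m.Run() in os.Exit fixes the original bug (the suite's exit code was discarded, so test failures could not fail the process), but os.Exit terminates immediately and skips deferred calls, so the `defer stopCluster()` in the surrounding context will no longer run. If that teardown matters, the usual pattern is to make it explicit before exiting. A minimal sketch, assuming startCluster and stopCluster keep their existing shapes in this test package:

package tests

import (
	"fmt"
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	if err := startCluster(); err != nil {
		fmt.Println(err)
		return
	}

	code := m.Run() // run the whole suite first
	stopCluster()   // explicit teardown; a deferred call would be skipped by os.Exit
	os.Exit(code)   // propagate the suite's result to `go test`
}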