Nityananda Gohain 67058b2a17
feat: trace v4 integration (#6226)
* feat: trace v4 initial commit

* fix: add remaining files

* fix: integrate with querier

* fix: get trace by id api updated

* fix: add servicename resource filter

* fix: tests

* fix: use correct prepQuery

* fix: services page

* fix: minor fixes to use the new table in APIs and querier

* fix: add support for window based pagination

* feat: support for faster trace detail

* fix: searchTraces

* fix: attribute enrichment updated and issue in group by

* fix: issues in group by

* fix: enrichment using alias

* fix: test file added

* fix: tests

* fix: group by with filters

* fix: add subquery

* fix: trigger build

* fix: update pagination logic and a few ClickHouse column names

* fix: update qb

* fix: add tests

* feat: minor fixes

* fix: update pagination logic

* fix: update pagination logic

* fix: remove utils

* fix: remove unwanted APIs

* fix: attribute and attribute values v2

* fix: autocomplete APIs updated

* fix: tests fixed

* feat: minor fixes

* fix: update telemetry functions

* fix: don't use aliases, use proper column names

* fix: move models to their own file

* fix: minor fixes

* fix: address comments

* fix: add to serviceoverview function

* fix: add changes to overview function

* fix: address comments

* fix: remove printlines

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-11-22 12:00:29 +05:30

package main

import (
	"context"
	"flag"
	"os"
	"os/signal"
	"syscall"
	"time"

	prommodel "github.com/prometheus/common/model"

	"go.signoz.io/signoz/pkg/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/auth"
	"go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/migrate"
	"go.signoz.io/signoz/pkg/query-service/version"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)
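
// initZapLog builds the production zap logger used by the service, with
// colored level encoding and ISO8601 timestamps.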
func initZapLog() *zap.Logger {
	config := zap.NewProductionConfig()
	config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	logger, _ := config.Build()
	return logger
}
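
// Allow UTF-8 characters in Prometheus metric and label names.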
func init() {
	prommodel.NameValidationScheme = prommodel.UTF8Validation
}

func main() {
	var promConfigPath, skipTopLvlOpsPath string
	// disables rule execution but allows change to the rule definition
	var disableRules bool
	var useLogsNewSchema bool
	var useTraceNewSchema bool
	// the url used to build link in the alert messages in slack and other systems
	var ruleRepoURL, cacheConfigPath, fluxInterval string
	var cluster string
	var preferSpanMetrics bool
	var maxIdleConns int
	var maxOpenConns int
	var dialTimeout time.Duration
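
	// Command-line flags; the connection-pool and dial-timeout flags apply only
	// when those settings are not already present in the ClickHouseUrl env var DSN.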
	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
	flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
	flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
	flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
	flag.StringVar(&ruleRepoURL, "rules.repo-url", constants.AlertHelpPage, "(host address used to build rule link in alert messages)")
	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
	// Allow using the consistent naming with the signoz collector
	flag.StringVar(&cluster, "cluster-name", "cluster", "(cluster name - defaults to 'cluster')")
	flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool, only used with clickhouse if not set in ClickHouseUrl env var DSN.)")
	flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time, only used with clickhouse if not set in ClickHouseUrl env var DSN.)")
	flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection, only used with clickhouse if not set in ClickHouseUrl env var DSN.)")
	flag.Parse()
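
	// Install the global zap logger and print version information at startup.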
	loggerMgr := initZapLog()
	zap.ReplaceGlobals(loggerMgr)
	defer loggerMgr.Sync() // flushes buffer, if any
	logger := loggerMgr.Sugar()

	version.PrintVersion()
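
	// Collect the parsed flags into the options passed to app.NewServer.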
	serverOptions := &app.ServerOptions{
		HTTPHostPort:      constants.HTTPHostPort,
		PromConfigPath:    promConfigPath,
		SkipTopLvlOpsPath: skipTopLvlOpsPath,
		PreferSpanMetrics: preferSpanMetrics,
		PrivateHostPort:   constants.PrivateHostPort,
		DisableRules:      disableRules,
		RuleRepoURL:       ruleRepoURL,
		MaxIdleConns:      maxIdleConns,
		MaxOpenConns:      maxOpenConns,
		DialTimeout:       dialTimeout,
		CacheConfigPath:   cacheConfigPath,
		FluxInterval:      fluxInterval,
		Cluster:           cluster,
		UseLogsNewSchema:  useLogsNewSchema,
		UseTraceNewSchema: useTraceNewSchema,
	}

	// Read the jwt secret key
	auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
	if len(auth.JwtSecret) == 0 {
		zap.L().Warn("No JWT secret key is specified.")
	} else {
		zap.L().Info("JWT secret key set successfully.")
	}

	if err := migrate.Migrate(constants.RELATIONAL_DATASOURCE_PATH); err != nil {
		zap.L().Error("Failed to migrate", zap.Error(err))
	} else {
		zap.L().Info("Migration successful")
	}
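
	// Create and start the query-service server, then initialize the auth cache.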
	server, err := app.NewServer(serverOptions)
	if err != nil {
		logger.Fatal("Failed to create server", zap.Error(err))
	}

	if err := server.Start(); err != nil {
		logger.Fatal("Could not start servers", zap.Error(err))
	}

	if err := auth.InitAuthCache(context.Background()); err != nil {
		logger.Fatal("Failed to initialize auth cache", zap.Error(err))
	}
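
	// Wait for health-check status updates or an interrupt/SIGTERM; on a signal,
	// stop the server and exit.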
	signalsChannel := make(chan os.Signal, 1)
	signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)

	for {
		select {
		case status := <-server.HealthCheckStatus():
			logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
		case <-signalsChannel:
			logger.Info("Received OS Interrupt Signal ... ")
			err := server.Stop()
			if err != nil {
				logger.Fatal("Failed to stop server", zap.Error(err))
			}
			logger.Info("Server stopped")
			return
		}
	}
}