chore: remove references to unused flags (#7739)
### Summary

Remove references to unused flags.

parent a7cad0f1a5
commit 9815ec7d81
.github/workflows/README.md (vendored, 42 lines deleted)

@@ -1,42 +0,0 @@
-# Github actions
-
-## Testing the UI manually on each PR
-
-First we need to make sure the UI is ready:
-
-* Check the `Start tunnel` step in the `e2e-k8s/deploy-on-k3s-cluster` job and make sure you see `your url is: https://pull-<number>-signoz.loca.lt`
-* This job will run until the PR is merged or closed, to keep the local tunneling alive
-  - GitHub will cancel this job if the PR wasn't merged after 6h
-  - If the job was cancelled, go to the action and press `Re-run all jobs`
-
-Now you can open your browser at https://pull-<number>-signoz.loca.lt and check the UI.
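The check above is done by hand in a browser. As an illustration only (this sketch is not part of the removed README or of this commit), the same reachability check could be scripted; the URL pattern comes from the text above, everything else is an assumption.

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

// Probe the per-PR tunnel URL mentioned in the removed README
// (https://pull-<number>-signoz.loca.lt) and report whether it answers.
func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: tunnelcheck <pr-number>")
		os.Exit(2)
	}
	url := fmt.Sprintf("https://pull-%s-signoz.loca.lt", os.Args[1])

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		fmt.Printf("tunnel %s not reachable: %v\n", url, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	fmt.Printf("tunnel %s answered with HTTP %d\n", url, resp.StatusCode)
}
```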
-## Environment Variables
-
-To run the GitHub workflows, a few environment variables need to be added to GitHub secrets
-
-<table>
-<tr>
-<th> Variables </th>
-<th> Description </th>
-<th> Example </th>
-</tr>
-<tr>
-<td> REPONAME </td>
-<td> Provide the DockerHub user/organisation name of the image. </td>
-<td> signoz </td>
-</tr>
-<tr>
-<td> DOCKERHUB_USERNAME </td>
-<td> Docker hub username </td>
-<td> signoz </td>
-</tr>
-<tr>
-<td> DOCKERHUB_TOKEN </td>
-<td> Docker hub password/token with push permission </td>
-<td> **** </td>
-</tr>
-<tr>
-<td> SONAR_TOKEN </td>
-<td> <a href="https://sonarcloud.io">SonarCloud</a> token </td>
-<td> **** </td>
-</tr>
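Those values live in GitHub secrets; a step or helper tool consuming them would typically receive them as environment variables. The sketch below is illustrative only: the variable names come from the table above, while the assumption that the workflow maps each secret to an identically named environment variable is mine.

```go
package main

import (
	"fmt"
	"os"
)

// Verify the CI credentials listed in the removed README are present,
// assuming the workflow exposes the GitHub secrets as environment variables.
func main() {
	for _, name := range []string{"REPONAME", "DOCKERHUB_USERNAME", "DOCKERHUB_TOKEN", "SONAR_TOKEN"} {
		if _, ok := os.LookupEnv(name); !ok {
			fmt.Printf("missing required secret/env var %s\n", name)
			os.Exit(1)
		}
	}
	fmt.Println("all CI secrets are present")
}
```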
.github/workflows/remove-label.yaml (vendored, 16 lines deleted)

@@ -1,16 +0,0 @@
-name: remove-label
-
-on:
-  pull_request_target:
-    types: [synchronize]
-
-jobs:
-  remove:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Remove label testing-deploy from PR
-        uses: buildsville/add-remove-label@v2.0.0
-        with:
-          label: testing-deploy
-          type: remove
-          token: ${{ secrets.GITHUB_TOKEN }}
@@ -29,7 +29,6 @@ import (
 
 type APIHandlerOptions struct {
     DataConnector interfaces.DataConnector
-    SkipConfig *basemodel.SkipConfig
     PreferSpanMetrics bool
     AppDao dao.ModelDao
     RulesManager *rules.Manager
@@ -58,7 +57,6 @@ type APIHandler struct {
 func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
     baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
         Reader: opts.DataConnector,
-        SkipConfig: opts.SkipConfig,
         PreferSpanMetrics: opts.PreferSpanMetrics,
         AppDao: opts.AppDao,
         RuleManager: opts.RulesManager,
@@ -45,25 +45,17 @@ import (
     baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
     "github.com/SigNoz/signoz/pkg/query-service/healthcheck"
     baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
-    basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
     baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
     "github.com/SigNoz/signoz/pkg/query-service/telemetry"
     "github.com/SigNoz/signoz/pkg/query-service/utils"
     "go.uber.org/zap"
 )
 
-const AppDbEngine = "sqlite"
-
 type ServerOptions struct {
     Config signoz.Config
     SigNoz *signoz.SigNoz
-    PromConfigPath string
-    SkipTopLvlOpsPath string
     HTTPHostPort string
     PrivateHostPort string
-    // alert specific params
-    DisableRules bool
-    RuleRepoURL string
     PreferSpanMetrics bool
     CacheConfigPath string
     FluxInterval string
@@ -146,14 +138,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
         serverOptions.SigNoz.Cache,
     )
 
-    skipConfig := &basemodel.SkipConfig{}
-    if serverOptions.SkipTopLvlOpsPath != "" {
-        // read skip config
-        skipConfig, err = basemodel.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
-        if err != nil {
-            return nil, err
-        }
-    }
     var c cache.Cache
     if serverOptions.CacheConfigPath != "" {
         cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
@@ -164,11 +148,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
     }
 
     rm, err := makeRulesManager(
-        serverOptions.RuleRepoURL,
         serverOptions.SigNoz.SQLStore.SQLxDB(),
         reader,
         c,
-        serverOptions.DisableRules,
         serverOptions.UseLogsNewSchema,
         serverOptions.UseTraceNewSchema,
         serverOptions.SigNoz.Alertmanager,
@@ -238,7 +220,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 
     apiOpts := api.APIHandlerOptions{
         DataConnector: reader,
-        SkipConfig: skipConfig,
         PreferSpanMetrics: serverOptions.PreferSpanMetrics,
         AppDao: modelDao,
         RulesManager: rm,
@@ -411,13 +392,7 @@ func (s *Server) initListeners() error {
 
 // Start listening on http and private http port concurrently
 func (s *Server) Start(ctx context.Context) error {
-    // initiate rule manager first
-    if !s.serverOptions.DisableRules {
-        s.ruleManager.Start(ctx)
-    } else {
-        zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
-    }
-
+    s.ruleManager.Start(ctx)
     err := s.initListeners()
     if err != nil {
@@ -508,11 +483,9 @@ func (s *Server) Stop() error {
 }
 
 func makeRulesManager(
-    ruleRepoURL string,
     db *sqlx.DB,
     ch baseint.Reader,
     cache cache.Cache,
-    disableRules bool,
     useLogsNewSchema bool,
     useTraceNewSchema bool,
     alertmanager alertmanager.Alertmanager,
@@ -524,11 +497,9 @@ func makeRulesManager(
     managerOpts := &baserules.ManagerOptions{
         TelemetryStore: telemetryStore,
         Prometheus: prometheus,
-        RepoURL: ruleRepoURL,
         DBConn: db,
         Context: context.Background(),
         Logger: zap.L(),
-        DisableRules: disableRules,
         Reader: ch,
         Cache: cache,
         EvalDelay: baseconst.GetEvalDelay(),
@@ -52,19 +52,27 @@ func main() {
 
     flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
     flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
+    // Deprecated
     flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
+    // Deprecated
     flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
+    // Deprecated
     flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
     flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
+    // Deprecated
     flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
+    // Deprecated
     flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
+    // Deprecated
     flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
+    // Deprecated
     flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
     flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
     flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
     flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
     flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
     flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
+    // Deprecated
     flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
     flag.Parse()
 
@@ -121,12 +129,8 @@ func main() {
         Config: config,
         SigNoz: signoz,
         HTTPHostPort: baseconst.HTTPHostPort,
-        PromConfigPath: promConfigPath,
-        SkipTopLvlOpsPath: skipTopLvlOpsPath,
         PreferSpanMetrics: preferSpanMetrics,
         PrivateHostPort: baseconst.PrivateHostPort,
-        DisableRules: disableRules,
-        RuleRepoURL: ruleRepoURL,
         CacheConfigPath: cacheConfigPath,
         FluxInterval: fluxInterval,
         FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
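The hunk above only annotates the unused flags with `// Deprecated` comments while leaving them registered, so existing start-up commands keep parsing. As a hedged sketch (not code from this commit), the pattern below shows one way a flag in that state can stay accepted but ignored; the flag name is taken from the diff, the warning behaviour is an assumption.

```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	// Deprecated: parsed only for backwards compatibility, the value is never used.
	ruleRepoURL := flag.String("rules.repo-url", "", "(deprecated, ignored)")
	flag.Parse()

	if *ruleRepoURL != "" {
		fmt.Println("warning: -rules.repo-url is deprecated and has no effect")
	}
	// ... continue start-up without ever reading *ruleRepoURL again.
}
```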
@@ -316,8 +316,7 @@ func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, erro
     return &services, nil
 }
 
-func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context, skipConfig *model.SkipConfig, start, end time.Time, services []string) (*map[string][]string, *model.ApiError) {
-
+func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context, start, end time.Time, services []string) (*map[string][]string, *model.ApiError) {
     start = start.In(time.UTC)
 
     // The `top_level_operations` that have `time` >= start
@@ -347,9 +346,6 @@ func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context, skipConfig
         if _, ok := operations[serviceName]; !ok {
             operations[serviceName] = []string{"overflow_operation"}
         }
-        if skipConfig.ShouldSkip(serviceName, name) {
-            continue
-        }
         operations[serviceName] = append(operations[serviceName], name)
     }
     return &operations, nil
@@ -414,13 +410,13 @@ func (r *ClickHouseReader) buildResourceSubQuery(tags []model.TagQueryParam, svc
     return resourceSubQuery, nil
 }
 
-func (r *ClickHouseReader) GetServicesV2(ctx context.Context, queryParams *model.GetServicesParams, skipConfig *model.SkipConfig) (*[]model.ServiceItem, *model.ApiError) {
+func (r *ClickHouseReader) GetServicesV2(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceItem, *model.ApiError) {
 
     if r.indexTable == "" {
         return nil, &model.ApiError{Typ: model.ErrorExec, Err: ErrNoIndexTable}
     }
 
-    topLevelOps, apiErr := r.GetTopLevelOperations(ctx, skipConfig, *queryParams.Start, *queryParams.End, nil)
+    topLevelOps, apiErr := r.GetTopLevelOperations(ctx, *queryParams.Start, *queryParams.End, nil)
     if apiErr != nil {
         return nil, apiErr
     }
@@ -539,16 +535,16 @@ func (r *ClickHouseReader) GetServicesV2(ctx context.Context, queryParams *model
     return &serviceItems, nil
 }
 
-func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.GetServicesParams, skipConfig *model.SkipConfig) (*[]model.ServiceItem, *model.ApiError) {
+func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceItem, *model.ApiError) {
     if r.useTraceNewSchema {
-        return r.GetServicesV2(ctx, queryParams, skipConfig)
+        return r.GetServicesV2(ctx, queryParams)
     }
 
     if r.indexTable == "" {
         return nil, &model.ApiError{Typ: model.ErrorExec, Err: ErrNoIndexTable}
     }
 
-    topLevelOps, apiErr := r.GetTopLevelOperations(ctx, skipConfig, *queryParams.Start, *queryParams.End, nil)
+    topLevelOps, apiErr := r.GetTopLevelOperations(ctx, *queryParams.Start, *queryParams.End, nil)
     if apiErr != nil {
         return nil, apiErr
     }
@@ -89,7 +89,6 @@ func NewRouter() *mux.Router {
 // APIHandler implements the query service public API
 type APIHandler struct {
     reader interfaces.Reader
-    skipConfig *model.SkipConfig
     appDao dao.ModelDao
     ruleManager *rules.Manager
     featureFlags interfaces.FeatureLookup
@@ -152,8 +151,6 @@ type APIHandlerOpts struct {
     // business data reader e.g. clickhouse
     Reader interfaces.Reader
 
-    SkipConfig *model.SkipConfig
-
     PreferSpanMetrics bool
 
     // dao layer to perform crud on app objects like dashboard, alerts etc
@@ -233,7 +230,6 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
     aH := &APIHandler{
         reader: opts.Reader,
         appDao: opts.AppDao,
-        skipConfig: opts.SkipConfig,
         preferSpanMetrics: opts.PreferSpanMetrics,
         temporalityMap: make(map[string]map[v3.Temporality]bool),
         ruleManager: opts.RuleManager,
@@ -1684,7 +1680,7 @@ func (aH *APIHandler) getServicesTopLevelOps(w http.ResponseWriter, r *http.Requ
         end = time.Unix(0, endEpochInt)
     }
 
-    result, apiErr := aH.reader.GetTopLevelOperations(r.Context(), aH.skipConfig, start, end, services)
+    result, apiErr := aH.reader.GetTopLevelOperations(r.Context(), start, end, services)
     if apiErr != nil {
         RespondError(w, apiErr, nil)
         return
@@ -1700,7 +1696,7 @@ func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) {
         return
     }
 
-    result, apiErr := aH.reader.GetServices(r.Context(), query, aH.skipConfig)
+    result, apiErr := aH.reader.GetServices(r.Context(), query)
     if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
         return
     }
@@ -38,7 +38,6 @@ import (
     "github.com/SigNoz/signoz/pkg/query-service/featureManager"
     "github.com/SigNoz/signoz/pkg/query-service/healthcheck"
     "github.com/SigNoz/signoz/pkg/query-service/interfaces"
-    "github.com/SigNoz/signoz/pkg/query-service/model"
     "github.com/SigNoz/signoz/pkg/query-service/rules"
     "github.com/SigNoz/signoz/pkg/query-service/telemetry"
     "github.com/SigNoz/signoz/pkg/query-service/utils"
@@ -46,14 +45,9 @@ import (
 )
 
 type ServerOptions struct {
     Config signoz.Config
-    PromConfigPath string
-    SkipTopLvlOpsPath string
     HTTPHostPort string
     PrivateHostPort string
-    // alert specific params
-    DisableRules bool
-    RuleRepoURL string
     PreferSpanMetrics bool
     CacheConfigPath string
     FluxInterval string
@@ -122,15 +116,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
         serverOptions.SigNoz.Cache,
     )
 
-    skipConfig := &model.SkipConfig{}
-    if serverOptions.SkipTopLvlOpsPath != "" {
-        // read skip config
-        skipConfig, err = model.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
-        if err != nil {
-            return nil, err
-        }
-    }
-
     var c cache.Cache
     if serverOptions.CacheConfigPath != "" {
         cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
@@ -141,11 +126,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
     }
 
     rm, err := makeRulesManager(
-        serverOptions.RuleRepoURL,
         serverOptions.SigNoz.SQLStore.SQLxDB(),
         reader,
         c,
-        serverOptions.DisableRules,
         serverOptions.UseLogsNewSchema,
         serverOptions.UseTraceNewSchema,
         serverOptions.SigNoz.SQLStore,
@@ -181,7 +164,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
     telemetry.GetInstance().SetReader(reader)
     apiHandler, err := NewAPIHandler(APIHandlerOpts{
         Reader: reader,
-        SkipConfig: skipConfig,
         PreferSpanMetrics: serverOptions.PreferSpanMetrics,
         AppDao: dao.DB(),
         RuleManager: rm,
@@ -203,8 +185,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
     }
 
     s := &Server{
-        // logger: logger,
-        // tracer: tracer,
         ruleManager: rm,
         serverOptions: serverOptions,
         unavailableChannel: make(chan healthcheck.Status),
@@ -364,13 +344,7 @@ func (s *Server) initListeners() error {
 
 // Start listening on http and private http port concurrently
 func (s *Server) Start(ctx context.Context) error {
-    // initiate rule manager first
-    if !s.serverOptions.DisableRules {
-        s.ruleManager.Start(ctx)
-    } else {
-        zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
-    }
-
+    s.ruleManager.Start(ctx)
     err := s.initListeners()
     if err != nil {
@@ -458,11 +432,9 @@ func (s *Server) Stop(ctx context.Context) error {
 }
 
 func makeRulesManager(
-    ruleRepoURL string,
     db *sqlx.DB,
     ch interfaces.Reader,
     cache cache.Cache,
-    disableRules bool,
     useLogsNewSchema bool,
     useTraceNewSchema bool,
     sqlstore sqlstore.SQLStore,
@@ -473,11 +445,9 @@ func makeRulesManager(
     managerOpts := &rules.ManagerOptions{
         TelemetryStore: telemetryStore,
         Prometheus: prometheus,
-        RepoURL: ruleRepoURL,
         DBConn: db,
         Context: context.Background(),
         Logger: zap.L(),
-        DisableRules: disableRules,
         Reader: ch,
         Cache: cache,
         EvalDelay: constants.GetEvalDelay(),
@@ -18,10 +18,6 @@ const (
     OpAmpWsEndpoint = "0.0.0.0:4320" // address for opamp websocket
 )
 
-type ContextKey string
-
-const ContextUserKey ContextKey = "user"
-
 var DEFAULT_TELEMETRY_ANONYMOUS = false
 
 func IsOSSTelemetryEnabled() bool {
@@ -57,9 +53,6 @@ var TELEMETRY_ACTIVE_USER_DURATION_MINUTES = GetOrDefaultEnvInt("TELEMETRY_ACTIV
 
 var InviteEmailTemplate = GetOrDefaultEnv("INVITE_EMAIL_TEMPLATE", "/root/templates/invitation_email_template.html")
 
-// [Deprecated] SIGNOZ_LOCAL_DB_PATH is deprecated and scheduled for removal. Please use SIGNOZ_SQLSTORE_SQLITE_PATH instead.
-var RELATIONAL_DATASOURCE_PATH = GetOrDefaultEnv("SIGNOZ_LOCAL_DB_PATH", "/var/lib/signoz/signoz.db")
-
 var MetricsExplorerClickhouseThreads = GetOrDefaultEnvInt("METRICS_EXPLORER_CLICKHOUSE_THREADS", 8)
 var UpdatedMetricsMetadataCachePrefix = GetOrDefaultEnv("METRICS_UPDATED_METADATA_CACHE_KEY", "UPDATED_METRICS_METADATA")
 
@@ -15,8 +15,8 @@ import (
 type Reader interface {
     GetInstantQueryMetricsResult(ctx context.Context, query *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
     GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
-    GetTopLevelOperations(ctx context.Context, skipConfig *model.SkipConfig, start, end time.Time, services []string) (*map[string][]string, *model.ApiError)
-    GetServices(ctx context.Context, query *model.GetServicesParams, skipConfig *model.SkipConfig) (*[]model.ServiceItem, *model.ApiError)
+    GetTopLevelOperations(ctx context.Context, start, end time.Time, services []string) (*map[string][]string, *model.ApiError)
+    GetServices(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceItem, *model.ApiError)
     GetTopOperations(ctx context.Context, query *model.GetTopOperationsParams) (*[]model.TopOperationsItem, *model.ApiError)
     GetUsage(ctx context.Context, query *model.GetUsageParams) (*[]model.UsageItem, error)
     GetServicesList(ctx context.Context) (*[]string, error)
@@ -47,10 +47,14 @@ func main() {
 
     flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
     flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
+    // Deprecated
     flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
+    // Deprecated
     flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
+    // Deprecated
     flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
     flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
+    // Deprecated
     flag.StringVar(&ruleRepoURL, "rules.repo-url", constants.AlertHelpPage, "(host address used to build rule link in alert messages)")
     flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
     flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
@@ -58,8 +62,11 @@ func main() {
     flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
     // Allow using the consistent naming with the signoz collector
     flag.StringVar(&cluster, "cluster-name", "cluster", "(cluster name - defaults to 'cluster')")
+    // Deprecated
     flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool, only used with clickhouse if not set in ClickHouseUrl env var DSN.)")
+    // Deprecated
     flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time, only used with clickhouse if not set in ClickHouseUrl env var DSN.)")
+    // Deprecated
     flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection, only used with clickhouse if not set in ClickHouseUrl env var DSN.)")
     flag.Parse()
 
@@ -113,12 +120,8 @@ func main() {
     serverOptions := &app.ServerOptions{
         Config: config,
         HTTPHostPort: constants.HTTPHostPort,
-        PromConfigPath: promConfigPath,
-        SkipTopLvlOpsPath: skipTopLvlOpsPath,
         PreferSpanMetrics: preferSpanMetrics,
         PrivateHostPort: constants.PrivateHostPort,
-        DisableRules: disableRules,
-        RuleRepoURL: ruleRepoURL,
         CacheConfigPath: cacheConfigPath,
         FluxInterval: fluxInterval,
         FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
@@ -88,6 +88,11 @@ type ChangePasswordRequest struct {
     NewPassword string `json:"newPassword"`
 }
 
+type ResetPasswordRequest struct {
+    Password string `json:"password"`
+    Token    string `json:"token"`
+}
+
 type UserRole struct {
     UserId string `json:"user_id"`
     GroupName string `json:"group_name"`
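The relocated `ResetPasswordRequest` keeps its `json` tags, so request bodies decode exactly as before. A minimal, hypothetical decoding sketch follows; only the struct comes from the diff, the sample payload is made up.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the struct added above; the field tags determine the wire format.
type ResetPasswordRequest struct {
	Password string `json:"password"`
	Token    string `json:"token"`
}

func main() {
	body := []byte(`{"password":"new-secret","token":"invite-token-123"}`)

	var req ResetPasswordRequest
	if err := json.Unmarshal(body, &req); err != nil {
		panic(err)
	}
	fmt.Printf("token=%s, password set=%t\n", req.Token, req.Password != "")
}
```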
@@ -1,57 +0,0 @@
-package model
-
-import (
-    "os"
-
-    "gopkg.in/yaml.v2"
-)
-
-type SkipConfig struct {
-    Services []ServiceSkipConfig `yaml:"services"`
-}
-
-type ServiceSkipConfig struct {
-    Name string `yaml:"name"`
-    Operations []string `yaml:"operations"`
-}
-
-func (s *SkipConfig) ShouldSkip(serviceName, name string) bool {
-    for _, service := range s.Services {
-        if service.Name == serviceName {
-            for _, operation := range service.Operations {
-                if name == operation {
-                    return true
-                }
-            }
-        }
-    }
-    return false
-}
-
-func ReadYaml(path string, v interface{}) error {
-    f, err := os.Open(path)
-    if err != nil {
-        return err
-    }
-    defer f.Close()
-
-    decoder := yaml.NewDecoder(f)
-    err = decoder.Decode(v)
-    if err != nil {
-        return err
-    }
-    return nil
-}
-
-func ReadSkipConfig(path string) (*SkipConfig, error) {
-    if path == "" {
-        return &SkipConfig{}, nil
-    }
-
-    skipConfig := &SkipConfig{}
-    err := ReadYaml(path, skipConfig)
-    if err != nil {
-        return nil, err
-    }
-    return skipConfig, nil
-}
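For context, the loader deleted above parsed a YAML file (passed via the now-deprecated `-skip-top-level-ops` flag) and `ShouldSkip` filtered matching operations out of `GetTopLevelOperations`. The sketch below reproduces the removed types and shows, with a made-up YAML snippet inferred from the struct tags, how that filter behaved; it is illustrative only and not part of this commit.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Local copies of the removed types, kept only for this illustration.
type SkipConfig struct {
	Services []ServiceSkipConfig `yaml:"services"`
}

type ServiceSkipConfig struct {
	Name       string   `yaml:"name"`
	Operations []string `yaml:"operations"`
}

func (s *SkipConfig) ShouldSkip(serviceName, name string) bool {
	for _, service := range s.Services {
		if service.Name == serviceName {
			for _, operation := range service.Operations {
				if name == operation {
					return true
				}
			}
		}
	}
	return false
}

func main() {
	// Hypothetical skip config, shaped after the yaml tags above.
	raw := []byte(`
services:
  - name: frontend
    operations:
      - /health
`)

	var cfg SkipConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	fmt.Println(cfg.ShouldSkip("frontend", "/health")) // true:  operation was hidden
	fmt.Println(cfg.ShouldSkip("frontend", "/orders")) // false: operation was kept
}
```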
@@ -1,17 +0,0 @@
-package model
-
-import "time"
-
-type ResetPasswordRequest struct {
-    Password string `json:"password"`
-    Token string `json:"token"`
-}
-
-type IngestionKey struct {
-    KeyId string `json:"keyId" db:"key_id"`
-    Name string `json:"name" db:"name"`
-    CreatedAt time.Time `json:"createdAt" db:"created_at"`
-    IngestionKey string `json:"ingestionKey" db:"ingestion_key"`
-    IngestionURL string `json:"ingestionURL" db:"ingestion_url"`
-    DataRegion string `json:"dataRegion" db:"data_region"`
-}
@@ -84,18 +84,14 @@ func prepareTaskName(ruleId interface{}) string {
 type ManagerOptions struct {
     TelemetryStore telemetrystore.TelemetryStore
     Prometheus prometheus.Prometheus
-    // RepoURL is used to generate a backlink in sent alert messages
-    RepoURL string
-
     // rule db conn
     DBConn *sqlx.DB
 
     Context context.Context
     Logger *zap.Logger
     ResendDelay time.Duration
-    DisableRules bool
     Reader interfaces.Reader
     Cache cache.Cache
 
     EvalDelay time.Duration
 
@@ -395,11 +391,9 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, idStr string) er
         return err
     }
 
-    if !m.opts.DisableRules {
-        err = m.syncRuleStateWithTask(ctx, claims.OrgID, prepareTaskName(existingRule.ID.StringValue()), parsedRule)
-        if err != nil {
-            return err
-        }
+    err = m.syncRuleStateWithTask(ctx, claims.OrgID, prepareTaskName(existingRule.ID.StringValue()), parsedRule)
+    if err != nil {
+        return err
     }
 
     return nil
@@ -496,9 +490,7 @@ func (m *Manager) DeleteRule(ctx context.Context, idStr string) error {
         }
 
         taskName := prepareTaskName(id.StringValue())
-        if !m.opts.DisableRules {
-            m.deleteTask(taskName)
-        }
+        m.deleteTask(taskName)
 
         return nil
     })
@@ -581,10 +573,8 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*ruletypes.Ge
     }
 
     taskName := prepareTaskName(id.StringValue())
-    if !m.opts.DisableRules {
-        if err := m.addTask(ctx, claims.OrgID, parsedRule, taskName); err != nil {
-            return err
-        }
+    if err := m.addTask(ctx, claims.OrgID, parsedRule, taskName); err != nil {
+        return err
     }
 
     return nil
@@ -724,9 +714,6 @@ func (m *Manager) prepareNotifyFunc() NotifyFunc {
 
     for _, alert := range alerts {
         generatorURL := alert.GeneratorURL
-        if generatorURL == "" {
-            generatorURL = m.opts.RepoURL
-        }
 
         a := &alertmanagertypes.PostableAlert{
             Annotations: alert.Annotations.Map(),
@@ -759,9 +746,6 @@ func (m *Manager) prepareTestNotifyFunc() NotifyFunc {
 
     alert := alerts[0]
     generatorURL := alert.GeneratorURL
-    if generatorURL == "" {
-        generatorURL = m.opts.RepoURL
-    }
 
     a := &alertmanagertypes.PostableAlert{
         Annotations: alert.Annotations.Map(),