chore: fix query-service logging (#4696)

Srikanth Chekuri 2024-03-27 00:07:29 +05:30 committed by GitHub
parent 9e02147d4c
commit ae594061e9
63 changed files with 689 additions and 737 deletions
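
The diff below replaces zap's global SugaredLogger (zap.S()) with the strongly typed global Logger (zap.L()) across the query service, turning printf-style messages into structured fields. A minimal sketch of the pattern, assuming a hypothetical helper with a query string and an err in scope:

package main

import "go.uber.org/zap"

func logQueryFailure(query string, err error) {
	// Before: sugared, printf-style logging; format verbs and arguments are easy to mismatch.
	zap.S().Errorf("error in processing query %s: %v", query, err)

	// After: typed logger with explicit fields; keys become structured, queryable attributes.
	zap.L().Error("error in processing query", zap.String("query", query), zap.Error(err))
}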

View File

@ -74,7 +74,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
requestBody, err := io.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("received no input in api\n", err)
zap.L().Error("received no input in api", zap.Error(err))
RespondError(w, model.BadRequest(err), nil)
return
}
@ -82,7 +82,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
err = json.Unmarshal(requestBody, &req)
if err != nil {
zap.S().Errorf("received invalid user registration request", zap.Error(err))
zap.L().Error("received invalid user registration request", zap.Error(err))
RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
return
}
@ -90,13 +90,13 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
// get invite object
invite, err := baseauth.ValidateInvite(ctx, req)
if err != nil {
zap.S().Errorf("failed to validate invite token", err)
zap.L().Error("failed to validate invite token", zap.Error(err))
RespondError(w, model.BadRequest(err), nil)
return
}
if invite == nil {
zap.S().Errorf("failed to validate invite token: it is either empty or invalid", err)
zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err))
RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
return
}
@ -104,7 +104,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
// get auth domain from email domain
domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
if apierr != nil {
zap.S().Errorf("failed to get domain from email", apierr)
zap.L().Error("failed to get domain from email", zap.Error(apierr))
RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
}
@ -205,24 +205,24 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
ctx := context.Background()
if !ah.CheckFeature(model.SSO) {
zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO)
zap.L().Error("[receiveGoogleAuth] sso requested but feature unavailable in org domain")
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
q := r.URL.Query()
if errType := q.Get("error"); errType != "" {
zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description"))
zap.L().Error("[receiveGoogleAuth] failed to login with google auth", zap.String("error", errType), zap.String("error_description", q.Get("error_description")))
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently)
return
}
relayState := q.Get("state")
zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
zap.L().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState))
parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" {
zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r)
zap.L().Error("[receiveGoogleAuth] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
handleSsoError(w, r, redirectUri)
return
}
@ -244,14 +244,14 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
identity, err := callbackHandler.HandleCallback(r)
if err != nil {
zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err))
zap.L().Error("[receiveGoogleAuth] failed to process HandleCallback ", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
if err != nil {
zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
@ -266,14 +266,14 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
if !ah.CheckFeature(model.SSO) {
zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO)
zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
return
}
err := r.ParseForm()
if err != nil {
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
handleSsoError(w, r, redirectUri)
return
}
@ -281,11 +281,11 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
// the relay state is sent when a login request is submitted to
// Idp.
relayState := r.FormValue("RelayState")
zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState))
zap.L().Debug("[receiveML] relay state", zap.String("relayState", relayState))
parsedState, err := url.Parse(relayState)
if err != nil || relayState == "" {
zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r)
zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
handleSsoError(w, r, redirectUri)
return
}
@ -302,34 +302,34 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
sp, err := domain.PrepareSamlRequest(parsedState)
if err != nil {
zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err)
zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
if err != nil {
zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err)
zap.L().Error("[receiveSAML] failed to retrieve assertion info from saml response", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
if assertionInfo.WarningInfo.InvalidTime {
zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err)
zap.L().Error("[receiveSAML] expired saml response", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}
email := assertionInfo.NameID
if email == "" {
zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String())
zap.L().Error("[receiveSAML] invalid email in the SSO response", zap.String("domain", domain.String()))
handleSsoError(w, r, redirectUri)
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
if err != nil {
zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err))
zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
return
}

View File

@ -191,7 +191,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
zap.S().Error("Error while creating request for trial details", err)
zap.L().Error("Error while creating request for trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid blocking the UI
ah.Respond(w, resp)
@ -200,7 +200,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
trialResp, err := hClient.Do(req)
if err != nil {
zap.S().Error("Error while fetching trial details", err)
zap.L().Error("Error while fetching trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
@ -211,7 +211,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
trialRespBody, err := io.ReadAll(trialResp.Body)
if err != nil || trialResp.StatusCode != http.StatusOK {
zap.S().Error("Error while fetching trial details", err)
zap.L().Error("Error while fetching trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
@ -222,7 +222,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
var trialRespData model.SubscriptionServerResp
if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil {
zap.S().Error("Error while decoding trial details", err)
zap.L().Error("Error while decoding trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)

View File

@ -18,14 +18,14 @@ import (
func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(basemodel.CustomMetricsFunction) {
zap.S().Info("CustomMetricsFunction feature is not enabled in this plan")
zap.L().Info("CustomMetricsFunction feature is not enabled in this plan")
ah.APIHandler.QueryRangeMetricsV2(w, r)
return
}
metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
if apiErrorObj != nil {
zap.S().Errorf(apiErrorObj.Err.Error())
zap.L().Error("Error in parsing metric query params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}

View File

@ -43,8 +43,8 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
return
}
pat := model.PAT{
Name: req.Name,
Role: req.Role,
Name: req.Name,
Role: req.Role,
ExpiresAt: req.ExpiresInDays,
}
err = validatePATRequest(pat)
@ -65,7 +65,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
pat.ExpiresAt = time.Now().Unix() + (pat.ExpiresAt * 24 * 60 * 60)
}
zap.S().Debugf("Got Create PAT request: %+v", pat)
zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
var apierr basemodel.BaseApiError
if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
RespondError(w, apierr, nil)
@ -115,7 +115,7 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
req.UpdatedByUserID = user.Id
id := mux.Vars(r)["id"]
req.UpdatedAt = time.Now().Unix()
zap.S().Debugf("Got Update PAT request: %+v", req)
zap.L().Info("Got Update PAT request", zap.Any("pat", req))
var apierr basemodel.BaseApiError
if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
RespondError(w, apierr, nil)
@ -135,7 +135,7 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
}, nil)
return
}
zap.S().Infof("Get PATs for user: %+v", user.Id)
zap.L().Info("Get PATs for user", zap.String("user_id", user.Id))
pats, apierr := ah.AppDao().ListPATs(ctx)
if apierr != nil {
RespondError(w, apierr, nil)
@ -156,7 +156,7 @@ func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
return
}
zap.S().Debugf("Revoke PAT with id: %+v", id)
zap.L().Info("Revoke PAT with id", zap.String("id", id))
if apierr := ah.AppDao().RevokePAT(ctx, id, user.Id); apierr != nil {
RespondError(w, apierr, nil)
return

View File

@ -15,7 +15,7 @@ import (
func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(basemodel.SmartTraceDetail) {
zap.S().Info("SmartTraceDetail feature is not enabled in this plan")
zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
ah.APIHandler.SearchTraces(w, r)
return
}
@ -26,7 +26,7 @@ func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
}
spanLimit, err := strconv.Atoi(constants.SpanLimitStr)
if err != nil {
zap.S().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable: ", err)
zap.L().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable", zap.Error(err))
return
}
result, err := ah.opts.DataConnector.SearchTraces(r.Context(), traceId, spanId, levelUpInt, levelDownInt, spanLimit, db.SmartTraceAlgorithm)

View File

@ -22,7 +22,7 @@ import (
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
defer utils.Elapsed("GetMetricResult")()
zap.S().Infof("Executing metric result query: %s", query)
zap.L().Info("Executing metric result query: ", zap.String("query", query))
var hash string
// If getSubTreeSpans function is used in the clickhouse query
@ -38,9 +38,8 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
}
rows, err := r.conn.Query(ctx, query)
zap.S().Debug(query)
if err != nil {
zap.S().Debug("Error in processing query: ", err)
zap.L().Error("Error in processing query", zap.Error(err))
return nil, "", fmt.Errorf("error in processing query")
}
@ -117,7 +116,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
}
default:
zap.S().Errorf("invalid var found in metric builder query result", v, colName)
zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName))
}
}
sort.Strings(groupBy)
@ -140,7 +139,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
}
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
// if err != nil {
// zap.S().Error("Error in dropping temporary table: ", err)
// zap.L().Error("Error in dropping temporary table: ", err)
// return nil, err
// }
if hash == "" {
@ -152,7 +151,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string)
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
zap.S().Debugf("Executing getSubTreeSpans function")
zap.L().Debug("Executing getSubTreeSpans function")
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
@ -162,28 +161,28 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
if err != nil {
zap.S().Error("Error in dropping temporary table: ", err)
zap.L().Error("Error in dropping temporary table", zap.Error(err))
return query, hash, err
}
// Create temporary table to store the getSubTreeSpans() results
zap.S().Debugf("Creating temporary table getSubTreeSpans%s", hash)
zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash))
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
if err != nil {
zap.S().Error("Error in creating temporary table: ", err)
zap.L().Error("Error in creating temporary table", zap.Error(err))
return query, hash, err
}
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
getSpansSubQuery := subtreeInput
// Execute the subTree query
zap.S().Debugf("Executing subTree query: %s", getSpansSubQuery)
zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery))
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
// zap.S().Info(getSpansSubQuery)
// zap.L().Info(getSpansSubQuery)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, fmt.Errorf("Error in processing sql query")
}
@ -196,16 +195,16 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
if len(getSpansSubQueryDBResponses) == 0 {
return query, hash, fmt.Errorf("No spans found for the given query")
}
zap.S().Debugf("Executing query to fetch all the spans from the same TraceID: %s", modelQuery)
zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", zap.String("modelQuery", modelQuery))
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, fmt.Errorf("Error in processing sql query")
}
// Process model to fetch the spans
zap.S().Debugf("Processing model to fetch the spans")
zap.L().Debug("Processing model to fetch the spans")
searchSpanResponses := []basemodel.SearchSpanResponseItem{}
for _, item := range searchScanResponses {
var jsonItem basemodel.SearchSpanResponseItem
@ -218,17 +217,17 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
}
// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
// Use map to store pointer to the spans to avoid duplicates and save memory
zap.S().Debugf("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans%s", hash)
zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
if err != nil {
zap.S().Error("Error in getSubTreeAlgorithm function: ", err)
zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err))
return query, hash, err
}
zap.S().Debugf("Preparing batch to store subtree spans in temporary table getSubTreeSpans%s", hash)
zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash))
if err != nil {
zap.S().Error("Error in preparing batch statement: ", err)
zap.L().Error("Error in preparing batch statement", zap.Error(err))
return query, hash, err
}
for _, span := range treeSearchResponse {
@ -251,14 +250,14 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu
span.Events,
)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, err
}
}
zap.S().Debugf("Inserting the subtree spans in temporary table getSubTreeSpans%s", hash)
zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
err = statement.Send()
if err != nil {
zap.S().Error("Error in sending statement: ", err)
zap.L().Error("Error in sending statement", zap.Error(err))
return query, hash, err
}
return query, hash, nil
@ -323,7 +322,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
spans = append(spans, span)
}
zap.S().Debug("Building Tree")
zap.L().Debug("Building Tree")
roots, err := buildSpanTrees(&spans)
if err != nil {
return nil, err
@ -333,7 +332,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
// For each root, get the subtree spans
for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
targetSpan := &model.SpanForTraceDetails{}
// zap.S().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
// zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
// Search target span object in the tree
for _, root := range roots {
targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
@ -341,7 +340,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub
break
}
if err != nil {
zap.S().Error("Error during BreadthFirstSearch(): ", err)
zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
return nil, err
}
}

View File

@ -49,7 +49,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
break
}
if err != nil {
zap.S().Error("Error during BreadthFirstSearch(): ", err)
zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
return nil, err
}
}
@ -186,7 +186,7 @@ func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTra
// If the parent span is not found, add current span to list of roots
if parent == nil {
// zap.S().Debug("Parent Span not found parent_id: ", span.ParentID)
// zap.L().Debug("Parent Span not found parent_id: ", span.ParentID)
roots = append(roots, span)
span.ParentID = ""
continue

View File

@ -134,7 +134,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
var reader interfaces.DataConnector
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
zap.S().Info("Using ClickHouse as datastore ...")
zap.L().Info("Using ClickHouse as datastore ...")
qb := db.NewDataConnector(
localDB,
serverOptions.PromConfigPath,
@ -525,7 +525,7 @@ func (s *Server) initListeners() error {
return err
}
zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
// listen on private port to support internal services
privateHostPort := s.serverOptions.PrivateHostPort
@ -538,7 +538,7 @@ func (s *Server) initListeners() error {
if err != nil {
return err
}
zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
return nil
}
@ -550,7 +550,7 @@ func (s *Server) Start() error {
if !s.serverOptions.DisableRules {
s.ruleManager.Start()
} else {
zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
}
err := s.initListeners()
@ -564,23 +564,23 @@ func (s *Server) Start() error {
}
go func() {
zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
switch err := s.httpServer.Serve(s.httpConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
default:
zap.S().Error("Could not start HTTP server", zap.Error(err))
zap.L().Error("Could not start HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
}()
go func() {
zap.S().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
zap.L().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort))
err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
if err != nil {
zap.S().Error("Could not start pprof server", zap.Error(err))
zap.L().Error("Could not start pprof server", zap.Error(err))
}
}()
@ -590,14 +590,14 @@ func (s *Server) Start() error {
}
go func() {
zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
switch err := s.privateHTTP.Serve(s.privateConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
zap.S().Info("private http server closed")
zap.L().Info("private http server closed")
default:
zap.S().Error("Could not start private HTTP server", zap.Error(err))
zap.L().Error("Could not start private HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
@ -605,10 +605,10 @@ func (s *Server) Start() error {
}()
go func() {
zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
if err != nil {
zap.S().Info("opamp ws server failed to start", err)
zap.L().Error("opamp ws server failed to start", zap.Error(err))
s.unavailableChannel <- healthcheck.Unavailable
}
}()
@ -684,7 +684,7 @@ func makeRulesManager(
return nil, fmt.Errorf("rule manager error: %v", err)
}
zap.S().Info("rules manager is ready")
zap.L().Info("rules manager is ready")
return manager, nil
}

View File

@ -17,25 +17,25 @@ import (
func GetUserFromRequest(r *http.Request, apiHandler *api.APIHandler) (*basemodel.UserPayload, error) {
patToken := r.Header.Get("SIGNOZ-API-KEY")
if len(patToken) > 0 {
zap.S().Debugf("Received a non-zero length PAT token")
zap.L().Debug("Received a non-zero length PAT token")
ctx := context.Background()
dao := apiHandler.AppDao()
pat, err := dao.GetPAT(ctx, patToken)
if err == nil && pat != nil {
zap.S().Debugf("Found valid PAT: %+v", pat)
zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat))
if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
zap.S().Debugf("PAT has expired: %+v", pat)
zap.L().Info("PAT has expired: ", zap.Any("pat", pat))
return nil, fmt.Errorf("PAT has expired")
}
group, apiErr := dao.GetGroupByName(ctx, pat.Role)
if apiErr != nil {
zap.S().Debugf("Error while getting group for PAT: %+v", apiErr)
zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr))
return nil, apiErr
}
user, err := dao.GetUser(ctx, pat.UserID)
if err != nil {
zap.S().Debugf("Error while getting user for PAT: %+v", err)
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
return nil, err
}
telemetry.GetInstance().SetPatTokenUser()
@ -48,7 +48,7 @@ func GetUserFromRequest(r *http.Request, apiHandler *api.APIHandler) (*basemodel
}, nil
}
if err != nil {
zap.S().Debugf("Error while getting user for PAT: %+v", err)
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
return nil, err
}
}

View File

@ -22,19 +22,19 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
zap.S().Errorf("failed to get domain from email", apierr)
zap.L().Error("failed to get domain from email", zap.Error(apierr))
return nil, model.InternalErrorStr("failed to get domain from email")
}
hash, err := baseauth.PasswordHash(utils.GeneratePassowrd())
if err != nil {
zap.S().Errorf("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err))
return nil, model.InternalErrorStr("failed to generate password hash")
}
group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
if apiErr != nil {
zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
zap.L().Error("GetGroupByName failed", zap.Error(apiErr))
return nil, apiErr
}
@ -51,7 +51,7 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
user, apiErr = m.CreateUser(ctx, user, false)
if apiErr != nil {
zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err)
zap.L().Error("CreateUser failed", zap.Error(apiErr))
return nil, apiErr
}
@ -65,7 +65,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
zap.S().Errorf(" failed to get user with email received from auth provider", apierr.Error())
zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error()))
return "", model.BadRequestStr("invalid user email received from the auth provider")
}
@ -75,7 +75,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
user = newUser
if apiErr != nil {
zap.S().Errorf("failed to create user with email received from auth provider: %v", apierr.Error())
zap.L().Error("failed to create user with email received from auth provider", zap.Error(apiErr))
return "", apiErr
}
} else {
@ -84,7 +84,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
tokenStore, err := baseauth.GenerateJWTForUser(user)
if err != nil {
zap.S().Errorf("failed to generate token for SSO login user", err)
zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
return "", model.InternalErrorStr("failed to generate token for the user")
}
@ -143,8 +143,8 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
// do nothing, just skip sso
ssoAvailable = false
default:
zap.S().Errorf("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
return resp, model.BadRequest(err)
zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
return resp, model.BadRequestStr(err.Error())
}
}
@ -160,7 +160,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
if len(emailComponents) > 0 {
emailDomain = emailComponents[1]
}
zap.S().Errorf("failed to get org domain from email", zap.String("emailDomain", emailDomain), apierr.ToError())
zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
return resp, apierr
}
@ -176,7 +176,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
escapedUrl, _ := url.QueryUnescape(sourceUrl)
siteUrl, err := url.Parse(escapedUrl)
if err != nil {
zap.S().Errorf("failed to parse referer", err)
zap.L().Error("failed to parse referer", zap.Error(err))
return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
}
@ -185,7 +185,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)
if err != nil {
zap.S().Errorf("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), err)
zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err))
return resp, model.InternalError(err)
}

View File

@ -48,13 +48,13 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
if domainIdStr != "" {
domainId, err := uuid.Parse(domainIdStr)
if err != nil {
zap.S().Errorf("failed to parse domainId from relay state", err)
zap.L().Error("failed to parse domainId from relay state", zap.Error(err))
return nil, fmt.Errorf("failed to parse domainId from IdP response")
}
domain, err = m.GetDomain(ctx, domainId)
if (err != nil) || domain == nil {
zap.S().Errorf("failed to find domain from domainId received in IdP response", err.Error())
zap.L().Error("failed to find domain from domainId received in IdP response", zap.Error(err))
return nil, fmt.Errorf("invalid credentials")
}
}
@ -64,7 +64,7 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
domainFromDB, err := m.GetDomainByName(ctx, domainNameStr)
domain = domainFromDB
if (err != nil) || domain == nil {
zap.S().Errorf("failed to find domain from domainName received in IdP response", err.Error())
zap.L().Error("failed to find domain from domainName received in IdP response", zap.Error(err))
return nil, fmt.Errorf("invalid credentials")
}
}
@ -132,7 +132,7 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo
for _, s := range stored {
domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
if err := domain.LoadConfig(s.Data); err != nil {
zap.S().Errorf("ListDomains() failed", zap.Error(err))
zap.L().Error("ListDomains() failed", zap.Error(err))
}
domains = append(domains, domain)
}
@ -153,7 +153,7 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
configJson, err := json.Marshal(domain)
if err != nil {
zap.S().Errorf("failed to unmarshal domain config", zap.Error(err))
zap.L().Error("failed to unmarshal domain config", zap.Error(err))
return model.InternalError(fmt.Errorf("domain creation failed"))
}
@ -167,7 +167,7 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
time.Now().Unix())
if err != nil {
zap.S().Errorf("failed to insert domain in db", zap.Error(err))
zap.L().Error("failed to insert domain in db", zap.Error(err))
return model.InternalError(fmt.Errorf("domain creation failed"))
}
@ -178,13 +178,13 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
if domain.Id == uuid.Nil {
zap.S().Errorf("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain update failed"))
}
configJson, err := json.Marshal(domain)
if err != nil {
zap.S().Errorf("domain update failed", zap.Error(err))
zap.L().Error("domain update failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain update failed"))
}
@ -195,7 +195,7 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba
domain.Id)
if err != nil {
zap.S().Errorf("domain update failed", zap.Error(err))
zap.L().Error("domain update failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain update failed"))
}
@ -206,7 +206,7 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba
func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
if id == uuid.Nil {
zap.S().Errorf("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain delete failed"))
}
@ -215,7 +215,7 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
id)
if err != nil {
zap.S().Errorf("domain delete failed", zap.Error(err))
zap.L().Error("domain delete failed", zap.Error(err))
return model.InternalError(fmt.Errorf("domain delete failed"))
}

View File

@ -26,12 +26,12 @@ func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem
p.Revoked,
)
if err != nil {
zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err))
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
}
id, err := result.LastInsertId()
if err != nil {
zap.S().Errorf("Failed to get last inserted id, err: %v", zap.Error(err))
zap.L().Error("Failed to get last inserted id, err: %v", zap.Error(err))
return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed"))
}
p.Id = strconv.Itoa(int(id))
@ -62,7 +62,7 @@ func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemo
p.UpdatedByUserID,
id)
if err != nil {
zap.S().Errorf("Failed to update PAT in db, err: %v", zap.Error(err))
zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT update failed"))
}
return nil
@ -74,7 +74,7 @@ func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed
lastUsed,
token)
if err != nil {
zap.S().Errorf("Failed to update PAT last used in db, err: %v", zap.Error(err))
zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT last used update failed"))
}
return nil
@ -84,7 +84,7 @@ func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApi
pats := []model.PAT{}
if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
zap.S().Errorf("Failed to fetch PATs err: %v", zap.Error(err))
zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err))
return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
}
for i := range pats {
@ -129,7 +129,7 @@ func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) base
"UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3",
userID, updatedAt, id)
if err != nil {
zap.S().Errorf("Failed to revoke PAT in db, err: %v", zap.Error(err))
zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err))
return model.InternalError(fmt.Errorf("PAT revoke failed"))
}
return nil

View File

@ -47,13 +47,13 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))
if err != nil {
zap.S().Errorf("failed to connect to license.signoz.io", err)
zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
}
httpBody, err := io.ReadAll(httpResponse.Body)
if err != nil {
zap.S().Errorf("failed to read activation response from license.signoz.io", err)
zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
}
@ -63,7 +63,7 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError)
result := ActivationResult{}
err = json.Unmarshal(httpBody, &result)
if err != nil {
zap.S().Errorf("failed to marshal activation response from license.signoz.io", err)
zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
}

View File

@ -97,7 +97,7 @@ func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
l.ValidationMessage)
if err != nil {
zap.S().Errorf("error in inserting license data: ", zap.Error(err))
zap.L().Error("error in inserting license data: ", zap.Error(err))
return fmt.Errorf("failed to insert license in db: %v", err)
}
@ -121,7 +121,7 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context,
_, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)
if err != nil {
zap.S().Errorf("error in updating license: ", zap.Error(err))
zap.L().Error("error in updating license: ", zap.Error(err))
return fmt.Errorf("failed to update license in db: %v", err)
}

View File

@ -100,7 +100,7 @@ func (lm *Manager) SetActive(l *model.License) {
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.S().Panicf("Couldn't activate features: %v", err)
zap.L().Panic("Couldn't activate features", zap.Error(err))
}
if !lm.validatorRunning {
// we want to make sure only one validator runs,
@ -125,13 +125,13 @@ func (lm *Manager) LoadActiveLicense() error {
if active != nil {
lm.SetActive(active)
} else {
zap.S().Info("No active license found, defaulting to basic plan")
zap.L().Info("No active license found, defaulting to basic plan")
// if no active license is found, we default to basic(free) plan with all default features
lm.activeFeatures = model.BasicPlan
setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.S().Error("Couldn't initialize features: ", err)
zap.L().Error("Couldn't initialize features", zap.Error(err))
return err
}
}
@ -191,7 +191,7 @@ func (lm *Manager) Validator(ctx context.Context) {
// Validate validates the current active license
func (lm *Manager) Validate(ctx context.Context) (reterr error) {
zap.S().Info("License validation started")
zap.L().Info("License validation started")
if lm.activeLicense == nil {
return nil
}
@ -201,12 +201,12 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
lm.lastValidated = time.Now().Unix()
if reterr != nil {
zap.S().Errorf("License validation completed with error", reterr)
zap.L().Error("License validation completed with error", zap.Error(reterr))
atomic.AddUint64(&lm.failedAttempts, 1)
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
map[string]interface{}{"err": reterr.Error()}, "")
} else {
zap.S().Info("License validation completed with no errors")
zap.L().Info("License validation completed with no errors")
}
lm.mutex.Unlock()
@ -214,7 +214,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
if apiError != nil {
zap.S().Errorf("failed to validate license", apiError)
zap.L().Error("failed to validate license", zap.Error(apiError.Err))
return apiError.Err
}
@ -235,7 +235,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
}
if err := l.ParsePlan(); err != nil {
zap.S().Errorf("failed to parse updated license", zap.Error(err))
zap.L().Error("failed to parse updated license", zap.Error(err))
return err
}
@ -245,7 +245,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
if err != nil {
// unexpected db write issue but we can let the user continue
// and wait for update to work in next cycle.
zap.S().Errorf("failed to validate license", zap.Error(err))
zap.L().Error("failed to validate license", zap.Error(err))
}
}
@ -270,7 +270,7 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
response, apiError := validate.ActivateLicense(key, "")
if apiError != nil {
zap.S().Errorf("failed to activate license", zap.Error(apiError.Err))
zap.L().Error("failed to activate license", zap.Error(apiError.Err))
return nil, apiError
}
@ -284,14 +284,14 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
err := l.ParsePlan()
if err != nil {
zap.S().Errorf("failed to activate license", zap.Error(err))
zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err)
}
// store the license before activating it
err = lm.repo.InsertLicense(ctx, l)
if err != nil {
zap.S().Errorf("failed to activate license", zap.Error(err))
zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err)
}

View File

@ -14,10 +14,10 @@ import (
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.signoz.io/signoz/ee/query-service/app"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/constants"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/version"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder"
zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync"
@ -27,18 +27,19 @@ import (
)
func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
config := zap.NewDevelopmentConfig()
config := zap.NewProductionConfig()
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
defer stop()
config.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
consoleEncoder := zapcore.NewConsoleEncoder(config.EncoderConfig)
defaultLogLevel := zapcore.DebugLevel
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder
config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
config.EncoderConfig.TimeKey = "timestamp"
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig)
consoleEncoder := zapcore.NewJSONEncoder(config.EncoderConfig)
defaultLogLevel := zapcore.InfoLevel
res := resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceNameKey.String("query-service"),
@ -48,14 +49,15 @@ func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger {
zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel),
)
if enableQueryServiceLogOTLPExport == true {
conn, err := grpc.DialContext(ctx, constants.OTLPTarget, grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(time.Second*30))
if enableQueryServiceLogOTLPExport {
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
defer cancel()
conn, err := grpc.DialContext(ctx, baseconst.OTLPTarget, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
log.Println("failed to connect to otlp collector to export query service logs with error:", err)
log.Fatalf("failed to establish connection: %v", err)
} else {
logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize)
if err != nil {
logExportBatchSizeInt = 1000
logExportBatchSizeInt = 512
}
ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{
BatchSize: logExportBatchSizeInt,
@ -113,7 +115,6 @@ func main() {
zap.ReplaceGlobals(loggerMgr)
defer loggerMgr.Sync() // flushes buffer, if any
logger := loggerMgr.Sugar()
version.PrintVersion()
serverOptions := &app.ServerOptions{
@ -137,22 +138,22 @@ func main() {
auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
if len(auth.JwtSecret) == 0 {
zap.S().Warn("No JWT secret key is specified.")
zap.L().Warn("No JWT secret key is specified.")
} else {
zap.S().Info("No JWT secret key set successfully.")
zap.L().Info("JWT secret key set successfully.")
}
server, err := app.NewServer(serverOptions)
if err != nil {
logger.Fatal("Failed to create server", zap.Error(err))
zap.L().Fatal("Failed to create server", zap.Error(err))
}
if err := server.Start(); err != nil {
logger.Fatal("Could not start servers", zap.Error(err))
zap.L().Fatal("Could not start server", zap.Error(err))
}
if err := auth.InitAuthCache(context.Background()); err != nil {
logger.Fatal("Failed to initialize auth cache", zap.Error(err))
zap.L().Fatal("Failed to initialize auth cache", zap.Error(err))
}
signalsChannel := make(chan os.Signal, 1)
@ -161,9 +162,9 @@ func main() {
for {
select {
case status := <-server.HealthCheckStatus():
logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
zap.L().Info("Received HealthCheck status: ", zap.Int("status", int(status)))
case <-signalsChannel:
logger.Fatal("Received OS Interrupt Signal ... ")
zap.L().Fatal("Received OS Interrupt Signal ... ")
server.Stop()
}
}
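
The initZapLog changes above move the logger from a development setup to a production-style configuration: JSON output on stdout, ISO8601 timestamps, capitalised levels, Info as the default level, and an optional OTLP gRPC exporter for shipping query-service logs. A simplified, self-contained sketch of the console part only (the name buildLogger and the omission of the zap_otlp export branch are assumptions of this sketch, not the actual initZapLog):

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// buildLogger mirrors the new console configuration: production encoder settings,
// JSON output on stdout, ISO8601 timestamps, and Info as the default level.
func buildLogger() *zap.Logger {
	config := zap.NewProductionConfig()
	config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder
	config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

	core := zapcore.NewCore(zapcore.NewJSONEncoder(config.EncoderConfig), os.Stdout, zapcore.InfoLevel)
	return zap.New(core, zap.AddCaller())
}

func main() {
	logger := buildLogger()
	zap.ReplaceGlobals(logger)
	defer logger.Sync()

	zap.L().Info("query service logger initialised")
}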

View File

@ -9,8 +9,8 @@ import (
"github.com/google/uuid"
"github.com/pkg/errors"
saml2 "github.com/russellhaering/gosaml2"
"go.signoz.io/signoz/ee/query-service/sso/saml"
"go.signoz.io/signoz/ee/query-service/sso"
"go.signoz.io/signoz/ee/query-service/sso/saml"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
@ -24,16 +24,16 @@ const (
// OrgDomain identify org owned web domains for auth and other purposes
type OrgDomain struct {
Id uuid.UUID `json:"id"`
Name string `json:"name"`
OrgId string `json:"orgId"`
SsoEnabled bool `json:"ssoEnabled"`
SsoType SSOType `json:"ssoType"`
Id uuid.UUID `json:"id"`
Name string `json:"name"`
OrgId string `json:"orgId"`
SsoEnabled bool `json:"ssoEnabled"`
SsoType SSOType `json:"ssoType"`
SamlConfig *SamlConfig `json:"samlConfig"`
SamlConfig *SamlConfig `json:"samlConfig"`
GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"`
Org *basemodel.Organization
Org *basemodel.Organization
}
func (od *OrgDomain) String() string {
@ -100,8 +100,8 @@ func (od *OrgDomain) GetSAMLCert() string {
return ""
}
// PrepareGoogleOAuthProvider creates GoogleProvider that is used in
// requesting OAuth and also used in processing response from google
// PrepareGoogleOAuthProvider creates GoogleProvider that is used in
// requesting OAuth and also used in processing response from google
func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) {
if od.GoogleAuthConfig == nil {
return nil, fmt.Errorf("Google auth is not setup correctly for this domain")
@ -137,38 +137,36 @@ func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServicePro
}
func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)
// build redirect url from window.location sent by frontend
redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path)
// prepare state that gets relayed back when the auth provider
// calls back our url. here we pass the app url (where signoz runs)
// and the domain Id. The domain Id helps in identifying sso config
// when the call back occurs and the app url is useful in redirecting user
// back to the right path.
// when the call back occurs and the app url is useful in redirecting user
// back to the right path.
// why do we need to pass app url? the callback typically is handled by backend
// and sometimes backend might right at a different port or is unaware of frontend
// endpoint (unless SITE_URL param is set). hence, we receive this build sso request
// along with frontend window.location and use it to relay the information through
// auth provider to the backend (HandleCallback or HandleSSO method).
// along with frontend window.location and use it to relay the information through
// auth provider to the backend (HandleCallback or HandleSSO method).
relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId)
switch (od.SsoType) {
switch od.SsoType {
case SAML:
sp, err := od.PrepareSamlRequest(siteUrl)
if err != nil {
return "", err
}
return sp.BuildAuthURL(relayState)
case GoogleAuth:
googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl)
if err != nil {
return "", err
@ -176,9 +174,8 @@ func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
return googleProvider.BuildAuthURL(relayState)
default:
zap.S().Errorf("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
return "", fmt.Errorf("unsupported SSO config for the domain")
zap.L().Error("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name))
return "", fmt.Errorf("unsupported SSO config for the domain")
}
}

View File

@ -102,6 +102,6 @@ func PrepareRequest(issuer, acsUrl, audience, entity, idp, certString string) (*
IDPCertificateStore: certStore,
SPKeyStore: randomKeyStore,
}
zap.S().Debugf("SAML request:", sp)
zap.L().Debug("SAML request", zap.Any("sp", sp))
return sp, nil
}

View File

@ -91,12 +91,12 @@ func (lm *Manager) UploadUsage() {
// check if license is present or not
license, err := lm.licenseRepo.GetActiveLicense(ctx)
if err != nil {
zap.S().Errorf("failed to get active license: %v", zap.Error(err))
zap.L().Error("failed to get active license", zap.Error(err))
return
}
if license == nil {
// we will not start the usage reporting if license is not present.
zap.S().Info("no license present, skipping usage reporting")
zap.L().Info("no license present, skipping usage reporting")
return
}
@ -123,7 +123,7 @@ func (lm *Manager) UploadUsage() {
dbusages := []model.UsageDB{}
err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
zap.S().Errorf("failed to get usage from clickhouse: %v", zap.Error(err))
zap.L().Error("failed to get usage from clickhouse: %v", zap.Error(err))
return
}
for _, u := range dbusages {
@ -133,16 +133,16 @@ func (lm *Manager) UploadUsage() {
}
if len(usages) <= 0 {
zap.S().Info("no snapshots to upload, skipping.")
zap.L().Info("no snapshots to upload, skipping.")
return
}
zap.S().Info("uploading usage data")
zap.L().Info("uploading usage data")
orgName := ""
orgNames, orgError := lm.modelDao.GetOrgs(ctx)
if orgError != nil {
zap.S().Errorf("failed to get org data: %v", zap.Error(orgError))
zap.L().Error("failed to get org data: %v", zap.Error(orgError))
}
if len(orgNames) == 1 {
orgName = orgNames[0].Name
@ -152,14 +152,14 @@ func (lm *Manager) UploadUsage() {
for _, usage := range usages {
usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
if err != nil {
zap.S().Errorf("error while decrypting usage data: %v", zap.Error(err))
zap.L().Error("error while decrypting usage data: %v", zap.Error(err))
return
}
usageData := model.Usage{}
err = json.Unmarshal(usageDataBytes, &usageData)
if err != nil {
zap.S().Errorf("error while unmarshalling usage data: %v", zap.Error(err))
zap.L().Error("error while unmarshalling usage data: %v", zap.Error(err))
return
}
@ -184,13 +184,13 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
for i := 1; i <= MaxRetries; i++ {
apiErr := licenseserver.SendUsage(ctx, payload)
if apiErr != nil && i == MaxRetries {
zap.S().Errorf("retries stopped : %v", zap.Error(apiErr))
zap.L().Error("retries stopped : %v", zap.Error(apiErr))
// not returning error here since it is captured in the failed count
return
} else if apiErr != nil {
// sleeping for exponential backoff
sleepDuration := RetryInterval * time.Duration(i)
zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), zap.Error(apiErr.Err))
zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Error(apiErr.Err))
time.Sleep(sleepDuration)
} else {
break
@ -201,7 +201,7 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload
func (lm *Manager) Stop() {
lm.scheduler.Stop()
zap.S().Debug("sending usage data before shutting down")
zap.L().Info("sending usage data before shutting down")
// send usage before shutting down
lm.UploadUsage()

View File

@ -151,7 +151,7 @@ func (r *Repo) insertConfig(
// allowing empty elements for logs - use case is deleting all pipelines
if len(elements) == 0 && c.ElementType != ElementTypeLogPipelines {
zap.S().Error("insert config called with no elements ", c.ElementType)
zap.L().Error("insert config called with no elements ", zap.String("ElementType", string(c.ElementType)))
return model.BadRequest(fmt.Errorf("config must have atleast one element"))
}
@ -159,7 +159,7 @@ func (r *Repo) insertConfig(
// the version can not be set by the user, we want to auto-assign the versions
// in a monotonically increasing order starting with 1. hence, we reject insert
// requests with version anything other than 0. here, 0 indicates un-assigned
zap.S().Error("invalid version assignment while inserting agent config", c.Version, c.ElementType)
zap.L().Error("invalid version assignment while inserting agent config", zap.Int("version", c.Version), zap.String("ElementType", string(c.ElementType)))
return model.BadRequest(fmt.Errorf(
"user defined versions are not supported in the agent config",
))
@ -167,7 +167,7 @@ func (r *Repo) insertConfig(
configVersion, err := r.GetLatestVersion(ctx, c.ElementType)
if err != nil && err.Type() != model.ErrorNotFound {
zap.S().Error("failed to fetch latest config version", err)
zap.L().Error("failed to fetch latest config version", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to fetch latest config version"))
}
@ -212,7 +212,7 @@ func (r *Repo) insertConfig(
c.DeployResult)
if dbErr != nil {
zap.S().Error("error in inserting config version: ", zap.Error(dbErr))
zap.L().Error("error in inserting config version: ", zap.Error(dbErr))
return model.InternalError(errors.Wrap(dbErr, "failed to insert ingestion rule"))
}
@ -258,7 +258,7 @@ func (r *Repo) updateDeployStatus(ctx context.Context,
_, err := r.db.ExecContext(ctx, updateQuery, status, result, lastHash, lastconf, version, string(elementType))
if err != nil {
zap.S().Error("failed to update deploy status", err)
zap.L().Error("failed to update deploy status", zap.Error(err))
return model.BadRequest(fmt.Errorf("failed to update deploy status"))
}
@ -276,7 +276,7 @@ func (r *Repo) updateDeployStatusByHash(
_, err := r.db.ExecContext(ctx, updateQuery, status, result, confighash)
if err != nil {
zap.S().Error("failed to update deploy status", err)
zap.L().Error("failed to update deploy status", zap.Error(err))
return model.InternalError(errors.Wrap(err, "failed to update deploy status"))
}

View File

@ -224,19 +224,19 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
configVersion, err := GetConfigVersion(ctx, typ, version)
if err != nil {
zap.S().Debug("failed to fetch config version during redeploy", err)
zap.L().Error("failed to fetch config version during redeploy", zap.Error(err))
return model.WrapApiError(err, "failed to fetch details of the config version")
}
if configVersion == nil || (configVersion != nil && configVersion.LastConf == "") {
zap.S().Debug("config version has no conf yaml", configVersion)
zap.L().Debug("config version has no conf yaml", zap.Any("configVersion", configVersion))
return model.BadRequest(fmt.Errorf("the config version can not be redeployed"))
}
switch typ {
case ElementTypeSamplingRules:
var config *tsp.Config
if err := yaml.Unmarshal([]byte(configVersion.LastConf), &config); err != nil {
zap.S().Error("failed to read last conf correctly", err)
zap.L().Debug("failed to read last conf correctly", zap.Error(err))
return model.BadRequest(fmt.Errorf("failed to read the stored config correctly"))
}
@ -248,7 +248,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
opamp.AddToTracePipelineSpec("signoz_tail_sampling")
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
zap.S().Error("failed to call agent config update for trace processor:", err)
zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to deploy the config"))
}
@ -256,7 +256,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
case ElementTypeDropRules:
var filterConfig *filterprocessor.Config
if err := yaml.Unmarshal([]byte(configVersion.LastConf), &filterConfig); err != nil {
zap.S().Error("failed to read last conf correctly", err)
zap.L().Error("failed to read last conf correctly", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to read the stored config correctly"))
}
processorConf := map[string]interface{}{
@ -266,7 +266,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr
opamp.AddToMetricsPipelineSpec("filter")
configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate)
if err != nil {
zap.S().Error("failed to call agent config update for trace processor:", err)
zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return err
}
@ -292,13 +292,13 @@ func UpsertFilterProcessor(ctx context.Context, version int, config *filterproce
opamp.AddToMetricsPipelineSpec("filter")
configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate)
if err != nil {
zap.S().Error("failed to call agent config update for trace processor:", err)
zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return err
}
processorConfYaml, yamlErr := yaml.Marshal(config)
if yamlErr != nil {
zap.S().Warnf("unexpected error while transforming processor config to yaml", yamlErr)
zap.L().Warn("unexpected error while transforming processor config to yaml", zap.Error(yamlErr))
}
m.updateDeployStatus(ctx, ElementTypeDropRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml))
@ -317,7 +317,7 @@ func (m *Manager) OnConfigUpdate(agentId string, hash string, err error) {
message := "Deployment was successful"
defer func() {
zap.S().Info(status, zap.String("agentId", agentId), zap.String("agentResponse", message))
zap.L().Info(status, zap.String("agentId", agentId), zap.String("agentResponse", message))
}()
if err != nil {
@ -343,13 +343,13 @@ func UpsertSamplingProcessor(ctx context.Context, version int, config *tsp.Confi
opamp.AddToTracePipelineSpec("signoz_tail_sampling")
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
zap.S().Error("failed to call agent config update for trace processor:", err)
zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return err
}
processorConfYaml, yamlErr := yaml.Marshal(config)
if yamlErr != nil {
zap.S().Warnf("unexpected error while transforming processor config to yaml", yamlErr)
zap.L().Warn("unexpected error while transforming processor config to yaml", zap.Error(yamlErr))
}
m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml))

View File

@ -106,7 +106,7 @@ func defaultConnector(cfg *namespaceConfig) (clickhouse.Conn, error) {
options.DialTimeout = cfg.DialTimeout
}
zap.S().Infof("Connecting to Clickhouse at %s, Secure: %t, MaxIdleConns: %d, MaxOpenConns: %d, DialTimeout: %s", options.Addr, options.TLS != nil, options.MaxIdleConns, options.MaxOpenConns, options.DialTimeout)
zap.L().Info("Connecting to Clickhouse", zap.String("at", options.Addr[0]), zap.Int("MaxIdleConns", options.MaxIdleConns), zap.Int("MaxOpenConns", options.MaxOpenConns), zap.Duration("DialTimeout", options.DialTimeout))
db, err := clickhouse.Open(options)
if err != nil {
return nil, err

File diff suppressed because it is too large

View File

@ -196,7 +196,7 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf
mapData, err := json.Marshal(dash.Data)
if err != nil {
zap.S().Errorf("Error in marshalling data field in dashboard: ", dash, err)
zap.L().Error("Error in marshalling data field in dashboard: ", zap.Any("dashboard", dash), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@ -212,7 +212,7 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf
dash.Uuid, dash.CreatedAt, userEmail, dash.UpdatedAt, userEmail, mapData)
if err != nil {
zap.S().Errorf("Error in inserting dashboard data: ", dash, err)
zap.L().Error("Error in inserting dashboard data: ", zap.Any("dashboard", dash), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
lastInsertId, err := result.LastInsertId()
@ -246,7 +246,7 @@ func DeleteDashboard(ctx context.Context, uuid string, fm interfaces.FeatureLook
dashboard, dErr := GetDashboard(ctx, uuid)
if dErr != nil {
zap.S().Errorf("Error in getting dashboard: ", uuid, dErr)
zap.L().Error("Error in getting dashboard: ", zap.String("uuid", uuid), zap.Any("error", dErr))
return dErr
}
@ -296,7 +296,7 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
mapData, err := json.Marshal(data)
if err != nil {
zap.S().Errorf("Error in marshalling data field in dashboard: ", data, err)
zap.L().Error("Error in marshalling data field in dashboard: ", zap.Any("data", data), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
}
@ -337,7 +337,7 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
dashboard.UpdatedAt, userEmail, mapData, dashboard.Uuid)
if err != nil {
zap.S().Errorf("Error in inserting dashboard data: ", data, err)
zap.L().Error("Error in inserting dashboard data", zap.Any("data", data), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
if existingCount != newCount {
@ -358,7 +358,7 @@ func LockUnlockDashboard(ctx context.Context, uuid string, lock bool) *model.Api
_, err := db.Exec(query, uuid)
if err != nil {
zap.S().Errorf("Error in updating dashboard: ", uuid, err)
zap.L().Error("Error in updating dashboard", zap.String("uuid", uuid), zap.Error(err))
return &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@ -370,10 +370,10 @@ func updateFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiErro
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
default:
zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
}
}
@ -397,10 +397,10 @@ func checkFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiError
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
default:
zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
}
}
@ -535,13 +535,13 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
if template.Type == "query" {
if template.Datasource == nil {
zap.S().Warnf("Skipping panel %d as it has no datasource", templateIdx)
zap.L().Warn("Skipping panel as it has no datasource", zap.Int("templateIdx", templateIdx))
continue
}
// Skip if the source is not prometheus
source, stringOk := template.Datasource.(string)
if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") {
zap.S().Warnf("Skipping template %d as it is not prometheus", templateIdx)
zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx))
continue
}
var result model.Datasource
@ -553,12 +553,12 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
}
}
if result.Type != "prometheus" && result.Type != "" {
zap.S().Warnf("Skipping template %d as it is not prometheus", templateIdx)
zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx))
continue
}
if !stringOk && !structOk {
zap.S().Warnf("Didn't recognize source, skipping")
zap.L().Warn("Didn't recognize source, skipping")
continue
}
typ = "QUERY"
@ -629,13 +629,13 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
continue
}
if panel.Datasource == nil {
zap.S().Warnf("Skipping panel %d as it has no datasource", idx)
zap.L().Warn("Skipping panel as it has no datasource", zap.Int("idx", idx))
continue
}
// Skip if the datasource is not prometheus
source, stringOk := panel.Datasource.(string)
if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") {
zap.S().Warnf("Skipping panel %d as it is not prometheus", idx)
zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx))
continue
}
var result model.Datasource
@ -647,12 +647,12 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard
}
}
if result.Type != "prometheus" && result.Type != "" {
zap.S().Warnf("Skipping panel %d as it is not prometheus", idx)
zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx))
continue
}
if !stringOk && !structOk {
zap.S().Warnf("Didn't recognize source, skipping")
zap.L().Warn("Didn't recognize source, skipping")
continue
}

View File

@ -16,31 +16,31 @@ import (
func readCurrentDir(dir string, fm interfaces.FeatureLookup) error {
file, err := os.Open(dir)
if err != nil {
zap.S().Errorf("failed opening directory: %s", err)
return err
zap.L().Warn("failed opening directory", zap.Error(err))
return nil
}
defer file.Close()
list, _ := file.Readdirnames(0) // 0 to read all files and folders
for _, filename := range list {
zap.S().Info("Provisioning dashboard: ", filename)
zap.L().Info("Provisioning dashboard: ", zap.String("filename", filename))
// using filepath.Join for platform specific path creation
// which is equivalent to "dir+/+filename" (on unix based systems) but cleaner
plan, err := os.ReadFile(filepath.Join(dir, filename))
if err != nil {
zap.S().Errorf("Creating Dashboards: Error in reading json fron file: %s\t%s", filename, err)
zap.L().Error("Creating Dashboards: Error in reading json fron file", zap.String("filename", filename), zap.Error(err))
continue
}
var data map[string]interface{}
err = json.Unmarshal(plan, &data)
if err != nil {
zap.S().Errorf("Creating Dashboards: Error in unmarshalling json from file: %s\t%s", filename, err)
zap.L().Error("Creating Dashboards: Error in unmarshalling json from file", zap.String("filename", filename), zap.Error(err))
continue
}
err = IsPostDataSane(&data)
if err != nil {
zap.S().Infof("Creating Dashboards: Error in file: %s\t%s", filename, err)
zap.L().Info("Creating Dashboards: Error in file", zap.String("filename", filename), zap.Error(err))
continue
}
@ -48,14 +48,14 @@ func readCurrentDir(dir string, fm interfaces.FeatureLookup) error {
if id == nil {
_, apiErr := CreateDashboard(context.Background(), data, fm)
if apiErr != nil {
zap.S().Errorf("Creating Dashboards: Error in file: %s\t%s", filename, apiErr.Err)
zap.L().Error("Creating Dashboards: Error in file", zap.String("filename", filename), zap.Error(apiErr.Err))
}
continue
}
apiErr := upsertDashboard(id.(string), data, filename, fm)
if apiErr != nil {
zap.S().Errorf("Creating Dashboards: Error upserting dashboard: %s\t%s", filename, apiErr.Err)
zap.L().Error("Creating Dashboards: Error upserting dashboard", zap.String("filename", filename), zap.Error(apiErr.Err))
}
}
return nil
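
A runnable sketch of the provisioning loop's error handling, with a hypothetical createDashboard standing in for the real CreateDashboard call: each file is read and parsed independently, so a missing directory is only a warning and a broken JSON file is logged with its filename and skipped rather than aborting the whole run.

package main

import (
	"encoding/json"
	"os"
	"path/filepath"

	"go.uber.org/zap"
)

// createDashboard is a hypothetical stand-in for the real CreateDashboard call.
func createDashboard(data map[string]interface{}) error { return nil }

func provisionDashboards(dir string) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		// A missing directory is not fatal for provisioning; log and move on.
		zap.L().Warn("failed opening directory", zap.Error(err))
		return
	}
	for _, entry := range entries {
		filename := entry.Name()
		zap.L().Info("Provisioning dashboard", zap.String("filename", filename))
		plan, err := os.ReadFile(filepath.Join(dir, filename))
		if err != nil {
			zap.L().Error("error in reading json from file", zap.String("filename", filename), zap.Error(err))
			continue
		}
		var data map[string]interface{}
		if err := json.Unmarshal(plan, &data); err != nil {
			zap.L().Error("error in unmarshalling json from file", zap.String("filename", filename), zap.Error(err))
			continue
		}
		if err := createDashboard(data); err != nil {
			zap.L().Error("error creating dashboard from file", zap.String("filename", filename), zap.Error(err))
		}
	}
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)
	provisionDashboards("./dashboards")
}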

View File

@ -207,7 +207,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
hasUsers, err := aH.appDao.GetUsersWithOpts(context.Background(), 1)
if err.Error() != "" {
// raise warning but no panic as this is a recoverable condition
zap.S().Warnf("unexpected error while fetch user count while initializing base api handler", err.Error())
zap.L().Warn("unexpected error while fetch user count while initializing base api handler", zap.Error(err))
}
if len(hasUsers) != 0 {
// first user is already created, we can mark the app ready for general use.
@ -273,7 +273,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
Data: data,
})
if err != nil {
zap.S().Error("msg", "error marshalling json response", "err", err)
zap.L().Error("error marshalling json response", zap.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@ -303,7 +303,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
if n, err := w.Write(b); err != nil {
zap.S().Error("msg", "error writing response", "bytesWritten", n, "err", err)
zap.L().Error("error writing response", zap.Int("bytesWritten", n), zap.Error(err))
}
}
@ -314,7 +314,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) {
Data: data,
})
if err != nil {
zap.S().Error("msg", "error marshalling json response", "err", err)
zap.L().Error("error marshalling json response", zap.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@ -322,7 +322,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
if n, err := w.Write(b); err != nil {
zap.S().Error("msg", "error writing response", "bytesWritten", n, "err", err)
zap.L().Error("error writing response", zap.Int("bytesWritten", n), zap.Error(err))
}
}
@ -567,7 +567,7 @@ func (aH *APIHandler) addTemporality(ctx context.Context, qp *v3.QueryRangeParam
var err error
if aH.preferDelta {
zap.S().Debug("fetching metric temporality")
zap.L().Debug("fetching metric temporality")
metricNameToTemporality, err = aH.reader.FetchTemporality(ctx, metricNames)
if err != nil {
return err
@ -595,7 +595,7 @@ func (aH *APIHandler) QueryRangeMetricsV2(w http.ResponseWriter, r *http.Request
metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r)
if apiErrorObj != nil {
zap.S().Errorf(apiErrorObj.Err.Error())
zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@ -1130,7 +1130,7 @@ func (aH *APIHandler) testRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("Error in getting req body in test rule API\n", err)
zap.L().Error("Error in getting req body in test rule API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@ -1173,7 +1173,7 @@ func (aH *APIHandler) patchRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("msg: error in getting req body of patch rule API\n", "\t error:", err)
zap.L().Error("error in getting req body of patch rule API\n", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@ -1194,7 +1194,7 @@ func (aH *APIHandler) editRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("msg: error in getting req body of edit rule API\n", "\t error:", err)
zap.L().Error("error in getting req body of edit rule API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@ -1245,14 +1245,14 @@ func (aH *APIHandler) testChannel(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("Error in getting req body of testChannel API\n", err)
zap.L().Error("Error in getting req body of testChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
receiver := &am.Receiver{}
if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer
zap.S().Errorf("Error in parsing req body of testChannel API\n", err)
zap.L().Error("Error in parsing req body of testChannel API\n", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@ -1272,14 +1272,14 @@ func (aH *APIHandler) editChannel(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("Error in getting req body of editChannel API\n", err)
zap.L().Error("Error in getting req body of editChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
receiver := &am.Receiver{}
if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer
zap.S().Errorf("Error in parsing req body of editChannel API\n", err)
zap.L().Error("Error in parsing req body of editChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@ -1300,14 +1300,14 @@ func (aH *APIHandler) createChannel(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("Error in getting req body of createChannel API\n", err)
zap.L().Error("Error in getting req body of createChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
receiver := &am.Receiver{}
if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer
zap.S().Errorf("Error in parsing req body of createChannel API\n", err)
zap.L().Error("Error in parsing req body of createChannel API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@ -1347,7 +1347,7 @@ func (aH *APIHandler) createRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("Error in getting req body for create rule API\n", err)
zap.L().Error("Error in getting req body for create rule API", zap.Error(err))
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@ -1374,7 +1374,7 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request)
return
}
// zap.S().Info(query, apiError)
// zap.L().Info(query, apiError)
ctx := r.Context()
if to := r.FormValue("timeout"); to != "" {
@ -1396,7 +1396,7 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request)
}
if res.Err != nil {
zap.S().Error(res.Err)
zap.L().Error("error in query range metrics", zap.Error(res.Err))
}
if res.Err != nil {
@ -1429,7 +1429,7 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) {
return
}
// zap.S().Info(query, apiError)
// zap.L().Info(query, apiError)
ctx := r.Context()
if to := r.FormValue("timeout"); to != "" {
@ -1451,7 +1451,7 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) {
}
if res.Err != nil {
zap.S().Error(res.Err)
zap.L().Error("error in query range metrics", zap.Error(res.Err))
}
if res.Err != nil {
@ -2045,7 +2045,7 @@ func (aH *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) listUsers(w http.ResponseWriter, r *http.Request) {
users, err := dao.DB().GetUsers(context.Background())
if err != nil {
zap.S().Debugf("[listUsers] Failed to query list of users, err: %v", err)
zap.L().Error("[listUsers] Failed to query list of users", zap.Error(err))
RespondError(w, err, nil)
return
}
@ -2062,7 +2062,7 @@ func (aH *APIHandler) getUser(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
user, err := dao.DB().GetUser(ctx, id)
if err != nil {
zap.S().Debugf("[getUser] Failed to query user, err: %v", err)
zap.L().Error("[getUser] Failed to query user", zap.Error(err))
RespondError(w, err, "Failed to get user")
return
}
@ -2092,7 +2092,7 @@ func (aH *APIHandler) editUser(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
old, apiErr := dao.DB().GetUser(ctx, id)
if apiErr != nil {
zap.S().Debugf("[editUser] Failed to query user, err: %v", err)
zap.L().Error("[editUser] Failed to query user", zap.Error(err))
RespondError(w, apiErr, nil)
return
}
@ -2176,7 +2176,7 @@ func (aH *APIHandler) patchUserFlag(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
b, err := io.ReadAll(r.Body)
if err != nil {
zap.S().Errorf("failed read user flags from http request for userId ", userId, "with error: ", err)
zap.L().Error("failed read user flags from http request for userId ", zap.String("userId", userId), zap.Error(err))
RespondError(w, model.BadRequestStr("received user flags in invalid format"), nil)
return
}
@ -2184,7 +2184,7 @@ func (aH *APIHandler) patchUserFlag(w http.ResponseWriter, r *http.Request) {
err = json.Unmarshal(b, &flags)
if err != nil {
zap.S().Errorf("failed parsing user flags for userId ", userId, "with error: ", err)
zap.L().Error("failed parsing user flags for userId ", zap.String("userId", userId), zap.Error(err))
RespondError(w, model.BadRequestStr("received user flags in invalid format"), nil)
return
}
@ -2348,7 +2348,7 @@ func (aH *APIHandler) resetPassword(w http.ResponseWriter, r *http.Request) {
}
if err := auth.ResetPassword(context.Background(), req); err != nil {
zap.S().Debugf("resetPassword failed, err: %v\n", err)
zap.L().Error("resetPassword failed", zap.Error(err))
if aH.HandleError(w, err, http.StatusInternalServerError) {
return
}
@ -2391,7 +2391,7 @@ func (aH *APIHandler) HandleError(w http.ResponseWriter, err error, statusCode i
return false
}
if statusCode == http.StatusInternalServerError {
zap.S().Error("HTTP handler, Internal Server Error", zap.Error(err))
zap.L().Error("HTTP handler, Internal Server Error", zap.Error(err))
}
structuredResp := structuredResponse{
Errors: []structuredError{
@ -2809,10 +2809,10 @@ func (aH *APIHandler) tailLogs(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "data: %v\n\n", buf.String())
flusher.Flush()
case <-client.Done:
zap.S().Debug("done!")
zap.L().Debug("done!")
return
case err := <-client.Error:
zap.S().Error("error occured!", err)
zap.L().Error("error occured", zap.Error(err))
return
}
}
@ -2963,7 +2963,7 @@ func (ah *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
postable []logparsingpipeline.PostablePipeline,
) (*logparsingpipeline.PipelinesResponse, *model.ApiError) {
if len(postable) == 0 {
zap.S().Warnf("found no pipelines in the http request, this will delete all the pipelines")
zap.L().Warn("found no pipelines in the http request, this will delete all the pipelines")
}
for _, p := range postable {
@ -3403,7 +3403,7 @@ func (aH *APIHandler) QueryRangeV3Format(w http.ResponseWriter, r *http.Request)
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
zap.S().Errorf(apiErrorObj.Err.Error())
zap.L().Error("error parsing query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@ -3478,11 +3478,11 @@ func sendQueryResultEvents(r *http.Request, result []*v3.Result, queryRangeParam
dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
zap.S().Errorf("error while matching the dashboard: %v", err)
zap.L().Error("error while matching the referrer", zap.Error(err))
}
alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
zap.S().Errorf("error while matching the alert: %v", err)
zap.L().Error("error while matching the alert: ", zap.Error(err))
}
if alertMatched || dashboardMatched {
@ -3559,7 +3559,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
zap.S().Errorf(apiErrorObj.Err.Error())
zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@ -3568,7 +3568,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
temporalityErr := aH.addTemporality(r.Context(), queryRangeParams)
if temporalityErr != nil {
zap.S().Errorf("Error while adding temporality for metrics: %v", temporalityErr)
zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
@ -3584,7 +3584,7 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
zap.S().Errorf(apiErrorObj.Err.Error())
zap.L().Error("error parsing query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@ -3645,10 +3645,10 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "data: %v\n\n", buf.String())
flusher.Flush()
case <-client.Done:
zap.S().Debug("done!")
zap.L().Debug("done!")
return
case err := <-client.Error:
zap.S().Error("error occurred!", err)
zap.L().Error("error occurred", zap.Error(err))
fmt.Fprintf(w, "event: error\ndata: %v\n\n", err.Error())
flusher.Flush()
return
@ -3725,7 +3725,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
zap.S().Errorf(apiErrorObj.Err.Error())
zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err))
RespondError(w, apiErrorObj, nil)
return
}
@ -3733,7 +3733,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
// add temporality for each metric
temporalityErr := aH.populateTemporality(r.Context(), queryRangeParams)
if temporalityErr != nil {
zap.S().Errorf("Error while adding temporality for metrics: %v", temporalityErr)
zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
@ -3777,12 +3777,12 @@ func postProcessResult(result []*v3.Result, queryRangeParams *v3.QueryRangeParam
expression, err := govaluate.NewEvaluableExpressionWithFunctions(query.Expression, evalFuncs())
// This shouldn't happen here, because it should have been caught earlier in validation
if err != nil {
zap.S().Errorf("error in expression: %s", err.Error())
zap.L().Error("error in expression", zap.Error(err))
return nil, err
}
formulaResult, err := processResults(result, expression)
if err != nil {
zap.S().Errorf("error in expression: %s", err.Error())
zap.L().Error("error in expression", zap.Error(err))
return nil, err
}
formulaResult.QueryName = query.QueryName

View File

@ -138,7 +138,7 @@ func buildLogsProcessors(current []string, logsParserPipeline []string) ([]strin
func checkDuplicateString(pipeline []string) bool {
exists := make(map[string]bool, len(pipeline))
zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline)
zap.L().Debug("checking duplicate processors in the pipeline:", zap.Any("pipeline", pipeline))
for _, processor := range pipeline {
name := processor
if _, ok := exists[name]; ok {

View File

@ -104,7 +104,7 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
if version >= 0 {
savedPipelines, errors := ic.getPipelinesByVersion(ctx, version)
if errors != nil {
zap.S().Errorf("failed to get pipelines for version %d, %w", version, errors)
zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Errors("errors", errors))
return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version"))
}
result = savedPipelines
@ -158,7 +158,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion(
) (*PipelinesResponse, *model.ApiError) {
pipelines, errors := ic.getEffectivePipelinesByVersion(ctx, version)
if errors != nil {
zap.S().Errorf("failed to get pipelines for version %d, %w", version, errors)
zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Error(errors))
return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version"))
}
@ -166,7 +166,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion(
if version >= 0 {
cv, err := agentConf.GetConfigVersion(ctx, agentConf.ElementTypeLogPipelines, version)
if err != nil {
zap.S().Errorf("failed to get config for version %d, %s", version, err.Error())
zap.L().Error("failed to get config for version", zap.Int("version", version), zap.Error(err))
return nil, model.WrapApiError(err, "failed to get config for given version")
}
configVersion = cv
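
The hunks above attach a slice of errors with zap.Errors while a single error uses zap.Error; a small runnable sketch of the difference, assuming nothing about the pipeline types.

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	errs := []error{
		errors.New("pipeline 1: invalid operator"),
		errors.New("pipeline 3: missing filter"),
	}

	// zap.Errors encodes a []error as a single structured field,
	// while zap.Int keeps the version queryable as a number.
	zap.L().Error("failed to get pipelines for version",
		zap.Int("version", 7), zap.Errors("errors", errs))

	// A single error uses zap.Error, as in the other hunks.
	zap.L().Error("failed to get config for version",
		zap.Int("version", 7), zap.Error(errors.New("config not found")))
}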

View File

@ -99,7 +99,7 @@ func (r *Repo) insertPipeline(
insertRow.RawConfig)
if err != nil {
zap.S().Errorf("error in inserting pipeline data: ", zap.Error(err))
zap.L().Error("error in inserting pipeline data", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to insert pipeline"))
}
@ -171,19 +171,19 @@ func (r *Repo) GetPipeline(
err := r.db.SelectContext(ctx, &pipelines, pipelineQuery, id)
if err != nil {
zap.S().Errorf("failed to get ingestion pipeline from db", err)
zap.L().Error("failed to get ingestion pipeline from db", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to get ingestion pipeline from db"))
}
if len(pipelines) == 0 {
zap.S().Warnf("No row found for ingestion pipeline id", id)
zap.L().Warn("No row found for ingestion pipeline id", zap.String("id", id))
return nil, model.NotFoundError(fmt.Errorf("No row found for ingestion pipeline id %v", id))
}
if len(pipelines) == 1 {
err := pipelines[0].ParseRawConfig()
if err != nil {
zap.S().Errorf("invalid pipeline config found", id, err)
zap.L().Error("invalid pipeline config found", zap.String("id", id), zap.Error(err))
return nil, model.InternalError(
errors.Wrap(err, "found an invalid pipeline config"),
)

View File

@ -27,10 +27,10 @@ func UpsertControlProcessors(
// AddToTracePipeline() or RemoveFromTracesPipeline() prior to calling
// this method
zap.S().Debug("initiating ingestion rules deployment config", signal, processors)
zap.L().Debug("initiating ingestion rules deployment config", zap.String("signal", signal), zap.Any("processors", processors))
if signal != string(Metrics) && signal != string(Traces) {
zap.S().Error("received invalid signal int UpsertControlProcessors", signal)
zap.L().Error("received invalid signal int UpsertControlProcessors", zap.String("signal", signal))
fnerr = coreModel.BadRequest(fmt.Errorf(
"signal not supported in ingestion rules: %s", signal,
))
@ -51,7 +51,7 @@ func UpsertControlProcessors(
}
if len(agents) > 1 && signal == string(Traces) {
zap.S().Debug("found multiple agents. this feature is not supported for traces pipeline (sampling rules)")
zap.L().Debug("found multiple agents. this feature is not supported for traces pipeline (sampling rules)")
fnerr = coreModel.BadRequest(fmt.Errorf("multiple agents not supported in sampling rules"))
return
}
@ -60,7 +60,7 @@ func UpsertControlProcessors(
agenthash, err := addIngestionControlToAgent(agent, signal, processors, false)
if err != nil {
zap.S().Error("failed to push ingestion rules config to agent", agent.ID, err)
zap.L().Error("failed to push ingestion rules config to agent", zap.String("agentID", agent.ID), zap.Error(err))
continue
}
@ -89,7 +89,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma
// add ingestion control spec
err = makeIngestionControlSpec(agentConf, Signal(signal), processors)
if err != nil {
zap.S().Error("failed to prepare ingestion control processors for agent ", agent.ID, err)
zap.L().Error("failed to prepare ingestion control processors for agent", zap.String("agentID", agent.ID), zap.Error(err))
return confHash, err
}
@ -99,7 +99,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma
return confHash, err
}
zap.S().Debugf("sending new config", string(configR))
zap.L().Debug("sending new config", zap.String("config", string(configR)))
hash := sha256.New()
_, err = hash.Write(configR)
if err != nil {
@ -140,7 +140,7 @@ func makeIngestionControlSpec(agentConf *confmap.Conf, signal Signal, processors
// merge tracesPipelinePlan with current pipeline
mergedPipeline, err := buildPipeline(signal, currentPipeline)
if err != nil {
zap.S().Error("failed to build pipeline", signal, err)
zap.L().Error("failed to build pipeline", zap.String("signal", string(signal)), zap.Error(err))
return err
}

View File

@ -276,7 +276,7 @@ func (agent *Agent) processStatusUpdate(
func (agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool {
recommendedConfig, confId, err := configProvider.RecommendAgentConfig([]byte(agent.EffectiveConfig))
if err != nil {
zap.S().Error("could not generate config recommendation for agent:", agent.ID, err)
zap.L().Error("could not generate config recommendation for agent", zap.String("agentID", agent.ID), zap.Error(err))
return false
}
@ -293,7 +293,7 @@ func (agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool
if len(confId) < 1 {
// Should never happen. Handle gracefully if it does by some chance.
zap.S().Errorf("config provider recommended a config with empty confId. Using content hash for configId")
zap.L().Error("config provider recommended a config with empty confId. Using content hash for configId")
hash := sha256.New()
for k, v := range cfg.Config.ConfigMap {

View File

@ -131,8 +131,8 @@ func (agents *Agents) RecommendLatestConfigToAll(
// Recommendation is same as current config
if string(newConfig) == agent.EffectiveConfig {
zap.S().Infof(
"Recommended config same as current effective config for agent %s", agent.ID,
zap.L().Info(
"Recommended config same as current effective config for agent", zap.String("agentID", agent.ID),
)
return nil
}

View File

@ -40,7 +40,7 @@ func InitializeServer(
agents: agents,
agentConfigProvider: agentConfigProvider,
}
opAmpServer.server = server.New(zap.S())
opAmpServer.server = server.New(zap.L().Sugar())
return opAmpServer
}
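
The OpAMP server constructor still expects a printf-style logger, so the structured global is converted at the boundary with .Sugar(); a sketch of that bridge, with a hypothetical interface and constructor standing in for the real server API.

package main

import (
	"go.uber.org/zap"
)

// legacyLogger is a hypothetical interface standing in for a dependency
// that still wants printf-style logging methods.
type legacyLogger interface {
	Debugf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
}

// newLegacyComponent is a hypothetical constructor standing in for server.New.
func newLegacyComponent(l legacyLogger) {
	l.Debugf("component constructed")
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	// zap.L().Sugar() turns the structured global into a *zap.SugaredLogger,
	// which satisfies printf-style interfaces without a hand-written adapter.
	newLegacyComponent(zap.L().Sugar())
}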
@ -58,8 +58,8 @@ func (srv *Server) Start(listener string) error {
unsubscribe := srv.agentConfigProvider.SubscribeToConfigUpdates(func() {
err := srv.agents.RecommendLatestConfigToAll(srv.agentConfigProvider)
if err != nil {
zap.S().Errorf(
"could not roll out latest config recommendation to connected agents: %w", err,
zap.L().Error(
"could not roll out latest config recommendation to connected agents", zap.Error(err),
)
}
})
@ -85,15 +85,14 @@ func (srv *Server) OnMessage(conn types.Connection, msg *protobufs.AgentToServer
agent, created, err := srv.agents.FindOrCreateAgent(agentID, conn)
if err != nil {
zap.S().Error("Failed to find or create agent %q: %v", agentID, err)
zap.L().Error("Failed to find or create agent", zap.String("agentID", agentID), zap.Error(err))
// TODO: handle error
}
if created {
agent.CanLB = model.ExtractLbFlag(msg.AgentDescription)
zap.S().Debugf(
"New agent added:",
zap.Bool("canLb", agent.CanLB),
zap.L().Debug(
"New agent added", zap.Bool("canLb", agent.CanLB),
zap.String("ID", agent.ID),
zap.Any("status", agent.CurrentStatus),
)
@ -117,7 +116,7 @@ func Ready() bool {
return false
}
if opAmpServer.agents.Count() == 0 {
zap.S().Warnf("no agents available, all agent config requests will be rejected")
zap.L().Warn("no agents available, all agent config requests will be rejected")
return false
}
return true

View File

@ -89,7 +89,7 @@ func RemoveFromMetricsPipelineSpec(name string) {
func checkDuplicates(pipeline []interface{}) bool {
exists := make(map[string]bool, len(pipeline))
zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline)
zap.L().Debug("checking duplicate processors in the pipeline", zap.Any("pipeline", pipeline))
for _, processor := range pipeline {
name := processor.(string)
if _, ok := exists[name]; ok {
@ -149,7 +149,7 @@ func buildPipeline(signal Signal, current []interface{}) ([]interface{}, error)
currentPos := loc + inserts
// if disabled then remove from the pipeline
if !m.Enabled {
zap.S().Debugf("build_pipeline: found a disabled item, removing from pipeline at position", currentPos-1, " ", m.Name)
zap.L().Debug("build_pipeline: found a disabled item, removing from pipeline at position", zap.Int("position", currentPos-1), zap.String("processor", m.Name))
if currentPos-1 <= 0 {
pipeline = pipeline[currentPos+1:]
} else {
@ -170,10 +170,10 @@ func buildPipeline(signal Signal, current []interface{}) ([]interface{}, error)
// right after last matched processsor (e.g. insert filters after tail_sampling for existing list of [batch, tail_sampling])
if lastMatched <= 0 {
zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position 0:", m.Name)
zap.L().Debug("build_pipeline: found a new item to be inserted, inserting at position 0", zap.String("processor", m.Name))
pipeline = append([]interface{}{m.Name}, pipeline[lastMatched+1:]...)
} else {
zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position :", lastMatched, " ", m.Name)
zap.L().Debug("build_pipeline: found a new item to be inserted, inserting at position", zap.Int("position", lastMatched), zap.String("processor", m.Name))
prior := make([]interface{}, len(pipeline[:lastMatched]))
next := make([]interface{}, len(pipeline[lastMatched:]))
copy(prior, pipeline[:lastMatched])

View File

@ -116,7 +116,7 @@ func (q *querier) runBuilderQuery(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@ -143,7 +143,7 @@ func (q *querier) runBuilderQuery(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
zap.S().Error("error unmarshalling cached data", zap.Error(err))
zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@ -154,7 +154,7 @@ func (q *querier) runBuilderQuery(
// caching the data
mergedSeriesData, marshallingErr = json.Marshal(mergedSeries)
if marshallingErr != nil {
zap.S().Error("error marshalling merged series", zap.Error(marshallingErr))
zap.L().Error("error marshalling merged series", zap.Error(marshallingErr))
}
}
@ -172,7 +172,7 @@ func (q *querier) runBuilderQuery(
// caching the data
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
zap.S().Error("error storing merged series", zap.Error(err))
zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
@ -251,7 +251,7 @@ func (q *querier) runBuilderQuery(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@ -290,7 +290,7 @@ func (q *querier) runBuilderQuery(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
zap.S().Error("error unmarshalling cached data", zap.Error(err))
zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
var mergedSeriesData []byte
@ -300,7 +300,7 @@ func (q *querier) runBuilderQuery(
// caching the data
mergedSeriesData, marshallingErr = json.Marshal(mergedSeries)
if marshallingErr != nil {
zap.S().Error("error marshalling merged series", zap.Error(marshallingErr))
zap.L().Error("error marshalling merged series", zap.Error(marshallingErr))
}
}
@ -316,7 +316,7 @@ func (q *querier) runBuilderQuery(
if missedSeriesLen > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil {
err := q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
zap.S().Error("error storing merged series", zap.Error(err))
zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
@ -353,7 +353,7 @@ func (q *querier) runBuilderExpression(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@ -379,7 +379,7 @@ func (q *querier) runBuilderExpression(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
zap.S().Error("error unmarshalling cached data", zap.Error(err))
zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@ -390,7 +390,7 @@ func (q *querier) runBuilderExpression(
// caching the data
mergedSeriesData, marshallingErr = json.Marshal(mergedSeries)
if marshallingErr != nil {
zap.S().Error("error marshalling merged series", zap.Error(marshallingErr))
zap.L().Error("error marshalling merged series", zap.Error(marshallingErr))
}
}
@ -406,7 +406,7 @@ func (q *querier) runBuilderExpression(
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil {
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
zap.S().Error("error storing merged series", zap.Error(err))
zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
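
A condensed, runnable sketch of the cache flow these querier hunks log around, using a hypothetical byte-slice cache and series type: a corrupt cached entry is logged and tolerated, cached and fresh series are merged, and a failed marshal or store only skips caching instead of failing the query.

package main

import (
	"encoding/json"
	"time"

	"go.uber.org/zap"
)

// series is a hypothetical stand-in for the querier's series type.
type series struct {
	Name string `json:"name"`
}

// byteCache is a hypothetical stand-in for the cache dependency used above.
type byteCache interface {
	Retrieve(key string) ([]byte, bool)
	Store(key string, value []byte, ttl time.Duration) error
}

// memCache is an in-memory implementation so the sketch runs on its own.
type memCache map[string][]byte

func (m memCache) Retrieve(key string) ([]byte, bool) {
	v, ok := m[key]
	return v, ok
}

func (m memCache) Store(key string, value []byte, _ time.Duration) error {
	m[key] = value
	return nil
}

func queryWithCache(c byteCache, key string, fresh []series) []series {
	var cached []series
	if data, ok := c.Retrieve(key); ok {
		if err := json.Unmarshal(data, &cached); err != nil {
			// A corrupt cache entry is logged and tolerated; the query still runs.
			zap.L().Error("error unmarshalling cached data", zap.Error(err))
			cached = nil
		}
	}

	merged := append(cached, fresh...)

	if data, err := json.Marshal(merged); err != nil {
		// A failed marshal only skips the store step, it does not fail the query.
		zap.L().Error("error marshalling merged series", zap.Error(err))
	} else if err := c.Store(key, data, time.Hour); err != nil {
		zap.L().Error("error storing merged series", zap.Error(err))
	}
	return merged
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	cache := memCache{}
	queryWithCache(cache, "q1", []series{{Name: "cpu"}})
	queryWithCache(cache, "q1", []series{{Name: "mem"}})
}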

View File

@ -108,7 +108,7 @@ func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3.
series.Points = points
}
if pointsWithNegativeTimestamps > 0 {
zap.S().Errorf("found points with negative timestamps for query %s", query)
zap.L().Error("found points with negative timestamps for query", zap.String("query", query))
}
return result, err
}
@ -346,7 +346,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
// Ensure NoCache is not set and cache is not nil
if !params.NoCache && q.cache != nil {
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@ -365,7 +365,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
// ideally we should not be getting an error here
zap.S().Error("error unmarshalling cached data", zap.Error(err))
zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@ -375,12 +375,12 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
zap.S().Error("error marshalling merged series", zap.Error(err))
zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
zap.S().Error("error storing merged series", zap.Error(err))
zap.L().Error("error storing merged series", zap.Error(err))
return
}
}

View File

@ -169,7 +169,7 @@ func (q *querier) runBuilderQuery(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@ -208,7 +208,7 @@ func (q *querier) runBuilderQuery(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
zap.S().Error("error unmarshalling cached data", zap.Error(err))
zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@ -221,12 +221,12 @@ func (q *querier) runBuilderQuery(
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
zap.S().Error("error marshalling merged series", zap.Error(err))
zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
zap.S().Error("error storing merged series", zap.Error(err))
zap.L().Error("error storing merged series", zap.Error(err))
return
}
}
@ -263,7 +263,7 @@ func (q *querier) runBuilderExpression(
if !params.NoCache && q.cache != nil {
var retrieveStatus status.RetrieveStatus
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@ -289,7 +289,7 @@ func (q *querier) runBuilderExpression(
missedSeries = append(missedSeries, series...)
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
zap.S().Error("error unmarshalling cached data", zap.Error(err))
zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@ -302,12 +302,12 @@ func (q *querier) runBuilderExpression(
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
zap.S().Error("error marshalling merged series", zap.Error(err))
zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
zap.S().Error("error storing merged series", zap.Error(err))
zap.L().Error("error storing merged series", zap.Error(err))
return
}
}

View File

@ -108,7 +108,7 @@ func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3.
series.Points = points
}
if pointsWithNegativeTimestamps > 0 {
zap.S().Errorf("found points with negative timestamps for query %s", query)
zap.L().Error("found points with negative timestamps for query", zap.String("query", query))
}
return result, err
}
@ -326,7 +326,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
// Ensure NoCache is not set and cache is not nil
if !params.NoCache && q.cache != nil {
data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true)
zap.S().Infof("cache retrieve status: %s", retrieveStatus.String())
zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String()))
if err == nil {
cachedData = data
}
@ -345,7 +345,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
}
if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil {
// ideally we should not be getting an error here
zap.S().Error("error unmarshalling cached data", zap.Error(err))
zap.L().Error("error unmarshalling cached data", zap.Error(err))
}
mergedSeries := mergeSerieses(cachedSeries, missedSeries)
@ -355,12 +355,12 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam
if len(missedSeries) > 0 && !params.NoCache && q.cache != nil {
mergedSeriesData, err := json.Marshal(mergedSeries)
if err != nil {
zap.S().Error("error marshalling merged series", zap.Error(err))
zap.L().Error("error marshalling merged series", zap.Error(err))
return
}
err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour)
if err != nil {
zap.S().Error("error storing merged series", zap.Error(err))
zap.L().Error("error storing merged series", zap.Error(err))
return
}
}

View File

@ -246,7 +246,7 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
}
queries[queryName] = queryString
default:
zap.S().Errorf("Unknown data source %s", query.DataSource)
zap.L().Error("Unknown data source", zap.String("dataSource", string(query.DataSource)))
}
}
}

View File

@ -115,7 +115,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
var reader interfaces.Reader
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
zap.S().Info("Using ClickHouse as datastore ...")
zap.L().Info("Using ClickHouse as datastore ...")
clickhouseReader := clickhouseReader.NewReader(
localDB,
serverOptions.PromConfigPath,
@ -304,7 +304,7 @@ func loggingMiddleware(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.S().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
zap.L().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path))
})
}
@ -375,7 +375,7 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler {
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.S().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
zap.L().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
})
}
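
A runnable sketch of the timing middleware above, using plain net/http instead of gorilla/mux: the path and elapsed time are attached as typed fields so they remain queryable rather than being concatenated into the message.

package main

import (
	"net/http"
	"time"

	"go.uber.org/zap"
)

// loggingMiddleware logs the request path and time taken for every request it wraps.
func loggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		zap.L().Info("request processed",
			zap.String("path", r.URL.Path),
			zap.Duration("timeTaken", time.Since(start)))
	})
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	zap.ReplaceGlobals(logger)

	mux := http.NewServeMux()
	mux.Handle("/health", loggingMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})))
	// Assumption: port 8080 is free; this call blocks while serving requests.
	_ = http.ListenAndServe(":8080", mux)
}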
@ -550,7 +550,7 @@ func (s *Server) initListeners() error {
return err
}
zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
// listen on private port to support internal services
privateHostPort := s.serverOptions.PrivateHostPort
@ -563,7 +563,7 @@ func (s *Server) initListeners() error {
if err != nil {
return err
}
zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
return nil
}
@ -575,7 +575,7 @@ func (s *Server) Start() error {
if !s.serverOptions.DisableRules {
s.ruleManager.Start()
} else {
zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
}
err := s.initListeners()
@ -589,23 +589,23 @@ func (s *Server) Start() error {
}
go func() {
zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))
switch err := s.httpServer.Serve(s.httpConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
default:
zap.S().Error("Could not start HTTP server", zap.Error(err))
zap.L().Error("Could not start HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
}()
go func() {
zap.S().Info("Starting pprof server", zap.String("addr", constants.DebugHttpPort))
zap.L().Info("Starting pprof server", zap.String("addr", constants.DebugHttpPort))
err = http.ListenAndServe(constants.DebugHttpPort, nil)
if err != nil {
zap.S().Error("Could not start pprof server", zap.Error(err))
zap.L().Error("Could not start pprof server", zap.Error(err))
}
}()
@ -615,14 +615,14 @@ func (s *Server) Start() error {
}
fmt.Println("starting private http")
go func() {
zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))
switch err := s.privateHTTP.Serve(s.privateConn); err {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
zap.S().Info("private http server closed")
zap.L().Info("private http server closed")
default:
zap.S().Error("Could not start private HTTP server", zap.Error(err))
zap.L().Error("Could not start private HTTP server", zap.Error(err))
}
s.unavailableChannel <- healthcheck.Unavailable
@ -630,10 +630,10 @@ func (s *Server) Start() error {
}()
go func() {
zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", constants.OpAmpWsEndpoint))
zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", constants.OpAmpWsEndpoint))
err := s.opampServer.Start(constants.OpAmpWsEndpoint)
if err != nil {
zap.S().Info("opamp ws server failed to start", err)
zap.L().Info("opamp ws server failed to start", zap.Error(err))
s.unavailableChannel <- healthcheck.Unavailable
}
}()
@ -706,7 +706,7 @@ func makeRulesManager(
return nil, fmt.Errorf("rule manager error: %v", err)
}
zap.S().Info("rules manager is ready")
zap.L().Info("rules manager is ready")
return manager, nil
}

View File

@ -40,7 +40,7 @@ type InviteEmailData struct {
// The root user should be able to invite people to create account on SigNoz cluster.
func Invite(ctx context.Context, req *model.InviteRequest) (*model.InviteResponse, error) {
zap.S().Debugf("Got an invite request for email: %s\n", req.Email)
zap.L().Debug("Got an invite request for email", zap.String("email", req.Email))
token, err := utils.RandomHex(opaqueTokenSize)
if err != nil {
@ -110,13 +110,13 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string)
tmpl, err := template.ParseFiles(constants.InviteEmailTemplate)
if err != nil {
zap.S().Errorf("failed to send email", err)
zap.L().Error("failed to send email", zap.Error(err))
return
}
var body bytes.Buffer
if err := tmpl.Execute(&body, data); err != nil {
zap.S().Errorf("failed to send email", err)
zap.L().Error("failed to send email", zap.Error(err))
return
}
@ -126,7 +126,7 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string)
body.String(),
)
if err != nil {
zap.S().Errorf("failed to send email", err)
zap.L().Error("failed to send email", zap.Error(err))
return
}
return
@ -134,7 +134,7 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string)
// RevokeInvite is used to revoke the invitation for the given email.
func RevokeInvite(ctx context.Context, email string) error {
zap.S().Debugf("RevokeInvite method invoked for email: %s\n", email)
zap.L().Debug("RevokeInvite method invoked for email", zap.String("email", email))
if !isValidEmail(email) {
return ErrorInvalidInviteToken
@ -148,7 +148,7 @@ func RevokeInvite(ctx context.Context, email string) error {
// GetInvite returns an invitation object for the given token.
func GetInvite(ctx context.Context, token string) (*model.InvitationResponseObject, error) {
zap.S().Debugf("GetInvite method invoked for token: %s\n", token)
zap.L().Debug("GetInvite method invoked for token", zap.String("token", token))
inv, apiErr := dao.DB().GetInviteFromToken(ctx, token)
if apiErr != nil {
@ -282,13 +282,13 @@ func RegisterFirstUser(ctx context.Context, req *RegisterRequest) (*model.User,
org, apierr := dao.DB().CreateOrg(ctx,
&model.Organization{Name: req.OrgName})
if apierr != nil {
zap.S().Debugf("CreateOrg failed, err: %v\n", zap.Error(apierr.ToError()))
zap.L().Error("CreateOrg failed", zap.Error(apierr.ToError()))
return nil, apierr
}
group, apiErr := dao.DB().GetGroupByName(ctx, groupName)
if apiErr != nil {
zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
zap.L().Error("GetGroupByName failed", zap.Error(apiErr.Err))
return nil, apiErr
}
@ -297,7 +297,7 @@ func RegisterFirstUser(ctx context.Context, req *RegisterRequest) (*model.User,
hash, err = PasswordHash(req.Password)
if err != nil {
zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err))
zap.L().Error("failed to generate password hash when registering a user", zap.Error(err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
@ -328,7 +328,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
invite, err := ValidateInvite(ctx, req)
if err != nil {
zap.S().Errorf("failed to validate invite token", err)
zap.L().Error("failed to validate invite token", zap.Error(err))
return nil, model.BadRequest(model.ErrSignupFailed{})
}
@ -337,7 +337,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
// in the same transaction at the end of this function
userPayload, apierr := dao.DB().GetUserByEmail(ctx, invite.Email)
if apierr != nil {
zap.S().Debugf("failed to get user by email", apierr.Err)
zap.L().Error("failed to get user by email", zap.Error(apierr.Err))
return nil, apierr
}
@ -347,7 +347,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
}
if invite.OrgId == "" {
zap.S().Errorf("failed to find org in the invite")
zap.L().Error("failed to find org in the invite")
return nil, model.InternalError(fmt.Errorf("invalid invite, org not found"))
}
@ -358,7 +358,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
group, apiErr := dao.DB().GetGroupByName(ctx, invite.Role)
if apiErr != nil {
zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err)
zap.L().Error("GetGroupByName failed", zap.Error(apiErr.Err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
@ -368,13 +368,13 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
if req.Password != "" {
hash, err = PasswordHash(req.Password)
if err != nil {
zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err))
zap.L().Error("failed to generate password hash when registering a user", zap.Error(err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
} else {
hash, err = PasswordHash(utils.GeneratePassowrd())
if err != nil {
zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err))
zap.L().Error("failed to generate password hash when registering a user", zap.Error(err))
return nil, model.InternalError(model.ErrSignupFailed{})
}
}
@ -393,13 +393,13 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
// TODO(Ahsan): Ideally create user and delete invitation should happen in a txn.
user, apiErr = dao.DB().CreateUser(ctx, user, false)
if apiErr != nil {
zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err)
zap.L().Error("CreateUser failed", zap.Error(apiErr.Err))
return nil, apiErr
}
apiErr = dao.DB().DeleteInvitation(ctx, user.Email)
if apiErr != nil {
zap.S().Debugf("delete invitation failed, err: %v\n", apiErr.Err)
zap.L().Error("delete invitation failed", zap.Error(apiErr.Err))
return nil, apiErr
}
@ -428,17 +428,17 @@ func Register(ctx context.Context, req *RegisterRequest) (*model.User, *model.Ap
// Login method returns access and refresh tokens on successful login, else it errors out.
func Login(ctx context.Context, request *model.LoginRequest) (*model.LoginResponse, error) {
zap.S().Debugf("Login method called for user: %s\n", request.Email)
zap.L().Debug("Login method called for user", zap.String("email", request.Email))
user, err := authenticateLogin(ctx, request)
if err != nil {
zap.S().Debugf("Failed to authenticate login request, %v", err)
zap.L().Error("Failed to authenticate login request", zap.Error(err))
return nil, err
}
userjwt, err := GenerateJWTForUser(&user.User)
if err != nil {
zap.S().Debugf("Failed to generate JWT against login creds, %v", err)
zap.L().Error("Failed to generate JWT against login creds", zap.Error(err))
return nil, err
}

View File

@ -60,7 +60,7 @@ func validateUser(tok string) (*model.UserPayload, error) {
func AttachJwtToContext(ctx context.Context, r *http.Request) context.Context {
token, err := ExtractJwtFromRequest(r)
if err != nil {
zap.S().Debugf("Error while getting token from header, %v", err)
zap.L().Error("Error while getting token from header", zap.Error(err))
return ctx
}

View File

@ -59,7 +59,7 @@ func (c *cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.Ret
func (c *cache) SetTTL(cacheKey string, ttl time.Duration) {
err := c.client.Expire(context.Background(), cacheKey, ttl).Err()
if err != nil {
zap.S().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err))
zap.L().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err))
}
}
@ -67,7 +67,7 @@ func (c *cache) SetTTL(cacheKey string, ttl time.Duration) {
func (c *cache) Remove(cacheKey string) {
err := c.client.Del(context.Background(), cacheKey).Err()
if err != nil {
zap.S().Error("error deleting cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
zap.L().Error("error deleting cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
}
}
@ -102,7 +102,7 @@ func (c *cache) GetOptions() *Options {
func (c *cache) GetTTL(cacheKey string) time.Duration {
ttl, err := c.client.TTL(context.Background(), cacheKey).Result()
if err != nil {
zap.S().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
zap.L().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
}
return ttl
}

View File

@ -58,8 +58,8 @@ var InviteEmailTemplate = GetOrDefaultEnv("INVITE_EMAIL_TEMPLATE", "/root/templa
// Alert manager channel subpath
var AmChannelApiPath = GetOrDefaultEnv("ALERTMANAGER_API_CHANNEL_PATH", "v1/routes")
var OTLPTarget = GetOrDefaultEnv("OTLP_TARGET", "")
var LogExportBatchSize = GetOrDefaultEnv("LOG_EXPORT_BATCH_SIZE", "1000")
var OTLPTarget = GetOrDefaultEnv("OTEL_EXPORTER_OTLP_ENDPOINT", "")
var LogExportBatchSize = GetOrDefaultEnv("OTEL_BLRP_MAX_EXPORT_BATCH_SIZE", "512")
var RELATIONAL_DATASOURCE_PATH = GetOrDefaultEnv("SIGNOZ_LOCAL_DB_PATH", "/var/lib/signoz/signoz.db")

View File

@ -180,7 +180,7 @@ func (mds *ModelDaoSqlite) createGroupIfNotPresent(ctx context.Context,
return group, nil
}
zap.S().Debugf("%s is not found, creating it", name)
zap.L().Debug("group is not found, creating it", zap.String("group_name", name))
group, cErr := mds.CreateGroup(ctx, &model.Group{Name: name})
if cErr != nil {
return nil, cErr.Err

View File

@ -43,12 +43,12 @@ func (fm *FeatureManager) GetFeatureFlags() (model.FeatureSet, error) {
}
func (fm *FeatureManager) InitFeatures(req model.FeatureSet) error {
zap.S().Error("InitFeatures not implemented in OSS")
zap.L().Error("InitFeatures not implemented in OSS")
return nil
}
func (fm *FeatureManager) UpdateFeatureFlag(req model.Feature) error {
zap.S().Error("UpdateFeatureFlag not implemented in OSS")
zap.L().Error("UpdateFeatureFlag not implemented in OSS")
return nil
}
@ -63,4 +63,4 @@ func (fm *FeatureManager) GetFeatureFlag(key string) (model.Feature, error) {
}
}
return model.Feature{}, model.ErrFeatureUnavailable{Key: key}
}
}

View File

@ -83,13 +83,12 @@ func (m *manager) AddRoute(receiver *Receiver) *model.ApiError {
response, err := http.Post(amURL, contentType, bytes.NewBuffer(receiverString))
if err != nil {
zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(POST %s)\n", amURL), err)
zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in API call to alertmanager(POST %s)\n", amURL), response.Status)
zap.S().Error(err)
zap.L().Error("Error in getting 2xx response in API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return nil
@ -102,7 +101,7 @@ func (m *manager) EditRoute(receiver *Receiver) *model.ApiError {
req, err := http.NewRequest(http.MethodPut, amURL, bytes.NewBuffer(receiverString))
if err != nil {
zap.S().Errorf(fmt.Sprintf("Error creating new update request for API call to alertmanager(PUT %s)\n", amURL), err)
zap.L().Error("Error creating new update request for API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@ -112,13 +111,12 @@ func (m *manager) EditRoute(receiver *Receiver) *model.ApiError {
response, err := client.Do(req)
if err != nil {
zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(PUT %s)\n", amURL), err)
zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in PUT API call to alertmanager(PUT %s)\n", amURL), response.Status)
zap.S().Error(err)
zap.L().Error("Error in getting 2xx response in PUT API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return nil
@ -132,7 +130,7 @@ func (m *manager) DeleteRoute(name string) *model.ApiError {
req, err := http.NewRequest(http.MethodDelete, amURL, bytes.NewBuffer(requestData))
if err != nil {
zap.S().Errorf("Error in creating new delete request to alertmanager/v1/receivers\n", err)
zap.L().Error("Error in creating new delete request to alertmanager/v1/receivers", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
@ -142,13 +140,13 @@ func (m *manager) DeleteRoute(name string) *model.ApiError {
response, err := client.Do(req)
if err != nil {
zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(DELETE %s)\n", amURL), err)
zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 299 {
err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in PUT API call to alertmanager(DELETE %s)\n", amURL), response.Status)
zap.S().Error(err)
zap.L().Error("Error in getting 2xx response in PUT API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return nil
@ -162,19 +160,19 @@ func (m *manager) TestReceiver(receiver *Receiver) *model.ApiError {
response, err := http.Post(amTestURL, contentType, bytes.NewBuffer(receiverBytes))
if err != nil {
zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(POST %s)\n", amTestURL), err)
zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amTestURL), zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 201 && response.StatusCode < 400 {
err := fmt.Errorf(fmt.Sprintf("Invalid parameters in test alert api for alertmanager(POST %s)\n", amTestURL), response.Status)
zap.S().Error(err)
zap.L().Error("Invalid parameters in test alert api for alertmanager", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
if response.StatusCode > 400 {
err := fmt.Errorf(fmt.Sprintf("Received Server Error response for API call to alertmanager(POST %s)\n", amTestURL), response.Status)
zap.S().Error(err)
zap.L().Error("Received Server Error response for API call to alertmanager", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}

View File

@ -87,11 +87,11 @@ func NewNotifier(o *NotifierOptions, logger log.Logger) (*Notifier, error) {
amset, err := newAlertmanagerSet(o.AlertManagerURLs, timeout, logger)
if err != nil {
zap.S().Errorf("failed to parse alert manager urls")
zap.L().Error("failed to parse alert manager urls")
return n, err
}
n.alertmanagers = amset
zap.S().Info("Starting notifier with alert manager:", o.AlertManagerURLs)
zap.L().Info("Starting notifier with alert manager", zap.Strings("urls", o.AlertManagerURLs))
return n, nil
}
@ -123,7 +123,7 @@ func (n *Notifier) nextBatch() []*Alert {
// Run dispatches notifications continuously.
func (n *Notifier) Run() {
zap.S().Info("msg: Initiating alert notifier...")
zap.L().Info("msg: Initiating alert notifier...")
for {
select {
case <-n.ctx.Done():
@ -133,7 +133,7 @@ func (n *Notifier) Run() {
alerts := n.nextBatch()
if !n.sendAll(alerts...) {
zap.S().Warn("msg: dropped alerts", "\t count:", len(alerts))
zap.L().Warn("msg: dropped alerts", zap.Int("count", len(alerts)))
// n.metrics.dropped.Add(float64(len(alerts)))
}
// If the queue still has items left, kick off the next iteration.
@ -205,7 +205,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool {
b, err := json.Marshal(alerts)
if err != nil {
zap.S().Errorf("msg", "Encoding alerts failed", "err", err)
zap.L().Error("Encoding alerts failed", zap.Error(err))
return false
}
@ -229,7 +229,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool {
go func(ams *alertmanagerSet, am Manager) {
u := am.URLPath(alertPushEndpoint).String()
if err := n.sendOne(ctx, ams.client, u, b); err != nil {
zap.S().Errorf("alertmanager", u, "count", len(alerts), "msg", "Error calling alert API", "err", err)
zap.L().Error("Error calling alert API", zap.String("alertmanager", u), zap.Int("count", len(alerts)), zap.Error(err))
} else {
atomic.AddUint64(&numSuccess, 1)
}

View File

@ -18,7 +18,7 @@ import (
)
func initZapLog() *zap.Logger {
config := zap.NewDevelopmentConfig()
config := zap.NewProductionConfig()
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
config.EncoderConfig.TimeKey = "timestamp"
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
@ -85,9 +85,9 @@ func main() {
auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
if len(auth.JwtSecret) == 0 {
zap.S().Warn("No JWT secret key is specified.")
zap.L().Warn("No JWT secret key is specified.")
} else {
zap.S().Info("No JWT secret key set successfully.")
zap.L().Info("No JWT secret key set successfully.")
}
server, err := app.NewServer(serverOptions)
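// Illustrative sketch (hypothetical, not part of this commit): for the zap.L()
// calls throughout the service to emit anything, the logger built by a function
// like initZapLog above must be installed as the global logger at startup.
// Until zap.ReplaceGlobals is called, zap.L() and zap.S() return a no-op logger
// and every log call is silently dropped. Field names and values below are
// placeholders.
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	config := zap.NewProductionConfig() // JSON output, Info level by default
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

	logger, err := config.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// Swap the package-level logger used by zap.L()/zap.S(); the returned
	// function restores the previous global logger.
	undo := zap.ReplaceGlobals(logger)
	defer undo()

	zap.L().Info("query service started", zap.String("version", "dev"))
}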

View File

@ -143,11 +143,11 @@ func exprFormattedValue(v interface{}) string {
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool:
return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
default:
zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
return ""
}
default:
zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
return ""
}
}

View File

@ -15,15 +15,9 @@ import (
// this file contains common structs and methods used by
// rule engine
// how long before re-sending the alert
const resolvedRetention = 15 * time.Minute
const (
// AlertMetricName is the metric name for synthetic alert timeseries.
alertMetricName = "ALERTS"
// AlertForStateMetricName is the metric name for 'for' state of alert.
alertForStateMetricName = "ALERTS_FOR_STATE"
// how long before re-sending the alert
resolvedRetention = 15 * time.Minute
TestAlertPostFix = "_TEST_ALERT"
)

View File

@ -10,7 +10,6 @@ import (
"github.com/pkg/errors"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.uber.org/zap"
"go.signoz.io/signoz/pkg/query-service/utils/times"
"go.signoz.io/signoz/pkg/query-service/utils/timestamp"
@ -74,18 +73,15 @@ func parseIntoRule(initRule PostableRule, content []byte, kind string) (*Postabl
var err error
if kind == "json" {
if err = json.Unmarshal(content, rule); err != nil {
zap.S().Debugf("postable rule content", string(content), "\t kind:", kind)
return nil, []error{fmt.Errorf("failed to load json")}
}
} else if kind == "yaml" {
if err = yaml.Unmarshal(content, rule); err != nil {
zap.S().Debugf("postable rule content", string(content), "\t kind:", kind)
return nil, []error{fmt.Errorf("failed to load yaml")}
}
} else {
return nil, []error{fmt.Errorf("invalid data type")}
}
zap.S().Debugf("postable rule(parsed):", rule)
if rule.RuleCondition == nil && rule.Expr != "" {
// account for legacy rules
@ -126,8 +122,6 @@ func parseIntoRule(initRule PostableRule, content []byte, kind string) (*Postabl
}
}
zap.S().Debugf("postable rule:", rule, "\t condition", rule.RuleCondition.String())
if errs := rule.Validate(); len(errs) > 0 {
return nil, errs
}

View File

@ -73,7 +73,7 @@ func (r *ruleDB) CreateRuleTx(ctx context.Context, rule string) (int64, Tx, erro
stmt, err := tx.Prepare(`INSERT into rules (created_at, created_by, updated_at, updated_by, data) VALUES($1,$2,$3,$4,$5);`)
if err != nil {
zap.S().Errorf("Error in preparing statement for INSERT to rules\n", err)
zap.L().Error("Error in preparing statement for INSERT to rules", zap.Error(err))
tx.Rollback()
return lastInsertId, nil, err
}
@ -82,14 +82,14 @@ func (r *ruleDB) CreateRuleTx(ctx context.Context, rule string) (int64, Tx, erro
result, err := stmt.Exec(createdAt, userEmail, updatedAt, userEmail, rule)
if err != nil {
zap.S().Errorf("Error in Executing prepared statement for INSERT to rules\n", err)
zap.L().Error("Error in Executing prepared statement for INSERT to rules", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return lastInsertId, nil, err
}
lastInsertId, err = result.LastInsertId()
if err != nil {
zap.S().Errorf("Error in getting last insert id for INSERT to rules\n", err)
zap.L().Error("Error in getting last insert id for INSERT to rules\n", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return lastInsertId, nil, err
}
@ -122,14 +122,14 @@ func (r *ruleDB) EditRuleTx(ctx context.Context, rule string, id string) (string
//}
stmt, err := r.Prepare(`UPDATE rules SET updated_by=$1, updated_at=$2, data=$3 WHERE id=$4;`)
if err != nil {
zap.S().Errorf("Error in preparing statement for UPDATE to rules\n", err)
zap.L().Error("Error in preparing statement for UPDATE to rules", zap.Error(err))
// tx.Rollback()
return groupName, nil, err
}
defer stmt.Close()
if _, err := stmt.Exec(userEmail, updatedAt, rule, idInt); err != nil {
zap.S().Errorf("Error in Executing prepared statement for UPDATE to rules\n", err)
zap.L().Error("Error in Executing prepared statement for UPDATE to rules", zap.Error(err))
// tx.Rollback() // return an error too, we may want to wrap them
return groupName, nil, err
}
@ -158,7 +158,7 @@ func (r *ruleDB) DeleteRuleTx(ctx context.Context, id string) (string, Tx, error
defer stmt.Close()
if _, err := stmt.Exec(idInt); err != nil {
zap.S().Errorf("Error in Executing prepared statement for DELETE to rules\n", err)
zap.L().Error("Error in Executing prepared statement for DELETE to rules", zap.Error(err))
// tx.Rollback()
return groupName, nil, err
}
@ -175,7 +175,7 @@ func (r *ruleDB) GetStoredRules(ctx context.Context) ([]StoredRule, error) {
err := r.Select(&rules, query)
if err != nil {
zap.S().Debug("Error in processing sql query: ", err)
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, err
}
@ -193,10 +193,10 @@ func (r *ruleDB) GetStoredRule(ctx context.Context, id string) (*StoredRule, err
query := fmt.Sprintf("SELECT id, created_at, created_by, updated_at, updated_by, data FROM rules WHERE id=%d", intId)
err = r.Get(rule, query)
// zap.S().Info(query)
// zap.L().Info(query)
if err != nil {
zap.S().Error("Error in processing sql query: ", err)
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, err
}

View File

@ -125,7 +125,7 @@ func NewManager(o *ManagerOptions) (*Manager, error) {
func (m *Manager) Start() {
if err := m.initiate(); err != nil {
zap.S().Errorf("failed to initialize alerting rules manager: %v", err)
zap.L().Error("failed to initialize alerting rules manager", zap.Error(err))
}
m.run()
}
@ -154,40 +154,40 @@ func (m *Manager) initiate() error {
if len(errs) > 0 {
if errs[0].Error() == "failed to load json" {
zap.S().Info("failed to load rule in json format, trying yaml now:", rec.Data)
zap.L().Info("failed to load rule in json format, trying yaml now:", zap.String("name", taskName))
// see if rule is stored in yaml format
parsedRule, errs = parsePostableRule([]byte(rec.Data), "yaml")
if parsedRule == nil {
zap.S().Errorf("failed to parse and initialize yaml rule:", errs)
zap.L().Error("failed to parse and initialize yaml rule", zap.String("name", taskName), zap.Error(err))
// just one rule is being parsed so expect just one error
loadErrors = append(loadErrors, errs[0])
continue
} else {
// rule stored in yaml, so migrate it to json
zap.S().Info("msg:", "migrating rule from JSON to yaml", "\t rule:", rec.Data, "\t parsed rule:", parsedRule)
zap.L().Info("migrating rule from JSON to yaml", zap.String("name", taskName))
ruleJSON, err := json.Marshal(parsedRule)
if err == nil {
taskName, _, err := m.ruleDB.EditRuleTx(context.Background(), string(ruleJSON), fmt.Sprintf("%d", rec.Id))
if err != nil {
zap.S().Errorf("msg: failed to migrate rule ", "/t error:", err)
zap.L().Error("failed to migrate rule", zap.String("name", taskName), zap.Error(err))
} else {
zap.S().Info("msg:", "migrated rule from yaml to json", "/t rule:", taskName)
zap.L().Info("migrated rule from yaml to json", zap.String("name", taskName))
}
}
}
} else {
zap.S().Errorf("failed to parse and initialize rule:", errs)
zap.L().Error("failed to parse and initialize rule", zap.String("name", taskName), zap.Error(err))
// just one rule is being parsed so expect just one error
loadErrors = append(loadErrors, errs[0])
continue
}
}
if !parsedRule.Disabled {
err := m.addTask(parsedRule, taskName)
if err != nil {
zap.S().Errorf("failed to load the rule definition (%s): %v", taskName, err)
zap.L().Error("failed to load the rule definition", zap.String("name", taskName), zap.Error(err))
}
}
}
@ -213,13 +213,13 @@ func (m *Manager) Stop() {
m.mtx.Lock()
defer m.mtx.Unlock()
zap.S().Info("msg: ", "Stopping rule manager...")
zap.L().Info("Stopping rule manager...")
for _, t := range m.tasks {
t.Stop()
}
zap.S().Info("msg: ", "Rule manager stopped")
zap.L().Info("Rule manager stopped")
}
// EditRuleDefinition writes the rule definition to the
@ -230,7 +230,7 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error
currentRule, err := m.GetRule(ctx, id)
if err != nil {
zap.S().Errorf("msg: ", "failed to get the rule from rule db", "\t ruleid: ", id)
zap.L().Error("failed to get the rule from rule db", zap.String("id", id), zap.Error(err))
return err
}
@ -243,7 +243,7 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error
}
if len(errs) > 0 {
zap.S().Errorf("failed to parse rules:", errs)
zap.L().Error("failed to parse rules", zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
return errs[0]
}
@ -264,13 +264,13 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error
if !checkIfTraceOrLogQB(&currentRule.PostableRule) {
err = m.updateFeatureUsage(parsedRule, 1)
if err != nil {
zap.S().Errorf("error updating feature usage: %v", err)
zap.L().Error("error updating feature usage", zap.Error(err))
}
// update feature usage if the new rule is not a trace or log query builder and the current rule is
} else if !checkIfTraceOrLogQB(parsedRule) {
err = m.updateFeatureUsage(&currentRule.PostableRule, -1)
if err != nil {
zap.S().Errorf("error updating feature usage: %v", err)
zap.L().Error("error updating feature usage", zap.Error(err))
}
}
@ -281,12 +281,12 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error {
m.mtx.Lock()
defer m.mtx.Unlock()
zap.S().Debugf("msg:", "editing a rule task", "\t task name:", taskName)
zap.L().Debug("editing a rule task", zap.String("name", taskName))
newTask, err := m.prepareTask(false, rule, taskName)
if err != nil {
zap.S().Errorf("msg:", "loading tasks failed", "\t err:", err)
zap.L().Error("loading tasks failed", zap.Error(err))
return errors.New("error preparing rule with given parameters, previous rule set restored")
}
@ -294,7 +294,7 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error {
// it to finish the current iteration. Then copy it into the new group.
oldTask, ok := m.tasks[taskName]
if !ok {
zap.S().Warnf("msg:", "rule task not found, a new task will be created ", "\t task name:", taskName)
zap.L().Warn("rule task not found, a new task will be created", zap.String("name", taskName))
}
delete(m.tasks, taskName)
@ -319,14 +319,14 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error {
idInt, err := strconv.Atoi(id)
if err != nil {
zap.S().Errorf("msg: ", "delete rule received an rule id in invalid format, must be a number", "\t ruleid:", id)
zap.L().Error("delete rule received an rule id in invalid format, must be a number", zap.String("id", id), zap.Error(err))
return fmt.Errorf("delete rule received an rule id in invalid format, must be a number")
}
// update feature usage
rule, err := m.GetRule(ctx, id)
if err != nil {
zap.S().Errorf("msg: ", "failed to get the rule from rule db", "\t ruleid: ", id)
zap.L().Error("failed to get the rule from rule db", zap.String("id", id), zap.Error(err))
return err
}
@ -336,13 +336,13 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error {
}
if _, _, err := m.ruleDB.DeleteRuleTx(ctx, id); err != nil {
zap.S().Errorf("msg: ", "failed to delete the rule from rule db", "\t ruleid: ", id)
zap.L().Error("failed to delete the rule from rule db", zap.String("id", id), zap.Error(err))
return err
}
err = m.updateFeatureUsage(&rule.PostableRule, -1)
if err != nil {
zap.S().Errorf("error updating feature usage: %v", err)
zap.L().Error("error updating feature usage", zap.Error(err))
}
return nil
@ -351,16 +351,16 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error {
func (m *Manager) deleteTask(taskName string) {
m.mtx.Lock()
defer m.mtx.Unlock()
zap.S().Debugf("msg:", "deleting a rule task", "\t task name:", taskName)
zap.L().Debug("deleting a rule task", zap.String("name", taskName))
oldg, ok := m.tasks[taskName]
if ok {
oldg.Stop()
delete(m.tasks, taskName)
delete(m.rules, ruleIdFromTaskName(taskName))
zap.S().Debugf("msg:", "rule task deleted", "\t task name:", taskName)
zap.L().Debug("rule task deleted", zap.String("name", taskName))
} else {
zap.S().Info("msg: ", "rule not found for deletion", "\t name:", taskName)
zap.L().Info("rule not found for deletion", zap.String("name", taskName))
}
}
@ -376,7 +376,7 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*GettableRule
}
if len(errs) > 0 {
zap.S().Errorf("failed to parse rules:", errs)
zap.L().Error("failed to parse rules", zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
return nil, errs[0]
}
@ -400,7 +400,7 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*GettableRule
// update feature usage
err = m.updateFeatureUsage(parsedRule, 1)
if err != nil {
zap.S().Errorf("error updating feature usage: %v", err)
zap.L().Error("error updating feature usage", zap.Error(err))
}
gettableRule := &GettableRule{
Id: fmt.Sprintf("%d", lastInsertId),
@ -438,10 +438,10 @@ func (m *Manager) checkFeatureUsage(parsedRule *PostableRule) error {
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
return model.BadRequest(err)
default:
zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err))
return model.BadRequest(err)
}
}
@ -466,11 +466,11 @@ func (m *Manager) addTask(rule *PostableRule, taskName string) error {
m.mtx.Lock()
defer m.mtx.Unlock()
zap.S().Debugf("msg:", "adding a new rule task", "\t task name:", taskName)
zap.L().Debug("adding a new rule task", zap.String("name", taskName))
newTask, err := m.prepareTask(false, rule, taskName)
if err != nil {
zap.S().Errorf("msg:", "creating rule task failed", "\t name:", taskName, "\t err", err)
zap.L().Error("creating rule task failed", zap.String("name", taskName), zap.Error(err))
return errors.New("error loading rules, previous rule set restored")
}
@ -504,7 +504,7 @@ func (m *Manager) prepareTask(acquireLock bool, r *PostableRule, taskName string
var task Task
if r.Alert == "" {
zap.S().Errorf("msg:", "task load failed, at least one rule must be set", "\t task name:", taskName)
zap.L().Error("task load failed, at least one rule must be set", zap.String("name", taskName))
return task, fmt.Errorf("task load failed, at least one rule must be set")
}
@ -686,7 +686,7 @@ func (m *Manager) ListRuleStates(ctx context.Context) (*GettableRules, error) {
ruleResponse := &GettableRule{}
if err := json.Unmarshal([]byte(s.Data), ruleResponse); err != nil { // Parse []byte to go struct pointer
zap.S().Errorf("msg:", "invalid rule data", "\t err:", err)
zap.L().Error("failed to unmarshal rule from db", zap.Int("id", s.Id), zap.Error(err))
continue
}
@ -779,28 +779,28 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleId string)
// retrieve rule from DB
storedJSON, err := m.ruleDB.GetStoredRule(ctx, ruleId)
if err != nil {
zap.S().Errorf("msg:", "failed to get stored rule with given id", "\t error:", err)
zap.L().Error("failed to get stored rule with given id", zap.String("id", ruleId), zap.Error(err))
return nil, err
}
// storedRule holds the current stored rule from DB
storedRule := PostableRule{}
if err := json.Unmarshal([]byte(storedJSON.Data), &storedRule); err != nil {
zap.S().Errorf("msg:", "failed to get unmarshal stored rule with given id", "\t error:", err)
zap.L().Error("failed to unmarshal stored rule with given id", zap.String("id", ruleId), zap.Error(err))
return nil, err
}
// patchedRule is combo of stored rule and patch received in the request
patchedRule, errs := parseIntoRule(storedRule, []byte(ruleStr), "json")
if len(errs) > 0 {
zap.S().Errorf("failed to parse rules:", errs)
zap.L().Error("failed to parse rules", zap.Errors("errors", errs))
// just one rule is being parsed so expect just one error
return nil, errs[0]
}
// deploy or un-deploy task according to patched (new) rule state
if err := m.syncRuleStateWithTask(taskName, patchedRule); err != nil {
zap.S().Errorf("failed to sync stored rule state with the task")
zap.L().Error("failed to sync stored rule state with the task", zap.String("taskName", taskName), zap.Error(err))
return nil, err
}
@ -816,7 +816,7 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleId string)
// restore task state from the stored rule
if err := m.syncRuleStateWithTask(taskName, &storedRule); err != nil {
zap.S().Errorf("msg: ", "failed to restore rule after patch failure", "\t error:", err)
zap.L().Error("failed to restore rule after patch failure", zap.String("taskName", taskName), zap.Error(err))
}
return nil, err
@ -846,7 +846,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
parsedRule, errs := ParsePostableRule([]byte(ruleStr))
if len(errs) > 0 {
zap.S().Errorf("msg: failed to parse rule from request:", "\t error: ", errs)
zap.L().Error("failed to parse rule from request", zap.Errors("errors", errs))
return 0, newApiErrorBadData(errs[0])
}
@ -882,7 +882,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
)
if err != nil {
zap.S().Errorf("msg: failed to prepare a new threshold rule for test:", "\t error: ", err)
zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, newApiErrorBadData(err)
}
@ -899,7 +899,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
)
if err != nil {
zap.S().Errorf("msg: failed to prepare a new promql rule for test:", "\t error: ", err)
zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, newApiErrorBadData(err)
}
} else {
@ -911,10 +911,13 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
count, err := rule.Eval(ctx, ts, m.opts.Queriers)
if err != nil {
zap.S().Warn("msg:", "Evaluating rule failed", "\t rule:", rule, "\t err: ", err)
zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err))
return 0, newApiErrorInternal(fmt.Errorf("rule evaluation failed"))
}
alertsFound := count.(int)
alertsFound, ok := count.(int)
if !ok {
return 0, newApiErrorInternal(fmt.Errorf("something went wrong"))
}
rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), m.prepareNotifyFunc())
return alertsFound, nil

View File

@ -94,7 +94,7 @@ func NewPromRule(
return nil, err
}
zap.S().Info("msg:", "creating new alerting rule", "\t name:", p.name, "\t condition:", p.ruleCondition.String(), "\t query:", query)
zap.L().Info("creating new alerting rule", zap.String("name", p.name), zap.String("condition", p.ruleCondition.String()), zap.String("query", query))
return &p, nil
}
@ -339,7 +339,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
if err != nil {
return nil, err
}
zap.S().Info("rule:", r.Name(), "\t evaluating promql query: ", q)
zap.L().Info("evaluating promql query", zap.String("name", r.Name()), zap.String("query", q))
res, err := queriers.PqlEngine.RunAlertQuery(ctx, q, start, end, interval)
if err != nil {
r.SetHealth(HealthBad)
@ -368,7 +368,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
if !shouldAlert {
continue
}
zap.S().Debugf("rule: %s, alerting for series: %v", r.Name(), series)
zap.L().Debug("alerting for series", zap.String("name", r.Name()), zap.Any("series", series))
thresholdFormatter := formatter.FromUnit(r.ruleCondition.TargetUnit)
threshold := thresholdFormatter.Format(r.targetVal(), r.ruleCondition.TargetUnit)
@ -435,7 +435,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
}
}
zap.S().Debugf("For rule: %s, found %d alerts", r.Name(), len(alerts))
zap.L().Debug("found alerts for rule", zap.Int("count", len(alerts)), zap.String("name", r.Name()))
// alerts[h] is ready, add or update active list now
for h, a := range alerts {
// Check whether we already have alerting state for the identifying label set.

View File

@ -40,7 +40,7 @@ type PromRuleTask struct {
// newPromRuleTask holds rules that have promql condition
// and evalutes the rule at a given frequency
func newPromRuleTask(name, file string, frequency time.Duration, rules []Rule, opts *ManagerOptions, notify NotifyFunc) *PromRuleTask {
zap.S().Info("Initiating a new rule group:", name, "\t frequency:", frequency)
zap.L().Info("Initiating a new rule group", zap.String("name", name), zap.Duration("frequency", frequency))
if time.Now() == time.Now().Add(frequency) {
frequency = DefaultFrequency
@ -312,7 +312,7 @@ func (g *PromRuleTask) CopyState(fromTask Task) error {
// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) {
zap.S().Info("promql rule task:", g.name, "\t eval started at:", ts)
zap.L().Info("promql rule task", zap.String("name", g.name), zap.Time("eval started at", ts))
for i, rule := range g.rules {
if rule == nil {
continue
@ -340,7 +340,7 @@ func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
zap.S().Warn("msg", "Evaluating rule failed", "rule", rule, "err", err)
zap.L().Warn("Evaluating rule failed", zap.String("ruleid", rule.ID()), zap.Error(err))
// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.

View File

@ -25,10 +25,8 @@ type RuleTask struct {
evaluationTime time.Duration
lastEvaluation time.Time
markStale bool
done chan struct{}
terminated chan struct{}
managerDone chan struct{}
done chan struct{}
terminated chan struct{}
pause bool
notify NotifyFunc
@ -42,7 +40,7 @@ func newRuleTask(name, file string, frequency time.Duration, rules []Rule, opts
if time.Now() == time.Now().Add(frequency) {
frequency = DefaultFrequency
}
zap.S().Info("msg:", "initiating a new rule task", "\t name:", name, "\t frequency:", frequency)
zap.L().Info("initiating a new rule task", zap.String("name", name), zap.Duration("frequency", frequency))
return &RuleTask{
name: name,
@ -91,7 +89,7 @@ func (g *RuleTask) Run(ctx context.Context) {
// Wait an initial amount to have consistently slotted intervals.
evalTimestamp := g.EvalTimestamp(time.Now().UnixNano()).Add(g.frequency)
zap.S().Debugf("group:", g.name, "\t group run to begin at: ", evalTimestamp)
zap.L().Debug("group run to begin at", zap.Time("evalTimestamp", evalTimestamp))
select {
case <-time.After(time.Until(evalTimestamp)):
case <-g.done:
@ -294,7 +292,7 @@ func (g *RuleTask) CopyState(fromTask Task) error {
// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
func (g *RuleTask) Eval(ctx context.Context, ts time.Time) {
zap.S().Debugf("msg:", "rule task eval started", "\t name:", g.name, "\t start time:", ts)
zap.L().Debug("rule task eval started", zap.String("name", g.name), zap.Time("start time", ts))
for i, rule := range g.rules {
if rule == nil {
@ -330,7 +328,7 @@ func (g *RuleTask) Eval(ctx context.Context, ts time.Time) {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
zap.S().Warn("msg:", "Evaluating rule failed", "\t rule:", rule, "\t err: ", err)
zap.L().Warn("Evaluating rule failed", zap.String("ruleid", rule.ID()), zap.Error(err))
// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.

View File

@ -135,7 +135,7 @@ func NewThresholdRule(
}
t.queryBuilderV4 = queryBuilder.NewQueryBuilder(builderOptsV4, featureFlags)
zap.S().Info("msg:", "creating new alerting rule", "\t name:", t.name, "\t condition:", t.ruleCondition.String(), "\t generatorURL:", t.GeneratorURL())
zap.L().Info("creating new ThresholdRule", zap.String("name", t.name), zap.String("id", t.id))
return &t, nil
}
@ -386,7 +386,7 @@ func (r *ThresholdRule) ForEachActiveAlert(f func(*Alert)) {
}
func (r *ThresholdRule) SendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) {
zap.S().Info("msg:", "sending alerts", "\t rule:", r.Name())
zap.L().Info("sending alerts", zap.String("rule", r.Name()))
alerts := []*Alert{}
r.ForEachActiveAlert(func(alert *Alert) {
if r.opts.SendAlways || alert.needsSending(ts, resendDelay) {
@ -400,7 +400,7 @@ func (r *ThresholdRule) SendAlerts(ctx context.Context, ts time.Time, resendDela
anew := *alert
alerts = append(alerts, &anew)
} else {
zap.S().Debugf("msg: skipping send alert due to resend delay", "\t rule: ", r.Name(), "\t alert:", alert.Labels)
zap.L().Debug("skipping send alert due to resend delay", zap.String("rule", r.Name()), zap.Any("alert", alert.Labels))
}
})
notifyFunc(ctx, "", alerts...)
@ -416,12 +416,12 @@ func (r *ThresholdRule) Unit() string {
func (r *ThresholdRule) CheckCondition(v float64) bool {
if math.IsNaN(v) {
zap.S().Debugf("msg:", "found NaN in rule condition", "\t rule name:", r.Name())
zap.L().Debug("found NaN in rule condition", zap.String("rule", r.Name()))
return false
}
if r.ruleCondition.Target == nil {
zap.S().Debugf("msg:", "found null target in rule condition", "\t rulename:", r.Name())
zap.L().Debug("found null target in rule condition", zap.String("rule", r.Name()))
return false
}
@ -429,7 +429,7 @@ func (r *ThresholdRule) CheckCondition(v float64) bool {
value := unitConverter.Convert(converter.Value{F: *r.ruleCondition.Target, U: converter.Unit(r.ruleCondition.TargetUnit)}, converter.Unit(r.Unit()))
zap.S().Debugf("Checking condition for rule: %s, Converter=%s, Value=%f, Target=%f, CompareOp=%s", r.Name(), unitConverter.Name(), v, value.F, r.ruleCondition.CompareOp)
zap.L().Info("Checking condition for rule", zap.String("rule", r.Name()), zap.String("converter", unitConverter.Name()), zap.Float64("value", v), zap.Float64("target", value.F), zap.String("compareOp", string(r.ruleCondition.CompareOp)))
switch r.ruleCondition.CompareOp {
case ValueIsEq:
return v == value.F
@ -496,7 +496,7 @@ func (r *ThresholdRule) shouldSkipFirstRecord() bool {
func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, query string) (Vector, error) {
rows, err := db.Query(ctx, query)
if err != nil {
zap.S().Errorf("rule:", r.Name(), "\t failed to get alert query result")
zap.L().Error("failed to get alert query result", zap.String("rule", r.Name()), zap.Error(err))
return nil, err
}
@ -604,7 +604,7 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
lblsOrig.Set(columnNames[i], fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
}
default:
zap.S().Errorf("ruleId:", r.ID(), "\t error: invalid var found in query result", v, columnNames[i])
zap.L().Error("invalid var found in query result", zap.String("ruleId", r.ID()), zap.Any("value", v), zap.Any("column", columnNames[i]))
}
}
@ -710,11 +710,11 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
}
}
zap.S().Debugf("ruleid:", r.ID(), "\t resultmap(potential alerts):", len(resultMap))
zap.L().Debug("resultmap(potential alerts)", zap.String("ruleid", r.ID()), zap.Int("count", len(resultMap)))
// if the data is missing for `For` duration then we should send alert
if r.ruleCondition.AlertOnAbsent && r.lastTimestampWithDatapoints.Add(r.Condition().AbsentFor*time.Minute).Before(time.Now()) {
zap.S().Debugf("ruleid:", r.ID(), "\t msg: no data found for rule condition")
zap.L().Info("no data found for rule condition", zap.String("ruleid", r.ID()))
lbls := labels.NewBuilder(labels.Labels{})
if !r.lastTimestampWithDatapoints.IsZero() {
lbls.Set("lastSeen", r.lastTimestampWithDatapoints.Format(constants.AlertTimeFormat))
@ -734,7 +734,7 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer
}
}
if len(result) != 0 {
zap.S().Infof("For rule %s, with ClickHouseQuery %s, found %d alerts", r.ID(), query, len(result))
zap.L().Info("found alerts", zap.String("ruleid", r.ID()), zap.String("query", query), zap.Int("count", len(result)))
}
return result, nil
}
@ -979,7 +979,7 @@ func (r *ThresholdRule) prepareClickhouseQueries(ts time.Time) (map[string]strin
}
if r.ruleCondition.QueryType() != v3.QueryTypeClickHouseSQL {
zap.S().Debugf("ruleid:", r.ID(), "\t msg: unsupported query type in prepareClickhouseQueries()")
zap.L().Error("unsupported query type in prepareClickhouseQueries", zap.String("ruleid", r.ID()))
return nil, fmt.Errorf("failed to prepare clickhouse queries")
}
@ -995,18 +995,17 @@ func (r *ThresholdRule) prepareClickhouseQueries(ts time.Time) (map[string]strin
tmpl := template.New("clickhouse-query")
tmpl, err := tmpl.Parse(chQuery.Query)
if err != nil {
zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to parse clickhouse query to populate vars", err)
zap.L().Error("failed to parse clickhouse query to populate vars", zap.String("ruleid", r.ID()), zap.Error(err))
r.SetHealth(HealthBad)
return nil, err
}
var query bytes.Buffer
err = tmpl.Execute(&query, params.Variables)
if err != nil {
zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to populate clickhouse query", err)
zap.L().Error("failed to populate clickhouse query", zap.String("ruleid", r.ID()), zap.Error(err))
r.SetHealth(HealthBad)
return nil, err
}
zap.S().Debugf("ruleid:", r.ID(), "\t query:", query.String())
queries[name] = query.String()
}
return queries, nil
@ -1023,13 +1022,13 @@ func (r *ThresholdRule) GetSelectedQuery() string {
if r.ruleCondition.QueryType() == v3.QueryTypeBuilder {
queries, err = r.prepareBuilderQueries(time.Now(), nil)
if err != nil {
zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare metric queries", zap.Error(err))
zap.L().Error("failed to prepare metric queries", zap.String("ruleid", r.ID()), zap.Error(err))
return ""
}
} else if r.ruleCondition.QueryType() == v3.QueryTypeClickHouseSQL {
queries, err = r.prepareClickhouseQueries(time.Now())
if err != nil {
zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare clickhouse queries", zap.Error(err))
zap.L().Error("failed to prepare clickhouse queries", zap.String("ruleid", r.ID()), zap.Error(err))
return ""
}
}
@ -1078,7 +1077,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c
queries, err = r.prepareBuilderQueries(ts, ch)
if err != nil {
zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare metric queries", zap.Error(err))
zap.L().Error("failed to prepare metric queries", zap.String("ruleid", r.ID()), zap.Error(err))
return nil, fmt.Errorf("failed to prepare metric queries")
}
@ -1087,7 +1086,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c
queries, err = r.prepareClickhouseQueries(ts)
if err != nil {
zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare clickhouse queries", zap.Error(err))
zap.L().Error("failed to prepare clickhouse queries", zap.String("ruleid", r.ID()), zap.Error(err))
return nil, fmt.Errorf("failed to prepare clickhouse queries")
}
@ -1099,16 +1098,16 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c
return nil, fmt.Errorf("no queries could be built with the rule config")
}
zap.S().Debugf("ruleid:", r.ID(), "\t runQueries:", queries)
zap.L().Info("prepared queries", zap.String("ruleid", r.ID()), zap.Any("queries", queries))
queryLabel := r.GetSelectedQuery()
zap.S().Debugf("ruleId: ", r.ID(), "\t result query label:", queryLabel)
zap.L().Debug("Selected query lable for rule", zap.String("ruleid", r.ID()), zap.String("label", queryLabel))
if queryString, ok := queries[queryLabel]; ok {
return r.runChQuery(ctx, ch, queryString)
}
zap.S().Errorf("ruleId: ", r.ID(), "\t invalid query label:", queryLabel, "\t queries:", queries)
zap.L().Error("invalid query label", zap.String("ruleid", r.ID()), zap.String("label", queryLabel), zap.Any("queries", queries))
return nil, fmt.Errorf("this is unexpected, invalid query label")
}
@ -1137,7 +1136,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
if err != nil {
r.SetHealth(HealthBad)
r.SetLastError(err)
zap.S().Debugf("ruleid:", r.ID(), "\t failure in buildAndRunQuery:", err)
zap.L().Error("failure in buildAndRunQuery", zap.String("ruleid", r.ID()), zap.Error(err))
return nil, err
}
@ -1156,7 +1155,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
value := valueFormatter.Format(smpl.V, r.Unit())
thresholdFormatter := formatter.FromUnit(r.ruleCondition.TargetUnit)
threshold := thresholdFormatter.Format(r.targetVal(), r.ruleCondition.TargetUnit)
zap.S().Debugf("Alert template data for rule %s: Formatter=%s, Value=%s, Threshold=%s", r.Name(), valueFormatter.Name(), value, threshold)
zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))
tmplData := AlertTemplateData(l, value, threshold)
// Inject some convenience variables that are easier to remember for users
@ -1177,7 +1176,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err)
zap.S().Errorf("msg:", "Expanding alert template failed", "\t err", err, "\t data", tmplData)
zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
}
return result
}
@ -1222,7 +1221,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
resultFPs[h] = struct{}{}
if _, ok := alerts[h]; ok {
zap.S().Errorf("ruleId: ", r.ID(), "\t msg:", "the alert query returns duplicate records:", alerts[h])
zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
// We have already acquired the lock above hence using SetHealth and
// SetLastError will deadlock.
@ -1242,7 +1241,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
}
}
zap.S().Info("rule:", r.Name(), "\t alerts found: ", len(alerts))
zap.L().Info("alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))
// alerts[h] is ready, add or update active list now
for h, a := range alerts {

View File

@ -467,7 +467,7 @@ func (a *Telemetry) SendEvent(event string, data map[string]interface{}, userEma
}
}
// zap.S().Info(data)
// zap.L().Info(data)
properties := analytics.NewProperties()
properties.Set("version", version.GetVersion())
properties.Set("deploymentType", getDeploymentType())

View File

@ -13,7 +13,6 @@ import (
"log"
minio "github.com/minio/minio-go/v6"
"go.uber.org/zap"
)
const (
@ -36,7 +35,7 @@ func init() {
} else if goArch == "amd64" {
composeFile = "./test-deploy/docker-compose.yaml"
} else {
zap.S().Fatalf("Unsupported architecture: %s", goArch)
log.Fatalf("Unsupported architecture: %s", goArch)
}
}

View File

@ -183,11 +183,11 @@ func ClickHouseFormattedValue(v interface{}) string {
case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool:
return strings.Join(strings.Fields(fmt.Sprint(x)), ",")
default:
zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0])))
return ""
}
default:
zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x)))
return ""
}
}

View File

@ -15,6 +15,6 @@ func Elapsed(funcName string, args ...interface{}) func() {
}
argsStr = argsStr[:len(argsStr)-2]
return func() {
zap.S().Infof("func %s took %v with args %v", funcName, time.Since(start), string(argsStr))
zap.L().Info("Elapsed time", zap.String("func_name", funcName), zap.Duration("duration", time.Since(start)), zap.String("args", argsStr))
}
}
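// Illustrative sketch (hypothetical, not part of this commit): how a timing
// helper shaped like the Elapsed function above is normally used. It records
// the start time when called and returns a closure that logs the duration, so
// callers defer the returned closure. The elapsed/processQuery names and the
// sleep are assumptions for illustration only.
package main

import (
	"time"

	"go.uber.org/zap"
)

func elapsed(funcName string, args ...interface{}) func() {
	start := time.Now()
	return func() {
		zap.L().Info("Elapsed time",
			zap.String("func_name", funcName),
			zap.Duration("duration", time.Since(start)),
			zap.Any("args", args))
	}
}

func processQuery(orgID string) {
	// Note the trailing (): elapsed(...) runs now, the returned closure runs on return.
	defer elapsed("processQuery", orgID)()
	time.Sleep(25 * time.Millisecond) // stand-in for real work
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()
	undo := zap.ReplaceGlobals(logger)
	defer undo()

	processQuery("org-123")
}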

View File

@ -3,8 +3,6 @@ package version
import (
"fmt"
"runtime"
"go.uber.org/zap"
)
// These fields are set during an official build
@ -40,7 +38,7 @@ Copyright 2022 SigNoz
// PrintVersion prints version and other helpful information.
func PrintVersion() {
zap.S().Infof("\n%s\n", BuildDetails())
fmt.Println(BuildDetails())
}
func GetVersion() string {