From c4b052c51ec3a22e25923aa92fb66665e6ca589b Mon Sep 17 00:00:00 2001 From: Rajat Dwivedi <113243168+rkmdCodes@users.noreply.github.com> Date: Tue, 14 Mar 2023 16:55:15 +0530 Subject: [PATCH 01/38] upgraded some deprecated packages (#2424) * fix: upgrade deprecated pkg * fix: reverted linebreak rules * chore: some of the refactoring is done regarding the performance --------- Co-authored-by: Chintan Sudani <46838508+techchintan@users.noreply.github.com> Co-authored-by: palashgdev --- .../TimePreferenceDropDown/index.tsx | 19 +++-- .../container/ConfigDropdown/Config/styles.ts | 17 ---- .../src/container/ConfigDropdown/index.tsx | 23 ++--- .../Header/{SignedInAs => SignedIn}/index.tsx | 21 +++-- frontend/src/container/Header/index.tsx | 84 ++++++++++++------- .../src/container/ListOfDashboard/index.tsx | 23 ++++- .../src/container/LogsSearchFilter/index.tsx | 13 ++- 7 files changed, 117 insertions(+), 83 deletions(-) delete mode 100644 frontend/src/container/ConfigDropdown/Config/styles.ts rename frontend/src/container/Header/{SignedInAs => SignedIn}/index.tsx (69%) diff --git a/frontend/src/components/TimePreferenceDropDown/index.tsx b/frontend/src/components/TimePreferenceDropDown/index.tsx index 8774edbdc9..ff6d31bcc1 100644 --- a/frontend/src/components/TimePreferenceDropDown/index.tsx +++ b/frontend/src/components/TimePreferenceDropDown/index.tsx @@ -1,9 +1,9 @@ -import { Button, Dropdown, Menu } from 'antd'; +import { Button, Dropdown } from 'antd'; import TimeItems, { timePreferance, timePreferenceType, } from 'container/NewWidget/RightContainer/timeItems'; -import React, { useCallback } from 'react'; +import React, { useCallback, useMemo } from 'react'; import { menuItems } from './config'; import { TextContainer } from './styles'; @@ -22,13 +22,18 @@ function TimePreference({ [setSelectedTime], ); + const menu = useMemo( + () => ({ + items: menuItems, + onClick: timeMenuItemOnChangeHandler, + }), + [timeMenuItemOnChangeHandler], + ); + return ( - } - > - - 
+ + ); } diff --git a/frontend/src/container/ConfigDropdown/Config/styles.ts b/frontend/src/container/ConfigDropdown/Config/styles.ts deleted file mode 100644 index 4807ea77c2..0000000000 --- a/frontend/src/container/ConfigDropdown/Config/styles.ts +++ /dev/null @@ -1,17 +0,0 @@ -import { Menu } from 'antd'; -import styled from 'styled-components'; - -export const MenuDropdown = styled(Menu)` - &&& { - .ant-dropdown, - .ant-dropdown-menu, - .ant-dropdown-menu-item { - padding: 0px; - } - .ant-menu-item { - height: 1.75rem; - display: flex; - align-items: center; - } - } -`; diff --git a/frontend/src/container/ConfigDropdown/index.tsx b/frontend/src/container/ConfigDropdown/index.tsx index 8390e09167..1ddd676948 100644 --- a/frontend/src/container/ConfigDropdown/index.tsx +++ b/frontend/src/container/ConfigDropdown/index.tsx @@ -13,7 +13,6 @@ import { ConfigProps } from 'types/api/dynamicConfigs/getDynamicConfigs'; import AppReducer from 'types/reducer/app'; import HelpToolTip from './Config'; -import { MenuDropdown } from './Config/styles'; function DynamicConfigDropdown({ frontendId, @@ -34,13 +33,15 @@ function DynamicConfigDropdown({ setIsHelpDropDownOpen(!isHelpDropDownOpen); }; - const menuItems = useMemo( - () => [ - { - key: '1', - label: , - }, - ], + const menu = useMemo( + () => ({ + items: [ + { + key: '1', + label: , + }, + ], + }), [config], ); @@ -53,10 +54,10 @@ function DynamicConfigDropdown({ return ( } - visible={isHelpDropDownOpen} + menu={menu} + open={isHelpDropDownOpen} > ((state) => state.app); + const onManageAccountClick = useCallback(() => { + onToggle(); + history.push(ROUTES.MY_SETTINGS); + }, [onToggle]); + if (!user) { return
; } @@ -30,11 +35,7 @@ function SignedInAS(): JSX.Element { {email}
- { - history.push(ROUTES.MY_SETTINGS); - }} - > + Manage Account @@ -42,4 +43,8 @@ function SignedInAS(): JSX.Element { ); } -export default SignedInAS; +interface SignedInProps { + onToggle: VoidFunction; +} + +export default SignedIn; diff --git a/frontend/src/container/Header/index.tsx b/frontend/src/container/Header/index.tsx index 9f04454d33..a34287e665 100644 --- a/frontend/src/container/Header/index.tsx +++ b/frontend/src/container/Header/index.tsx @@ -3,12 +3,19 @@ import { CaretUpFilled, LogoutOutlined, } from '@ant-design/icons'; -import { Divider, Dropdown, Menu, Space, Typography } from 'antd'; +import type { MenuProps } from 'antd'; +import { Divider, Dropdown, Space, Typography } from 'antd'; import { Logout } from 'api/utils'; import ROUTES from 'constants/routes'; import Config from 'container/ConfigDropdown'; import { useIsDarkMode, useThemeMode } from 'hooks/useDarkMode'; -import React, { Dispatch, SetStateAction, useCallback, useState } from 'react'; +import React, { + Dispatch, + SetStateAction, + useCallback, + useMemo, + useState, +} from 'react'; import { useSelector } from 'react-redux'; import { NavLink } from 'react-router-dom'; import { AppState } from 'store/reducers'; @@ -16,7 +23,7 @@ import AppReducer from 'types/reducer/app'; import CurrentOrganization from './CurrentOrganization'; import ManageLicense from './ManageLicense'; -import SignedInAS from './SignedInAs'; +import SignedIn from './SignedIn'; import { AvatarWrapper, Container, @@ -43,32 +50,45 @@ function HeaderContainer(): JSX.Element { [], ); - const menu = ( - - - - - - - - - - -
{ - if (e.key === 'Enter' || e.key === 'Space') { - Logout(); - } - }} - role="button" - onClick={Logout} - > - Logout -
-
-
-
+ const onLogoutKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.key === 'Enter' || e.key === 'Space') { + Logout(); + } + }, + [], + ); + + const menu: MenuProps = useMemo( + () => ({ + items: [ + { + key: 'main-menu', + label: ( +
+ + + + + + + + +
+ Logout +
+
+
+ ), + }, + ], + }), + [onToggleHandler, onLogoutKeyDown], ); return ( @@ -98,10 +118,10 @@ function HeaderContainer(): JSX.Element { /> {user?.name[0]} diff --git a/frontend/src/container/ListOfDashboard/index.tsx b/frontend/src/container/ListOfDashboard/index.tsx index b513df0c6a..ca5d68d9bb 100644 --- a/frontend/src/container/ListOfDashboard/index.tsx +++ b/frontend/src/container/ListOfDashboard/index.tsx @@ -1,5 +1,12 @@ import { PlusOutlined } from '@ant-design/icons'; -import { Card, Dropdown, Menu, Row, TableColumnProps, Typography } from 'antd'; +import { + Card, + Dropdown, + MenuProps, + Row, + TableColumnProps, + Typography, +} from 'antd'; import { ItemType } from 'antd/es/menu/hooks/useItems'; import createDashboard from 'api/dashboard/create'; import { AxiosError } from 'axios'; @@ -47,10 +54,12 @@ function ListOfAllDashboard(): JSX.Element { ); const { t } = useTranslation('dashboard'); + const [ isImportJSONModalVisible, setIsImportJSONModalVisible, ] = useState(false); + const [uploadedGrafana, setUploadedGrafana] = useState(false); const [filteredDashboards, setFilteredDashboards] = useState(); @@ -58,6 +67,7 @@ function ListOfAllDashboard(): JSX.Element { useEffect(() => { setFilteredDashboards(dashboards); }, [dashboards]); + const [newDashboardState, setNewDashboardState] = useState({ loading: false, error: false, @@ -215,7 +225,12 @@ function ListOfAllDashboard(): JSX.Element { return menuItems; }, [createNewDashboard, loading, onNewDashboardHandler, t]); - const menuItems = getMenuItems(); + const menu: MenuProps = useMemo( + () => ({ + items: getMenuItems(), + }), + [getMenuItems], + ); const GetHeader = useMemo( () => ( @@ -230,7 +245,7 @@ function ListOfAllDashboard(): JSX.Element { }} /> {newDashboard && ( - }> + } type="primary" @@ -249,7 +264,7 @@ function ListOfAllDashboard(): JSX.Element { newDashboard, newDashboardState.error, newDashboardState.loading, - menuItems, + menu, ], ); diff --git 
a/frontend/src/container/LogsSearchFilter/index.tsx b/frontend/src/container/LogsSearchFilter/index.tsx index 670c58cf70..75f95769f1 100644 --- a/frontend/src/container/LogsSearchFilter/index.tsx +++ b/frontend/src/container/LogsSearchFilter/index.tsx @@ -173,6 +173,13 @@ function SearchFilter({ globalTime.minTime, ]); + const onPopOverChange = useCallback( + (isVisible: boolean) => { + onDropDownToggleHandler(isVisible)(); + }, + [onDropDownToggleHandler], + ); + return ( { - onDropDownToggleHandler(value)(); - }} + onOpenChange={onPopOverChange} > Date: Wed, 15 Mar 2023 15:09:15 +0530 Subject: [PATCH 02/38] feat: opamp server application (#1787) * feat: opamp server application * chore: opamp * chore: refactor server implementation * chore: add Stop * chore: merged opamp updates * chore: removed all errorf * chore: added a comment about zero version * feat: added user context for created by * chore: changed debugf to debug * chore: removed lb from opamp + added config parser * fix: added userid to ConfigNewVersion() * chore: removed user id from contxt and added config parser * fix: removed lock inside re-deploy * chore: added config db fix * fix: merged app/server.go from develop * fix: restored extract jwt * Update pkg/query-service/app/server.go Co-authored-by: Nityananda Gohain * fix: dependency version fix and import added --------- Co-authored-by: Pranay Prateek Co-authored-by: Palash Gupta Co-authored-by: mindhash Co-authored-by: Nityananda Gohain --- ee/query-service/app/server.go | 61 +++- go.mod | 13 +- go.sum | 141 +++++++- pkg/query-service/agentConf/db.go | 236 +++++++++++++ pkg/query-service/agentConf/manager.go | 212 +++++++++++ pkg/query-service/agentConf/sqlite/init.go | 65 ++++ pkg/query-service/agentConf/version.go | 69 ++++ pkg/query-service/app/opamp/config.yaml | 76 ++++ .../app/opamp/configure_ingestionRules.go | 141 ++++++++ pkg/query-service/app/opamp/logger.go | 15 + pkg/query-service/app/opamp/model/agent.go | 334 ++++++++++++++++++ 
pkg/query-service/app/opamp/model/agents.go | 117 ++++++ .../app/opamp/model/coordinator.go | 66 ++++ pkg/query-service/app/opamp/opamp_server.go | 204 +++++++++++ .../app/opamp/otelconfig/config_parser.go | 195 ++++++++++ .../opamp/otelconfig/config_parser_test.go | 58 +++ .../otelconfig/filterprocessor/config.go | 11 + .../opamp/otelconfig/otlpreceiver/config.go | 6 + .../otelconfig/otlpreceiver/grpcSettings.go | 14 + .../otelconfig/otlpreceiver/httpSettings.go | 9 + .../app/opamp/otelconfig/otlpreceiver/tls.go | 38 ++ .../opamp/otelconfig/tailsampler/config.go | 82 +++++ .../app/opamp/otelconfig/testdata/basic.yaml | 76 ++++ .../opamp/otelconfig/testdata/service.yaml | 11 + .../app/opamp/pipeline_builder.go | 196 ++++++++++ pkg/query-service/app/opamp/signal.go | 9 + pkg/query-service/app/server.go | 43 +++ pkg/query-service/constants/constants.go | 1 + pkg/query-service/main.go | 8 +- 29 files changed, 2494 insertions(+), 13 deletions(-) create mode 100644 pkg/query-service/agentConf/db.go create mode 100644 pkg/query-service/agentConf/manager.go create mode 100644 pkg/query-service/agentConf/sqlite/init.go create mode 100644 pkg/query-service/agentConf/version.go create mode 100644 pkg/query-service/app/opamp/config.yaml create mode 100644 pkg/query-service/app/opamp/configure_ingestionRules.go create mode 100644 pkg/query-service/app/opamp/logger.go create mode 100644 pkg/query-service/app/opamp/model/agent.go create mode 100644 pkg/query-service/app/opamp/model/agents.go create mode 100644 pkg/query-service/app/opamp/model/coordinator.go create mode 100644 pkg/query-service/app/opamp/opamp_server.go create mode 100644 pkg/query-service/app/opamp/otelconfig/config_parser.go create mode 100644 pkg/query-service/app/opamp/otelconfig/config_parser_test.go create mode 100644 pkg/query-service/app/opamp/otelconfig/filterprocessor/config.go create mode 100644 pkg/query-service/app/opamp/otelconfig/otlpreceiver/config.go create mode 100644 
pkg/query-service/app/opamp/otelconfig/otlpreceiver/grpcSettings.go create mode 100644 pkg/query-service/app/opamp/otelconfig/otlpreceiver/httpSettings.go create mode 100644 pkg/query-service/app/opamp/otelconfig/otlpreceiver/tls.go create mode 100644 pkg/query-service/app/opamp/otelconfig/tailsampler/config.go create mode 100644 pkg/query-service/app/opamp/otelconfig/testdata/basic.yaml create mode 100644 pkg/query-service/app/opamp/otelconfig/testdata/service.yaml create mode 100644 pkg/query-service/app/opamp/pipeline_builder.go create mode 100644 pkg/query-service/app/opamp/signal.go diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go index af4a90f885..72091cc1e3 100644 --- a/ee/query-service/app/server.go +++ b/ee/query-service/app/server.go @@ -25,16 +25,18 @@ import ( licensepkg "go.signoz.io/signoz/ee/query-service/license" "go.signoz.io/signoz/ee/query-service/usage" + "go.signoz.io/signoz/pkg/query-service/agentConf" baseapp "go.signoz.io/signoz/pkg/query-service/app" "go.signoz.io/signoz/pkg/query-service/app/dashboards" - "go.signoz.io/signoz/pkg/query-service/app/explorer" + baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer" + "go.signoz.io/signoz/pkg/query-service/app/opamp" + opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model" baseauth "go.signoz.io/signoz/pkg/query-service/auth" - "go.signoz.io/signoz/pkg/query-service/constants" baseconst "go.signoz.io/signoz/pkg/query-service/constants" "go.signoz.io/signoz/pkg/query-service/healthcheck" basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager" baseint "go.signoz.io/signoz/pkg/query-service/interfaces" - "go.signoz.io/signoz/pkg/query-service/model" + basemodel "go.signoz.io/signoz/pkg/query-service/model" pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine" rules "go.signoz.io/signoz/pkg/query-service/rules" "go.signoz.io/signoz/pkg/query-service/telemetry" @@ -42,6 +44,8 @@ import ( "go.uber.org/zap" ) +const AppDbEngine = 
"sqlite" + type ServerOptions struct { PromConfigPath string HTTPHostPort string @@ -85,8 +89,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { return nil, err } + baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH) + localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH) - explorer.InitWithDSN(constants.RELATIONAL_DATASOURCE_PATH) if err != nil { return nil, err @@ -127,6 +132,17 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { return nil, err } + // initiate opamp + _, err = opAmpModel.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH) + if err != nil { + return nil, err + } + + // initiate agent config handler + if err := agentConf.Initiate(localDB, AppDbEngine); err != nil { + return nil, err + } + // start the usagemanager usageManager, err := usage.New("sqlite", localDB, lm.GetRepo(), reader.GetConn()) if err != nil { @@ -208,7 +224,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e r := mux.NewRouter() - getUserFromRequest := func(r *http.Request) (*model.UserPayload, error) { + getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) { patToken := r.Header.Get("SIGNOZ-API-KEY") if len(patToken) > 0 { zap.S().Debugf("Received a non-zero length PAT token") @@ -299,7 +315,7 @@ func extractDashboardMetaData(path string, r *http.Request) (map[string]interfac pathToExtractBodyFrom := "/api/v2/metrics/query_range" data := map[string]interface{}{} - var postData *model.QueryRangeParamsV2 + var postData *basemodel.QueryRangeParamsV2 if path == pathToExtractBodyFrom && (r.Method == "POST") { if r.Body != nil { @@ -472,7 +488,7 @@ func (s *Server) Start() error { if port, err := utils.GetPort(s.privateConn.Addr()); err == nil { privatePort = port } - fmt.Println("starting private http") + go func() { zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort)) @@ -488,6 +504,37 @@ func (s 
*Server) Start() error { }() + go func() { + zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint)) + err := opamp.InitalizeServer(baseconst.OpAmpWsEndpoint, &opAmpModel.AllAgents) + if err != nil { + zap.S().Info("opamp ws server failed to start", err) + s.unavailableChannel <- healthcheck.Unavailable + } + }() + + return nil +} + +func (s *Server) Stop() error { + if s.httpServer != nil { + if err := s.httpServer.Shutdown(context.Background()); err != nil { + return err + } + } + + if s.privateHTTP != nil { + if err := s.privateHTTP.Shutdown(context.Background()); err != nil { + return err + } + } + + opamp.StopServer() + + if s.ruleManager != nil { + s.ruleManager.Stop() + } + return nil } diff --git a/go.mod b/go.mod index 490ecb53d7..cccc971615 100644 --- a/go.mod +++ b/go.mod @@ -13,11 +13,13 @@ require ( github.com/gosimple/slug v1.10.0 github.com/jmoiron/sqlx v1.3.4 github.com/json-iterator/go v1.1.12 + github.com/knadh/koanf v1.5.0 github.com/mailru/easyjson v0.7.7 github.com/mattn/go-sqlite3 v2.0.3+incompatible github.com/minio/minio-go/v6 v6.0.57 github.com/mitchellh/mapstructure v1.5.0 github.com/oklog/oklog v0.3.2 + github.com/open-telemetry/opamp-go v0.5.0 github.com/pkg/errors v0.9.1 github.com/posthog/posthog-go v0.0.0-20220817142604-0b0bbf0f9c0f github.com/prometheus/common v0.39.0 @@ -28,8 +30,10 @@ require ( github.com/sethvargo/go-password v0.2.0 github.com/smartystreets/goconvey v1.6.4 github.com/soheilhy/cmux v0.1.5 + go.opentelemetry.io/collector/confmap v0.70.0 go.uber.org/zap v1.24.0 gopkg.in/segmentio/analytics-go.v3 v3.1.0 + gopkg.in/yaml.v3 v3.0.1 k8s.io/apimachinery v0.26.0 ) @@ -66,17 +70,20 @@ require ( github.com/beevik/etree v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect github.com/josharian/intern v1.0.0 // 
indirect github.com/klauspost/cpuid v1.2.3 // indirect github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect github.com/minio/md5-simd v1.1.0 // indirect github.com/minio/sha256-simd v0.1.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect + go.opentelemetry.io/collector/featuregate v0.70.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect ) require ( @@ -116,7 +123,7 @@ require ( github.com/sirupsen/logrus v1.9.0 // indirect github.com/smartystreets/assertions v1.1.0 github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect go.opentelemetry.io/otel v1.11.2 // indirect go.opentelemetry.io/otel/trace v1.11.2 // indirect @@ -131,7 +138,7 @@ require ( golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/grpc v1.51.0 - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/protobuf v1.28.1 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index f437e8cf4c..19ef4f20ff 100644 --- a/go.sum +++ b/go.sum @@ -89,13 +89,27 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8V github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= 
github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/auth0/go-jwt-middleware v1.0.1 h1:/fsQ4vRr4zod1wKReUH+0A3ySRjGiT9G34kypO/EKwI= github.com/auth0/go-jwt-middleware v1.0.1/go.mod h1:YSeUX3z6+TF2H+7padiEqNJ73Zy9vXW72U//IgN0BIM= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.159 h1:9odtuHAYQE9tQKyuX6ny1U1MHeH5/yzeCJi96g9H4DU= github.com/aws/aws-sdk-go v1.44.159/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod 
h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= @@ -104,6 +118,7 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -130,6 +145,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= github.com/coreos/go-oidc/v3 v3.4.0 h1:xz7elHb/LDwm/ERpwHd+5nb7wFHL32rsr6bBOgaeu6g= github.com/coreos/go-oidc/v3 v3.4.0/go.mod h1:eHUXhZtXPQLgEaDrOVTgwbgmz1xGOkJNye6h3zkD2Pw= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -159,12 +176,16 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go. 
github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= @@ -181,6 +202,7 @@ github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEai github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -201,7 +223,9 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -239,6 +263,7 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -304,30 +329,63 @@ github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/mux v1.8.0 
h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosimple/slug v1.10.0 h1:3XbiQua1IpCdrvuntWvGBxVm+K99wCSxJjlxkP49GGQ= github.com/gosimple/slug v1.10.0/go.mod h1:MICb3w495l9KNdZm+Xn5b6T2Hn831f9DMxiJ1r+bAjw= github.com/gosimple/unidecode v1.0.0 h1:kPdvM+qy0tnk4/BrnkrbdJ82xe88xn7c9hcaipDz4dQ= github.com/gosimple/unidecode v1.0.0/go.mod h1:CP0Cr1Y1kogOtx0bJblKzsVWrqYaqfNOnHzpgWw4Awc= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul v1.1.1-0.20180615161029-bed22a81e9fd h1:u6o+bd6FHxDKoCSa8PJ5vrHhAYSKgJtAHQtLO1EYgos= github.com/hashicorp/consul v1.1.1-0.20180615161029-bed22a81e9fd/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= 
+github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod 
h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d h1:kEWrUx7mld3c6HRcO2KhfD1MYBkofuZfEfDwCRQ9aMU= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hetznercloud/hcloud-go v1.38.0 h1:K6Pd/mMdcLfBhvwG39qyAaacp4pCS3dKa8gChmLKxLg= +github.com/hjson/hjson-go/v4 v4.0.0 h1:wlm6IYYqHjOdXH1gHev4VoXCaW20HdQAGCxdOEEg2cs= +github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= 
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= @@ -338,6 +396,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w= github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -362,11 +422,14 @@ github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexD github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= +github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -382,12 +445,22 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU= github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod 
h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= @@ -395,10 +468,23 @@ github.com/minio/minio-go/v6 v6.0.57 h1:ixPkbKkyD7IhnluRgQpGSpHdpvNVaW6OD5R9IAO/ github.com/minio/minio-go/v6 v6.0.57/go.mod h1:5+R/nM9Pwrh0vqF+HbYYDQ84wdUFPyXHkrdT4AIkifM= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -410,21 +496,29 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= github.com/oklog/oklog v0.3.2 h1:wVfs8F+in6nTBMkA7CbRw+zZMIB7nNM825cM1wuzoTk= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= 
+github.com/open-telemetry/opamp-go v0.5.0 h1:2YFbb6G4qBkq3yTRdVb5Nfz9hKHW/ldUyex352e1J7g= +github.com/open-telemetry/opamp-go v0.5.0/go.mod h1:IMdeuHGVc5CjKSu5/oNV0o+UmiXuahoHvoZ4GOmAI9M= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/paulmach/orb v0.8.0 h1:W5XAt5yNPNnhaMNEf0xNSkBMJ1LzOzdk2MRlB6EN0Vs= github.com/paulmach/orb v0.8.0/go.mod h1:FWRlTgl88VI1RBx/MkrwWDRhQ96ctqMCh8boXhmqB/A= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= +github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -434,6 +528,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/posthog/posthog-go v0.0.0-20220817142604-0b0bbf0f9c0f h1:h0p1aZ9F5d6IXOygysob3g4B07b+HuVUQC0VJKD8wA4= github.com/posthog/posthog-go v0.0.0-20220817142604-0b0bbf0f9c0f/go.mod h1:oa2sAs9tGai3VldabTV0eWejt/O4/OOD7azP8GaikqU= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -441,6 +537,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -465,6 +562,7 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= @@ -477,7 +575,11 @@ github.com/russellhaering/gosaml2 v0.8.0/go.mod h1:byViER/1YPUa0Puj9ROZblpoq2jsE github.com/russellhaering/goxmldsig v1.2.0 h1:Y6GTTc9Un5hCxSzVz4UIWQ/zuVwDvzJk80guqzwx6Vg= github.com/russellhaering/goxmldsig v1.2.0/go.mod h1:gM4MDENBQf7M+V824SGfyIUVFWydB7n0KkEubVJl+Tw= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/backo-go v1.0.0 h1:kbOAtGJY2DqOR0jfRkYEorx/b18RgtepGtY3+Cpe6qA= @@ -506,6 +608,7 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -518,8 +621,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= @@ -533,6 +636,9 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -540,6 +646,10 @@ go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/collector/confmap v0.70.0 h1:GJDaM7c3yFyT7Zv6l2/5ahwaqPCvtC92Ii8Bg2AVdjU= +go.opentelemetry.io/collector/confmap v0.70.0/go.mod h1:8//JWR2TMChLH35Az0mGFrCskEIP6POgZJK6iRRhzeM= +go.opentelemetry.io/collector/featuregate v0.70.0 h1:Xr6hrMT/++SjTm06nreex8WlpgFhYJ7S0yRVn1OvVf8= +go.opentelemetry.io/collector/featuregate v0.70.0/go.mod h1:ih+oCwrHW3bLac/qnPUzes28yDCDmh8WzsAKKauwCYI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U= go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= @@ -549,12 +659,15 @@ go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2b go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= go.uber.org/multierr v1.9.0/go.mod 
h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -562,6 +675,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -622,6 +736,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -645,6 +760,7 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -697,11 +813,15 @@ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -709,12 +829,19 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -736,10 +863,12 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -776,6 +905,7 @@ golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -804,6 +934,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -902,6 +1033,7 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 google.golang.org/appengine 
v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -979,9 +1111,11 @@ google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1030,6 +1164,7 @@ google.golang.org/protobuf v1.28.0/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1042,6 +1177,7 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/segmentio/analytics-go.v3 v3.1.0 h1:UzxH1uaGZRpMKDhJyBz0pexz6yUoBU3x8bJsRk/HV6U= gopkg.in/segmentio/analytics-go.v3 v3.1.0/go.mod h1:4QqqlTlSSpVlWA9/9nDcPw+FkM2yv1NQoYjUbL9/JAw= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1078,4 +1214,5 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git 
a/pkg/query-service/agentConf/db.go b/pkg/query-service/agentConf/db.go new file mode 100644 index 0000000000..591a7ee799 --- /dev/null +++ b/pkg/query-service/agentConf/db.go @@ -0,0 +1,236 @@ +package agentConf + +import ( + "context" + "database/sql" + "fmt" + "math/rand" + + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "go.signoz.io/signoz/pkg/query-service/agentConf/sqlite" + "go.signoz.io/signoz/pkg/query-service/model" + "go.uber.org/zap" +) + +func init() { + rand.Seed(2000) +} + +// Repo handles DDL and DML ops on ingestion rules +type Repo struct { + db *sqlx.DB +} + +func (r *Repo) initDB(engine string) error { + switch engine { + case "sqlite3", "sqlite": + return sqlite.InitDB(r.db) + default: + return fmt.Errorf("unsupported db") + } +} + +func (r *Repo) GetConfigHistory(ctx context.Context, typ ElementTypeDef) ([]ConfigVersion, error) { + var c []ConfigVersion + err := r.db.SelectContext(ctx, &c, `SELECT + id, + version, + element_type, + COALESCE(created_by, -1) as created_by, + created_at, + COALESCE((SELECT NAME FROM users + WHERE id = v.created_by), "unknown") created_by_name, + active, + is_valid, + disabled, + deploy_status, + deploy_result + FROM agent_config_versions AS v + WHERE element_type = $1`, typ) + + return c, err +} + +func (r *Repo) GetConfigVersion(ctx context.Context, typ ElementTypeDef, v int) (*ConfigVersion, error) { + var c ConfigVersion + err := r.db.GetContext(ctx, &c, `SELECT + id, + version, + element_type, + COALESCE(created_by, -1) as created_by, + created_at, + COALESCE((SELECT NAME FROM users + WHERE id = v.created_by), "unknown") created_by_name, + active, + is_valid, + disabled, + deploy_status, + deploy_result, + last_hash, + last_config + FROM agent_config_versions v + WHERE element_type = $1 + AND version = $2`, typ, v) + + return &c, err + +} + +func (r *Repo) GetLatestVersion(ctx context.Context, typ ElementTypeDef) (*ConfigVersion, error) { + var c ConfigVersion + err := r.db.GetContext(ctx, &c, 
`SELECT + id, + version, + element_type, + COALESCE(created_by, -1) as created_by, + created_at, + COALESCE((SELECT NAME FROM users + WHERE id = v.created_by), "unknown") created_by_name, + active, + is_valid, + disabled, + deploy_status, + deploy_result + FROM agent_config_versions AS v + WHERE element_type = $1 + AND version = ( + SELECT MAX(version) + FROM agent_config_versions + WHERE element_type=$2)`, typ, typ) + if err != nil { + zap.S().Error("failed get latest config version for element:", typ, err) + } + return &c, err +} + +func (r *Repo) insertConfig(ctx context.Context, userId string, c *ConfigVersion, elements []string) (fnerr error) { + + if string(c.ElementType) == "" { + return fmt.Errorf("element type is required for creating agent config version") + } + + if len(elements) == 0 { + zap.S().Error("insert config called with no elements", c.ElementType) + return fmt.Errorf("config must have atleast one element") + } + + if c.Version != 0 { + // the version can not be set by the user, we want to auto-assign the versions + // in a monotonically increasing order starting with 1. hence, we reject insert + // requests with version anything other than 0. 
here, 0 indicates un-assigned + zap.S().Error("invalid version assignment while inserting agent config", c.Version, c.ElementType) + return fmt.Errorf("user defined versions are not supported in the agent config") + } + + configVersion, err := r.GetLatestVersion(ctx, c.ElementType) + if err != nil { + if err != sql.ErrNoRows { + zap.S().Error("failed to fetch latest config version", err) + return fmt.Errorf("failed to fetch latest config version") + } + } + + c.Version = updateVersion(configVersion.Version) + + defer func() { + if fnerr != nil { + // remove all the damage (invalid rows from db) + r.db.Exec("DELETE FROM agent_config_versions WHERE id = $1", c.ID) + r.db.Exec("DELETE FROM agent_config_elements WHERE version_id=$1", c.ID) + } + }() + + // insert config + configQuery := `INSERT INTO agent_config_versions( + id, + version, + created_by, + element_type, + active, + is_valid, + disabled, + deploy_status, + deploy_result) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)` + + _, err = r.db.ExecContext(ctx, + configQuery, + c.ID, + c.Version, + userId, + c.ElementType, + false, + false, + false, + c.DeployStatus, + c.DeployResult) + + if err != nil { + zap.S().Error("error in inserting config version: ", zap.Error(err)) + return fmt.Errorf("failed to insert ingestion rule") + } + + elementsQuery := `INSERT INTO agent_config_elements( + id, + version_id, + element_type, + element_id) + VALUES ($1, $2, $3, $4)` + + for _, e := range elements { + + _, err = r.db.ExecContext(ctx, + elementsQuery, + uuid.NewString(), + c.ID, + c.ElementType, + e) + if err != nil { + return err + } + } + + return nil +} + +func (r *Repo) updateDeployStatus(ctx context.Context, + elementType ElementTypeDef, + version int, + status string, + result string, + lastHash string, + lastconf string) error { + + updateQuery := `UPDATE agent_config_versions + set deploy_status = $1, + deploy_result = $2, + last_hash = COALESCE($3, last_hash), + last_config = $4 + WHERE version=$5 + AND 
element_type = $6` + + _, err := r.db.ExecContext(ctx, updateQuery, status, result, lastHash, lastconf, version, string(elementType)) + if err != nil { + zap.S().Error("failed to update deploy status", err) + return model.BadRequestStr("failed to update deploy status") + } + + return nil +} + +func (r *Repo) updateDeployStatusByHash(ctx context.Context, confighash string, status string, result string) error { + + updateQuery := `UPDATE agent_config_versions + set deploy_status = $1, + deploy_result = $2 + WHERE last_hash=$4` + + _, err := r.db.ExecContext(ctx, updateQuery, status, result, confighash) + if err != nil { + zap.S().Error("failed to update deploy status", err) + return model.BadRequestStr("failed to update deploy status") + } + + return nil +} diff --git a/pkg/query-service/agentConf/manager.go b/pkg/query-service/agentConf/manager.go new file mode 100644 index 0000000000..27478b332a --- /dev/null +++ b/pkg/query-service/agentConf/manager.go @@ -0,0 +1,212 @@ +package agentConf + +import ( + "context" + "fmt" + "sync/atomic" + + "github.com/jmoiron/sqlx" + "go.signoz.io/signoz/pkg/query-service/app/opamp" + filterprocessor "go.signoz.io/signoz/pkg/query-service/app/opamp/otelconfig/filterprocessor" + tsp "go.signoz.io/signoz/pkg/query-service/app/opamp/otelconfig/tailsampler" + "go.uber.org/zap" + yaml "gopkg.in/yaml.v3" +) + +var m *Manager + +func init() { + m = &Manager{} +} + +type Manager struct { + Repo + // lock to make sure only one update is sent to remote agents at a time + lock uint32 +} + +// Ready indicates if Manager can accept new config update requests +func (mgr *Manager) Ready() bool { + if atomic.LoadUint32(&mgr.lock) != 0 { + return false + } + return opamp.Ready() +} + +func Initiate(db *sqlx.DB, engine string) error { + m.Repo = Repo{db} + return m.initDB(engine) +} + +// Ready indicates if Manager can accept new config update requests +func Ready() bool { + return m.Ready() +} + +func GetLatestVersion(ctx context.Context, 
elementType ElementTypeDef) (*ConfigVersion, error) { + return m.GetLatestVersion(ctx, elementType) +} + +func GetConfigVersion(ctx context.Context, elementType ElementTypeDef, version int) (*ConfigVersion, error) { + return m.GetConfigVersion(ctx, elementType, version) +} + +func GetConfigHistory(ctx context.Context, typ ElementTypeDef) ([]ConfigVersion, error) { + return m.GetConfigHistory(ctx, typ) +} + +// StartNewVersion launches a new config version for given set of elements +func StartNewVersion(ctx context.Context, userId string, eleType ElementTypeDef, elementIds []string) (*ConfigVersion, error) { + + if !m.Ready() { + // agent is already being updated, ask caller to wait and re-try after sometime + return nil, fmt.Errorf("agent updater is busy") + } + + // create a new version + cfg := NewConfigversion(eleType) + + // insert new config and elements into database + err := m.insertConfig(ctx, userId, cfg, elementIds) + if err != nil { + return nil, err + } + + return cfg, nil +} + +func Redeploy(ctx context.Context, typ ElementTypeDef, version int) error { + + configVersion, err := GetConfigVersion(ctx, typ, version) + if err != nil { + zap.S().Debug("failed to fetch config version during redeploy", err) + return fmt.Errorf("failed to fetch details of the config version") + } + + if configVersion == nil || (configVersion != nil && configVersion.LastConf == "") { + zap.S().Debug("config version has no conf yaml", configVersion) + return fmt.Errorf("the config version can not be redeployed") + } + switch typ { + case ElementTypeSamplingRules: + var config *tsp.Config + if err := yaml.Unmarshal([]byte(configVersion.LastConf), &config); err != nil { + zap.S().Error("failed to read last conf correctly", err) + return fmt.Errorf("failed to read the stored config correctly") + } + + // merge current config with new filter params + processorConf := map[string]interface{}{ + "signoz_tail_sampling": config, + } + + 
opamp.AddToTracePipelineSpec("signoz_tail_sampling") + configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate) + if err != nil { + zap.S().Error("failed to call agent config update for trace processor:", err) + return fmt.Errorf("failed to deploy the config") + } + + m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, configVersion.LastConf) + case ElementTypeDropRules: + var filterConfig *filterprocessor.Config + if err := yaml.Unmarshal([]byte(configVersion.LastConf), &filterConfig); err != nil { + zap.S().Error("failed to read last conf correctly", err) + return fmt.Errorf("failed to read the stored config correctly") + } + processorConf := map[string]interface{}{ + "filter": filterConfig, + } + + opamp.AddToMetricsPipelineSpec("filter") + configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate) + if err != nil { + zap.S().Error("failed to call agent config update for trace processor:", err) + return err + } + + m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, configVersion.LastConf) + } + + return nil +} + +// UpsertFilterProcessor updates the agent config with new filter processor params +func UpsertFilterProcessor(ctx context.Context, version int, config *filterprocessor.Config) error { + if !atomic.CompareAndSwapUint32(&m.lock, 0, 1) { + return fmt.Errorf("agent updater is busy") + } + defer atomic.StoreUint32(&m.lock, 0) + + // merge current config with new filter params + // merge current config with new filter params + processorConf := map[string]interface{}{ + "filter": config, + } + + opamp.AddToMetricsPipelineSpec("filter") + configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate) + if err != nil { + zap.S().Error("failed to call agent config update for trace processor:", err) + return err + } 
+ + processorConfYaml, err := yaml.Marshal(config) + if err != nil { + zap.S().Warnf("unexpected error while transforming processor config to yaml", err) + } + + m.updateDeployStatus(ctx, ElementTypeDropRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml)) + return nil +} + +// OnConfigUpdate is a callback function passed to opamp server. +// It receives a config hash with error status. We assume +// successful deployment if no error is received. +// this method is currently expected to be called only once in the lifecycle +// but can be improved in future to accept continuous request status updates from opamp +func (m *Manager) OnConfigUpdate(agentId string, hash string, err error) { + + status := string(Deployed) + + message := "deploy successful" + + defer func() { + zap.S().Info(status, zap.String("agentId", agentId), zap.String("agentResponse", message)) + }() + + if err != nil { + status = string(DeployFailed) + message = fmt.Sprintf("%s: %s", agentId, err.Error()) + } + + m.updateDeployStatusByHash(context.Background(), hash, status, message) +} + +// UpsertSamplingProcessor updates the agent config with new filter processor params +func UpsertSamplingProcessor(ctx context.Context, version int, config *tsp.Config) error { + if !atomic.CompareAndSwapUint32(&m.lock, 0, 1) { + return fmt.Errorf("agent updater is busy") + } + defer atomic.StoreUint32(&m.lock, 0) + + // merge current config with new filter params + processorConf := map[string]interface{}{ + "signoz_tail_sampling": config, + } + + opamp.AddToTracePipelineSpec("signoz_tail_sampling") + configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate) + if err != nil { + zap.S().Error("failed to call agent config update for trace processor:", err) + return err + } + + processorConfYaml, err := yaml.Marshal(config) + if err != nil { + zap.S().Warnf("unexpected error while transforming processor config to yaml", err) + } + 
+ m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml)) + return nil +} diff --git a/pkg/query-service/agentConf/sqlite/init.go b/pkg/query-service/agentConf/sqlite/init.go new file mode 100644 index 0000000000..b844fc6a62 --- /dev/null +++ b/pkg/query-service/agentConf/sqlite/init.go @@ -0,0 +1,65 @@ +package sqlite + +import ( + "fmt" + + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" +) + +func InitDB(db *sqlx.DB) error { + var err error + if db == nil { + return fmt.Errorf("invalid db connection") + } + + table_schema := `CREATE TABLE IF NOT EXISTS agent_config_versions( + id TEXT PRIMARY KEY, + created_by TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_by TEXT, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + version INTEGER DEFAULT 1, + active int, + is_valid int, + disabled int, + element_type VARCHAR(120) NOT NULL, + deploy_status VARCHAR(80) NOT NULL DEFAULT 'DIRTY', + deploy_sequence INTEGER, + deploy_result TEXT, + last_hash TEXT, + last_config TEXT, + UNIQUE(element_type, version) + ); + + + CREATE UNIQUE INDEX IF NOT EXISTS agent_config_versions_u1 + ON agent_config_versions(element_type, version); + + CREATE INDEX IF NOT EXISTS agent_config_versions_nu1 + ON agent_config_versions(last_hash); + + + CREATE TABLE IF NOT EXISTS agent_config_elements( + id TEXT PRIMARY KEY, + created_by TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_by TEXT, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + element_id TEXT NOT NULL, + element_type VARCHAR(120) NOT NULL, + version_id TEXT NOT NULL + ); + + CREATE UNIQUE INDEX IF NOT EXISTS agent_config_elements_u1 + ON agent_config_elements(version_id, element_id, element_type); + + ` + + _, err = db.Exec(table_schema) + if err != nil { + return errors.Wrap(err, "Error in creating agent config tables") + } + return nil +} diff --git a/pkg/query-service/agentConf/version.go 
b/pkg/query-service/agentConf/version.go new file mode 100644 index 0000000000..c6b8c01ee4 --- /dev/null +++ b/pkg/query-service/agentConf/version.go @@ -0,0 +1,69 @@ +package agentConf + +import ( + "time" + + "github.com/google/uuid" +) + +type ElementTypeDef string + +const ( + ElementTypeSamplingRules ElementTypeDef = "sampling_rules" + ElementTypeDropRules ElementTypeDef = "drop_rules" + ElementTypeLogPipelines ElementTypeDef = "log_pipelines" + ElementTypeLbExporter ElementTypeDef = "lb_exporter" +) + +type DeployStatus string + +const ( + PendingDeploy DeployStatus = "DIRTY" + Deploying DeployStatus = "DEPLOYING" + Deployed DeployStatus = "DEPLOYED" + DeployInitiated DeployStatus = "IN_PROGRESS" + DeployFailed DeployStatus = "FAILED" +) + +type ConfigVersion struct { + ID string `json:"id" db:"id"` + Version int `json:"version" db:"version"` + ElementType ElementTypeDef `json:"elementType" db:"element_type"` + Active bool `json:"active" db:"active"` + IsValid bool `json:"is_valid" db:"is_valid"` + Disabled bool `json:"disabled" db:"disabled"` + + DeployStatus DeployStatus `json:"deployStatus" db:"deploy_status"` + DeployResult string `json:"deployResult" db:"deploy_result"` + + LastHash string `json:"lastHash" db:"last_hash"` + LastConf string `json:"lastConf" db:"last_config"` + + CreatedBy string `json:"createdBy" db:"created_by"` + CreatedByName string `json:"createdByName" db:"created_by_name"` + CreatedAt time.Time `json:"createdAt" db:"created_at"` +} + +func NewConfigversion(typeDef ElementTypeDef) *ConfigVersion { + return &ConfigVersion{ + ID: uuid.NewString(), + ElementType: typeDef, + Active: false, + IsValid: false, + Disabled: false, + DeployStatus: PendingDeploy, + // todo: get user id from context? 
+ // CreatedBy + } +} + +func updateVersion(v int) int { + return v + 1 +} + +type ConfigElements struct { + VersionID string + Version int + ElementType ElementTypeDef + ElementId string +} diff --git a/pkg/query-service/app/opamp/config.yaml b/pkg/query-service/app/opamp/config.yaml new file mode 100644 index 0000000000..d5ef74e00f --- /dev/null +++ b/pkg/query-service/app/opamp/config.yaml @@ -0,0 +1,76 @@ +receivers: + otlp/spanmetrics: + protocols: + grpc: + endpoint: "localhost:12345" + otlp: + protocols: + grpc: + http: + jaeger: + protocols: + grpc: + thrift_http: + hostmetrics: + collection_interval: 30s + scrapers: + cpu: + load: + memory: + disk: + filesystem: + network: +processors: + batch: + send_batch_size: 1000 + timeout: 10s + signozspanmetrics/prometheus: + metrics_exporter: prometheus + latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ] + dimensions_cache_size: 10000 + dimensions: + - name: service.namespace + default: default + - name: deployment.environment + default: default + # memory_limiter: + # # 80% of maximum memory up to 2G + # limit_mib: 1500 + # # 25% of limit up to 2G + # spike_limit_mib: 512 + # check_interval: 5s + # + # # 50% of the maximum memory + # limit_percentage: 50 + # # 20% of max memory usage spike expected + # spike_limit_percentage: 20 + # queued_retry: + # num_workers: 4 + # queue_size: 100 + # retry_on_failure: true +extensions: + zpages: {} +exporters: + clickhousetraces: + datasource: tcp://localhost:9000/?database=signoz_traces + migrations: exporter/clickhousetracesexporter/migrations + clickhousemetricswrite: + endpoint: tcp://localhost:9000/?database=signoz_metrics + resource_to_telemetry_conversion: + enabled: true + prometheus: + endpoint: "0.0.0.0:8889" +service: + extensions: [zpages] + pipelines: + traces: + receivers: [jaeger, otlp] + processors: [signozspanmetrics/prometheus, batch] + exporters: [clickhousetraces] + metrics: 
+ receivers: [otlp, hostmetrics] + processors: [batch] + exporters: [clickhousemetricswrite] + metrics/spanmetrics: + receivers: [otlp/spanmetrics] + exporters: [prometheus] \ No newline at end of file diff --git a/pkg/query-service/app/opamp/configure_ingestionRules.go b/pkg/query-service/app/opamp/configure_ingestionRules.go new file mode 100644 index 0000000000..af04abb723 --- /dev/null +++ b/pkg/query-service/app/opamp/configure_ingestionRules.go @@ -0,0 +1,141 @@ +package opamp + +import ( + "context" + "crypto/sha256" + "fmt" + + "github.com/knadh/koanf/parsers/yaml" + "github.com/open-telemetry/opamp-go/protobufs" + "go.opentelemetry.io/collector/confmap" + model "go.signoz.io/signoz/pkg/query-service/app/opamp/model" + "go.signoz.io/signoz/pkg/query-service/app/opamp/otelconfig" + "go.uber.org/zap" +) + +// inserts or updates ingestion controller processors depending +// on the signal (metrics or traces) +func UpsertControlProcessors(ctx context.Context, signal string, processors map[string]interface{}, callback model.OnChangeCallback) (hash string, fnerr error) { + // note: only processors enabled through tracesPipelinePlan will be added + // to pipeline. 
To enable or disable processors from pipeline, call + // AddToTracePipeline() or RemoveFromTracesPipeline() prior to calling + // this method + + zap.S().Debug("initiating ingestion rules deployment config", signal, processors) + + if signal != string(Metrics) && signal != string(Traces) { + zap.S().Error("received invalid signal int UpsertControlProcessors", signal) + fnerr = fmt.Errorf("signal not supported in ingestion rules: %s", signal) + return + } + + if opAmpServer == nil { + fnerr = fmt.Errorf("opamp server is down, unable to push config to agent at this moment") + return + } + + agents := opAmpServer.agents.GetAllAgents() + if len(agents) == 0 { + fnerr = fmt.Errorf("no agents available at the moment") + return + } + + if len(agents) > 1 && signal == string(Traces) { + zap.S().Debug("found multiple agents. this feature is not supported for traces pipeline (sampling rules)") + fnerr = fmt.Errorf("multiple agents not supported in sampling rules") + return + } + + for _, agent := range agents { + + agenthash, err := addIngestionControlToAgent(agent, signal, processors, false) + if err != nil { + zap.S().Error("failed to push ingestion rules config to agent", agent.ID, err) + continue + } + + if agenthash != "" { + // subscribe callback + model.ListenToConfigUpdate(agent.ID, agenthash, callback) + } + + hash = agenthash + } + + return hash, nil +} + +// addIngestionControlToAgent adds ingestion contorl rules to agent config +func addIngestionControlToAgent(agent *model.Agent, signal string, processors map[string]interface{}, withLB bool) (string, error) { + confHash := "" + config := agent.EffectiveConfig + c, err := yaml.Parser().Unmarshal([]byte(config)) + if err != nil { + return confHash, err + } + + agentConf := confmap.NewFromStringMap(c) + + // add ingestion control spec + err = makeIngestionControlSpec(agentConf, Signal(signal), processors) + if err != nil { + zap.S().Error("failed to prepare ingestion control processors for agent ", agent.ID, err) + 
return confHash, err + } + + // ------ complete adding processor + configR, err := yaml.Parser().Marshal(agentConf.ToStringMap()) + if err != nil { + return confHash, err + } + + zap.S().Debugf("sending new config", string(configR)) + hash := sha256.New() + _, err = hash.Write(configR) + if err != nil { + return confHash, err + } + confHash = string(hash.Sum(nil)) + agent.EffectiveConfig = string(configR) + err = agent.Upsert() + if err != nil { + return confHash, err + } + + agent.SendToAgent(&protobufs.ServerToAgent{ + RemoteConfig: &protobufs.AgentRemoteConfig{ + Config: &protobufs.AgentConfigMap{ + ConfigMap: map[string]*protobufs.AgentConfigFile{ + "collector.yaml": { + Body: configR, + ContentType: "application/x-yaml", + }, + }, + }, + ConfigHash: []byte(confHash), + }, + }) + + return string(confHash), nil +} + +// prepare spec to introduce ingestion control in agent conf +func makeIngestionControlSpec(agentConf *confmap.Conf, signal Signal, processors map[string]interface{}) error { + configParser := otelconfig.NewConfigParser(agentConf) + configParser.UpdateProcessors(processors) + + // edit pipeline if processor is missing + currentPipeline := configParser.PipelineProcessors(string(signal)) + + // merge tracesPipelinePlan with current pipeline + mergedPipeline, err := buildPipeline(signal, currentPipeline) + if err != nil { + zap.S().Error("failed to build pipeline", signal, err) + return err + } + + // add merged pipeline to the service + configParser.UpdateProcsInPipeline(string(signal), mergedPipeline) + + return nil +} diff --git a/pkg/query-service/app/opamp/logger.go b/pkg/query-service/app/opamp/logger.go new file mode 100644 index 0000000000..49f9e630fc --- /dev/null +++ b/pkg/query-service/app/opamp/logger.go @@ -0,0 +1,15 @@ +package opamp + +import "log" + +type Logger struct { + logger *log.Logger +} + +func (l *Logger) Debugf(format string, v ...interface{}) { + l.logger.Printf(format, v...) 
+} + +func (l *Logger) Errorf(format string, v ...interface{}) { + l.logger.Printf(format, v...) +} diff --git a/pkg/query-service/app/opamp/model/agent.go b/pkg/query-service/app/opamp/model/agent.go new file mode 100644 index 0000000000..ba2ecfcddc --- /dev/null +++ b/pkg/query-service/app/opamp/model/agent.go @@ -0,0 +1,334 @@ +package model + +import ( + "bytes" + "context" + "crypto/sha256" + "sync" + "time" + + "google.golang.org/protobuf/proto" + + "github.com/open-telemetry/opamp-go/protobufs" + "github.com/open-telemetry/opamp-go/server/types" +) + +type AgentStatus int + +const ( + AgentStatusUnknown AgentStatus = iota + AgentStatusConnected + AgentStatusDisconnected +) + +// set in agent description when agent is capable of supporting +// lb exporter configuration. values: 1 (true) or 0 (false) +const lbExporterFlag = "capabilities.lbexporter" + +type Agent struct { + ID string `json:"agentId" yaml:"agentId" db:"agent_id"` + StartedAt time.Time `json:"startedAt" yaml:"startedAt" db:"started_at"` + TerminatedAt time.Time `json:"terminatedAt" yaml:"terminatedAt" db:"terminated_at"` + EffectiveConfig string `json:"effectiveConfig" yaml:"effectiveConfig" db:"effective_config"` + CurrentStatus AgentStatus `json:"currentStatus" yaml:"currentStatus" db:"current_status"` + remoteConfig *protobufs.AgentRemoteConfig + Status *protobufs.AgentToServer + + // can this agent be load balancer + CanLB bool + + // is this agent setup as load balancer + IsLb bool + + conn types.Connection + connMutex sync.Mutex + mux sync.RWMutex +} + +func New(ID string, conn types.Connection) *Agent { + return &Agent{ID: ID, StartedAt: time.Now(), CurrentStatus: AgentStatusConnected, conn: conn} +} + +// Upsert inserts or updates the agent in the database. 
+func (agent *Agent) Upsert() error { + agent.mux.Lock() + defer agent.mux.Unlock() + + _, err := db.NamedExec(`INSERT OR REPLACE INTO agents ( + agent_id, + started_at, + effective_config, + current_status + ) VALUES ( + :agent_id, + :started_at, + :effective_config, + :current_status + )`, agent) + if err != nil { + return err + } + + return nil +} + +func (agent *Agent) UpdateStatus(statusMsg *protobufs.AgentToServer, response *protobufs.ServerToAgent) { + agent.mux.Lock() + defer agent.mux.Unlock() + agent.processStatusUpdate(statusMsg, response) +} + +// extracts lb exporter support flag from agent description. the flag +// is used to decide if lb exporter can be enabled on the agent. +func ExtractLbFlag(agentDescr *protobufs.AgentDescription) bool { + + if agentDescr == nil { + return false + } + + if len(agentDescr.NonIdentifyingAttributes) > 0 { + for _, kv := range agentDescr.NonIdentifyingAttributes { + anyvalue, ok := kv.Value.Value.(*protobufs.AnyValue_StringValue) + if !ok { + continue + } + if kv.Key == lbExporterFlag && anyvalue.StringValue == "1" { + // agent can support load balancer config + return true + } + } + } + return false +} + +func (agent *Agent) updateAgentDescription(newStatus *protobufs.AgentToServer) (agentDescrChanged bool) { + prevStatus := agent.Status + + if agent.Status == nil { + // First time this Agent reports a status, remember it. + agent.Status = newStatus + agentDescrChanged = true + } else { + // Not a new Agent. Update the Status. + agent.Status.SequenceNum = newStatus.SequenceNum + + // Check what's changed in the AgentDescription. + if newStatus.AgentDescription != nil { + // If the AgentDescription field is set it means the Agent tells us + // something is changed in the field since the last status report + // (or this is the first report). + // Make full comparison of previous and new descriptions to see if it + // really is different. 
+ if prevStatus != nil && proto.Equal(prevStatus.AgentDescription, newStatus.AgentDescription) { + // Agent description didn't change. + agentDescrChanged = false + } else { + // Yes, the description is different, update it. + agent.Status.AgentDescription = newStatus.AgentDescription + agentDescrChanged = true + } + } else { + // AgentDescription field is not set, which means description didn't change. + agentDescrChanged = false + } + + // Update remote config status if it is included and is different from what we have. + if newStatus.RemoteConfigStatus != nil && + !proto.Equal(agent.Status.RemoteConfigStatus, newStatus.RemoteConfigStatus) { + agent.Status.RemoteConfigStatus = newStatus.RemoteConfigStatus + + // todo: need to address multiple agent scenario here + // for now, the first response will be sent back to the UI + if agent.Status.RemoteConfigStatus.Status == protobufs.RemoteConfigStatuses_RemoteConfigStatuses_APPLIED { + onConfigSuccess(agent.ID, string(agent.Status.RemoteConfigStatus.LastRemoteConfigHash)) + } + + if agent.Status.RemoteConfigStatus.Status == protobufs.RemoteConfigStatuses_RemoteConfigStatuses_FAILED { + onConfigFailure(agent.ID, string(agent.Status.RemoteConfigStatus.LastRemoteConfigHash), agent.Status.RemoteConfigStatus.ErrorMessage) + } + } + } + + if agentDescrChanged { + agent.CanLB = ExtractLbFlag(newStatus.AgentDescription) + } + + return agentDescrChanged +} + +func (agent *Agent) updateHealth(newStatus *protobufs.AgentToServer) { + if newStatus.Health == nil { + return + } + + agent.Status.Health = newStatus.Health + + if agent.Status != nil && agent.Status.Health != nil && agent.Status.Health.Healthy { + agent.StartedAt = time.Unix(0, int64(agent.Status.Health.StartTimeUnixNano)).UTC() + } +} + +func (agent *Agent) updateRemoteConfigStatus(newStatus *protobufs.AgentToServer) { + // Update remote config status if it is included and is different from what we have. 
+ if newStatus.RemoteConfigStatus != nil { + agent.Status.RemoteConfigStatus = newStatus.RemoteConfigStatus + } +} + +func (agent *Agent) updateStatusField(newStatus *protobufs.AgentToServer) (agentDescrChanged bool) { + if agent.Status == nil { + // First time this Agent reports a status, remember it. + agent.Status = newStatus + agentDescrChanged = true + } + + agentDescrChanged = agent.updateAgentDescription(newStatus) || agentDescrChanged + agent.updateRemoteConfigStatus(newStatus) + agent.updateHealth(newStatus) + return agentDescrChanged +} + +func (agent *Agent) updateEffectiveConfig(newStatus *protobufs.AgentToServer, response *protobufs.ServerToAgent) { + // Update effective config if provided. + if newStatus.EffectiveConfig != nil { + if newStatus.EffectiveConfig.ConfigMap != nil { + agent.Status.EffectiveConfig = newStatus.EffectiveConfig + + // Convert to string for displaying purposes. + agent.EffectiveConfig = "" + // There should be only one config in the map. + for _, cfg := range newStatus.EffectiveConfig.ConfigMap.ConfigMap { + agent.EffectiveConfig = string(cfg.Body) + } + } + } +} + +func (agent *Agent) hasCapability(capability protobufs.AgentCapabilities) bool { + return agent.Status.Capabilities&uint64(capability) != 0 +} + +func (agent *Agent) processStatusUpdate( + newStatus *protobufs.AgentToServer, + response *protobufs.ServerToAgent, +) { + // We don't have any status for this Agent, or we lost the previous status update from the Agent, so our + // current status is not up-to-date. + lostPreviousUpdate := (agent.Status == nil) || (agent.Status != nil && agent.Status.SequenceNum+1 != newStatus.SequenceNum) + + agentDescrChanged := agent.updateStatusField(newStatus) + + // Check if any fields were omitted in the status report. 
+ effectiveConfigOmitted := newStatus.EffectiveConfig == nil && + agent.hasCapability(protobufs.AgentCapabilities_AgentCapabilities_ReportsEffectiveConfig) + + remoteConfigStatusOmitted := newStatus.RemoteConfigStatus == nil && + agent.hasCapability(protobufs.AgentCapabilities_AgentCapabilities_ReportsRemoteConfig) + + healthOmitted := newStatus.Health == nil && + agent.hasCapability(protobufs.AgentCapabilities_AgentCapabilities_ReportsHealth) + + // True if the status was not fully reported. + statusIsCompressed := effectiveConfigOmitted || remoteConfigStatusOmitted || healthOmitted + + if statusIsCompressed && lostPreviousUpdate { + // The status message is not fully set in the message that we received, but we lost the previous + // status update. Request full status update from the agent. + response.Flags |= uint64(protobufs.ServerToAgentFlags_ServerToAgentFlags_ReportFullState) + } + + configChanged := false + if agentDescrChanged { + // Agent description is changed. + + // We need to recalculate the config. + configChanged = agent.updateRemoteConfig() + } + + // If remote config is changed and different from what the Agent has then + // send the new remote config to the Agent. + if configChanged || + (agent.Status.RemoteConfigStatus != nil && + bytes.Compare(agent.Status.RemoteConfigStatus.LastRemoteConfigHash, agent.remoteConfig.ConfigHash) != 0) { + // The new status resulted in a change in the config of the Agent or the Agent + // does not have this config (hash is different). Send the new config the Agent. + response.RemoteConfig = agent.remoteConfig + agent.SendToAgent(response) + } + + agent.updateEffectiveConfig(newStatus, response) +} + +func (agent *Agent) updateRemoteConfig() bool { + hash := sha256.New() + + cfg := protobufs.AgentRemoteConfig{ + Config: &protobufs.AgentConfigMap{ + ConfigMap: map[string]*protobufs.AgentConfigFile{}, + }, + } + + // Calculate the hash. 
+ for k, v := range cfg.Config.ConfigMap { + hash.Write([]byte(k)) + hash.Write(v.Body) + hash.Write([]byte(v.ContentType)) + } + + cfg.ConfigHash = hash.Sum(nil) + + configChanged := !isEqualRemoteConfig(agent.remoteConfig, &cfg) + + agent.remoteConfig = &cfg + + return configChanged +} + +func isEqualRemoteConfig(c1, c2 *protobufs.AgentRemoteConfig) bool { + if c1 == c2 { + return true + } + if c1 == nil || c2 == nil { + return false + } + return isEqualConfigSet(c1.Config, c2.Config) +} + +func isEqualConfigSet(c1, c2 *protobufs.AgentConfigMap) bool { + if c1 == c2 { + return true + } + if c1 == nil || c2 == nil { + return false + } + if len(c1.ConfigMap) != len(c2.ConfigMap) { + return false + } + for k, v1 := range c1.ConfigMap { + v2, ok := c2.ConfigMap[k] + if !ok { + return false + } + if !isEqualConfigFile(v1, v2) { + return false + } + } + return true +} + +func isEqualConfigFile(f1, f2 *protobufs.AgentConfigFile) bool { + if f1 == f2 { + return true + } + if f1 == nil || f2 == nil { + return false + } + return bytes.Compare(f1.Body, f2.Body) == 0 && f1.ContentType == f2.ContentType +} + +func (agent *Agent) SendToAgent(msg *protobufs.ServerToAgent) { + agent.connMutex.Lock() + defer agent.connMutex.Unlock() + + agent.conn.Send(context.Background(), msg) +} diff --git a/pkg/query-service/app/opamp/model/agents.go b/pkg/query-service/app/opamp/model/agents.go new file mode 100644 index 0000000000..18faddb48b --- /dev/null +++ b/pkg/query-service/app/opamp/model/agents.go @@ -0,0 +1,117 @@ +package model + +import ( + "fmt" + "sync" + "time" + + "github.com/jmoiron/sqlx" + "github.com/open-telemetry/opamp-go/server/types" +) + +var db *sqlx.DB + +var AllAgents = Agents{ + agentsById: map[string]*Agent{}, + connections: map[types.Connection]map[string]bool{}, +} + +type Agents struct { + mux sync.RWMutex + agentsById map[string]*Agent + connections map[types.Connection]map[string]bool +} + +func (a *Agents) Count() int { + return len(a.connections) +} + +// 
InitDB initializes the database and creates the agents table. +func InitDB(dataSourceName string) (*sqlx.DB, error) { + var err error + + db, err = sqlx.Open("sqlite3", dataSourceName) + if err != nil { + return nil, err + } + + tableSchema := `CREATE TABLE IF NOT EXISTS agents ( + agent_id TEXT PRIMARY KEY UNIQUE, + started_at datetime NOT NULL, + terminated_at datetime, + current_status TEXT NOT NULL, + effective_config TEXT NOT NULL + );` + + _, err = db.Exec(tableSchema) + if err != nil { + return nil, fmt.Errorf("Error in creating agents table: %s", err.Error()) + } + + AllAgents = Agents{ + agentsById: make(map[string]*Agent), + connections: make(map[types.Connection]map[string]bool), + mux: sync.RWMutex{}, + } + return db, nil +} + +// RemoveConnection removes the connection all Agent instances associated with the +// connection. +func (agents *Agents) RemoveConnection(conn types.Connection) { + agents.mux.Lock() + defer agents.mux.Unlock() + + for instanceId := range agents.connections[conn] { + agent := agents.agentsById[instanceId] + agent.CurrentStatus = AgentStatusDisconnected + agent.TerminatedAt = time.Now() + agent.Upsert() + delete(agents.agentsById, instanceId) + } + delete(agents.connections, conn) +} + +// FindAgent returns the Agent instance associated with the given agentID. +func (agents *Agents) FindAgent(agentID string) *Agent { + agents.mux.RLock() + defer agents.mux.RUnlock() + return agents.agentsById[agentID] +} + +// FindOrCreateAgent returns the Agent instance associated with the given agentID. +// If the Agent instance does not exist, it is created and added to the list of +// Agent instances. 
+func (agents *Agents) FindOrCreateAgent(agentID string, conn types.Connection) (*Agent, bool, error) { + agents.mux.Lock() + defer agents.mux.Unlock() + var created bool + agent, ok := agents.agentsById[agentID] + var err error + if !ok || agent == nil { + agent = New(agentID, conn) + err = agent.Upsert() + if err != nil { + return nil, created, err + } + agents.agentsById[agentID] = agent + + if agents.connections[conn] == nil { + agents.connections[conn] = map[string]bool{} + } + agents.connections[conn][agentID] = true + created = true + } + return agent, created, nil +} + +func (agents *Agents) GetAllAgents() []*Agent { + agents.mux.RLock() + defer agents.mux.RUnlock() + + allAgents := []*Agent{} + for _, v := range agents.agentsById { + allAgents = append(allAgents, v) + } + return allAgents +} diff --git a/pkg/query-service/app/opamp/model/coordinator.go b/pkg/query-service/app/opamp/model/coordinator.go new file mode 100644 index 0000000000..a1f17f43a2 --- /dev/null +++ b/pkg/query-service/app/opamp/model/coordinator.go @@ -0,0 +1,66 @@ +package model + +import ( + "fmt" + "sync" +) + +// communicates with calling apis when config is applied or fails +var coordinator *Coordinator + +func init() { + subscribers := make(map[string][]OnChangeCallback, 0) + coordinator = &Coordinator{ + subscribers: subscribers, + } +} + +type OnChangeCallback func(agentId string, hash string, err error) + +// responsible for managing subscribers on config change +type Coordinator struct { + mutex sync.Mutex + + // hash wise list of subscribers + subscribers map[string][]OnChangeCallback +} + +func onConfigSuccess(agentId string, hash string) { + notifySubscribers(agentId, hash, nil) +} + +func onConfigFailure(agentId string, hash string, errorMessage string) { + notifySubscribers(agentId, hash, fmt.Errorf(errorMessage)) +} + +// OnSuccess listens to config changes and notifies subscribers +func notifySubscribers(agentId string, hash string, err error) { + // this method 
currently does not handle multi-agent scenario. + // as soon as a message is delivered, we release all the subscribers + // for a given hash + subs, ok := coordinator.subscribers[hash] + if !ok { + return + } + + for _, s := range subs { + s(agentId, hash, err) + } + + // delete all subscribers for this hash, assume future + // notifies will be disabled. the first response is processed + delete(coordinator.subscribers, hash) +} + +// callers subscribe to this function to listen on config change requests +func ListenToConfigUpdate(agentId string, hash string, ss OnChangeCallback) { + coordinator.mutex.Lock() + defer coordinator.mutex.Unlock() + + if subs, ok := coordinator.subscribers[hash]; ok { + subs = append(subs, ss) + coordinator.subscribers[hash] = subs + } else { + coordinator.subscribers[hash] = []OnChangeCallback{ss} + } +} diff --git a/pkg/query-service/app/opamp/opamp_server.go b/pkg/query-service/app/opamp/opamp_server.go new file mode 100644 index 0000000000..237b07f121 --- /dev/null +++ b/pkg/query-service/app/opamp/opamp_server.go @@ -0,0 +1,204 @@ +package opamp + +import ( + "context" + "crypto/sha256" + + "strings" + + "github.com/knadh/koanf/parsers/yaml" + "github.com/open-telemetry/opamp-go/protobufs" + "github.com/open-telemetry/opamp-go/server" + "github.com/open-telemetry/opamp-go/server/types" + "go.opentelemetry.io/collector/confmap" + model "go.signoz.io/signoz/pkg/query-service/app/opamp/model" + + "go.uber.org/zap" +) + +var opAmpServer *Server + +type Server struct { + server server.OpAMPServer + agents *model.Agents + logger *zap.Logger + capabilities int32 +} + +const capabilities = protobufs.ServerCapabilities_ServerCapabilities_AcceptsEffectiveConfig | + protobufs.ServerCapabilities_ServerCapabilities_OffersRemoteConfig | + protobufs.ServerCapabilities_ServerCapabilities_AcceptsStatus + +func InitalizeServer(listener string, agents *model.Agents) error { + + if agents == nil { + agents = &model.AllAgents + } + + opAmpServer = 
&Server{ + agents: agents, + } + opAmpServer.server = server.New(zap.S()) + + return opAmpServer.Start(listener) +} + +func StopServer() { + if opAmpServer != nil { + opAmpServer.Stop() + } +} + +func (srv *Server) Start(listener string) error { + settings := server.StartSettings{ + Settings: server.Settings{ + Callbacks: server.CallbacksStruct{ + OnMessageFunc: srv.onMessage, + OnConnectionCloseFunc: srv.onDisconnect, + }, + }, + ListenEndpoint: listener, + } + + return srv.server.Start(settings) +} + +func (srv *Server) Stop() { + srv.server.Stop(context.Background()) +} + +func (srv *Server) onDisconnect(conn types.Connection) { + srv.agents.RemoveConnection(conn) +} + +func (srv *Server) onMessage(conn types.Connection, msg *protobufs.AgentToServer) *protobufs.ServerToAgent { + agentID := msg.InstanceUid + + agent, created, err := srv.agents.FindOrCreateAgent(agentID, conn) + if err != nil { + zap.S().Error("Failed to find or create agent %q: %v", agentID, err) + // TODO: handle error + } + + if created { + agent.CanLB = model.ExtractLbFlag(msg.AgentDescription) + zap.S().Debugf("New agent added:", zap.Bool("canLb", agent.CanLB), zap.String("ID", agent.ID), zap.Any("status", agent.CurrentStatus)) + } + + var response *protobufs.ServerToAgent + response = &protobufs.ServerToAgent{ + InstanceUid: agentID, + Capabilities: uint64(capabilities), + } + + agent.UpdateStatus(msg, response) + + return response +} + +// global var methods to support singleton pattern. 
we want to discourage +// allow multiple servers in one installation +func Ready() bool { + if opAmpServer == nil { + return false + } + if opAmpServer.agents.Count() == 0 { + zap.S().Warnf("no agents available, all agent config requests will be rejected") + return false + } + return true +} + +func Subscribe(agentId string, hash string, f model.OnChangeCallback) { + model.ListenToConfigUpdate(agentId, hash, f) +} + +func UpsertProcessor(ctx context.Context, processors map[string]interface{}, names []interface{}) error { + x := map[string]interface{}{ + "processors": processors, + } + + newConf := confmap.NewFromStringMap(x) + + agents := opAmpServer.agents.GetAllAgents() + for _, agent := range agents { + config := agent.EffectiveConfig + c, err := yaml.Parser().Unmarshal([]byte(config)) + if err != nil { + return err + } + agentConf := confmap.NewFromStringMap(c) + + err = agentConf.Merge(newConf) + if err != nil { + return err + } + + service := agentConf.Get("service") + + logs := service.(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] + processors := logs.(map[string]interface{})["processors"].([]interface{}) + userProcessors := []interface{}{} + // remove old ones + for _, v := range processors { + if !strings.HasPrefix(v.(string), "logstransform/pipeline_") { + userProcessors = append(userProcessors, v) + } + } + // all user processors are pushed after pipelines + processors = append(names, userProcessors...) 
+ + service.(map[string]interface{})["pipelines"].(map[string]interface{})["logs"].(map[string]interface{})["processors"] = processors + + s := map[string]interface{}{ + "service": map[string]interface{}{ + "pipelines": map[string]interface{}{ + "logs": map[string]interface{}{ + "processors": processors, + }, + }, + }, + } + + serviceC := confmap.NewFromStringMap(s) + + err = agentConf.Merge(serviceC) + if err != nil { + return err + } + + // ------ complete adding processor + configR, err := yaml.Parser().Marshal(agentConf.ToStringMap()) + if err != nil { + return err + } + + zap.S().Infof("sending new config", string(configR)) + hash := sha256.New() + _, err = hash.Write(configR) + if err != nil { + return err + } + agent.EffectiveConfig = string(configR) + err = agent.Upsert() + if err != nil { + return err + } + + agent.SendToAgent(&protobufs.ServerToAgent{ + RemoteConfig: &protobufs.AgentRemoteConfig{ + Config: &protobufs.AgentConfigMap{ + ConfigMap: map[string]*protobufs.AgentConfigFile{ + "collector.yaml": { + Body: configR, + ContentType: "application/x-yaml", + }, + }, + }, + ConfigHash: hash.Sum(nil), + }, + }) + } + + return nil +} diff --git a/pkg/query-service/app/opamp/otelconfig/config_parser.go b/pkg/query-service/app/opamp/otelconfig/config_parser.go new file mode 100644 index 0000000000..5d6c8adba4 --- /dev/null +++ b/pkg/query-service/app/opamp/otelconfig/config_parser.go @@ -0,0 +1,195 @@ +package otelconfig + +import ( + "sync" + + "go.opentelemetry.io/collector/confmap" +) + +type ConfigParser struct { + lock sync.Mutex + agentConf *confmap.Conf +} + +func NewConfigParser(agentConf *confmap.Conf) ConfigParser { + return ConfigParser{ + agentConf: agentConf, + } +} + +func toMap(i interface{}) map[string]interface{} { + return i.(map[string]interface{}) +} + +func toList(i interface{}) []interface{} { + return i.([]interface{}) +} + +// sent when key is not found in config +func emptyMap() map[string]interface{} { + return 
map[string]interface{}{} +} + +func emptyList() []interface{} { + return []interface{}{} +} + +func (cp *ConfigParser) Service() map[string]interface{} { + service := cp.agentConf.Get("service") + if service == nil { + return emptyMap() + } + return toMap(service) +} + +// components gets the high level parts like receivers, exporters, processors etc +func (cp *ConfigParser) components(partName, nameOptional string) map[string]interface{} { + parts := cp.agentConf.Get(partName) + if parts == nil { + return emptyMap() + } + + parsedParts := toMap(parts) + if nameOptional != "" { + if p, ok := parsedParts[nameOptional]; ok { + return p.(map[string]interface{}) + } else { + return emptyMap() + } + } + + return parsedParts +} + +func (cp *ConfigParser) Processors() map[string]interface{} { + return cp.components("processors", "") +} + +func (cp *ConfigParser) Processor(name string) map[string]interface{} { + return cp.components("processors", name) +} + +func (cp *ConfigParser) Exporters() map[string]interface{} { + return cp.components("exporters", "") +} + +func (cp *ConfigParser) Exporter(name string) map[string]interface{} { + return cp.components("exporters", name) +} + +func (cp *ConfigParser) Receivers() map[string]interface{} { + return cp.components("receivers", "") +} + +func (cp *ConfigParser) Receiver(name string) map[string]interface{} { + return cp.components("receivers", name) +} + +func (cp *ConfigParser) Pipelines(nameOptional string) map[string]interface{} { + services := cp.Service() + if p, ok := services["pipelines"]; ok { + pipelines := toMap(p) + if nameOptional != "" { + if namedPipeline, ok := pipelines[nameOptional]; ok { + return toMap(namedPipeline) + } else { + return emptyMap() + } + + } + return pipelines + } + return emptyMap() +} + +// component can be "recevers", "exporter" or "processors" +func (cp *ConfigParser) PipelineComponent(pipelineName, pipelineComponent string) []interface{} { + pipeline := cp.Pipelines(pipelineName) + if 
exporters, ok := pipeline[pipelineComponent]; ok { + exporters := toList(exporters) + return exporters + } + return emptyList() +} + +func (cp *ConfigParser) PipelineExporters(pipelineName string) []interface{} { + return cp.PipelineComponent(pipelineName, "exporters") +} + +func (cp *ConfigParser) PipelineReceivers(pipelineName string) []interface{} { + return cp.PipelineComponent(pipelineName, "receivers") +} + +func (cp *ConfigParser) PipelineProcessors(pipelineName string) []interface{} { + return cp.PipelineComponent(pipelineName, "processors") +} + +func (cp *ConfigParser) CheckPipelineExists(name string) bool { + if name == "" { + return false + } + pipelines := cp.Pipelines(name) + return len(pipelines) > 0 +} + +// CheckEntryInPipeline lets you look for an entry in pipeline by receiver, processor or exporter name +func (cp *ConfigParser) CheckEntryInPipeline(pipelineName, pipelineComponent, name string) bool { + if pipelineName == "" || pipelineComponent == "" || name == "" { + return false + } + + list := cp.PipelineComponent(pipelineName, pipelineComponent) + var found bool + for _, item := range list { + if item == name { + found = true + } + } + + return found +} + +func (cp *ConfigParser) CheckExporterInPipeline(pipelineName, name string) bool { + return cp.CheckEntryInPipeline(pipelineName, "exporters", name) +} + +func (cp *ConfigParser) CheckProcessorInPipeline(pipelineName, name string) bool { + return cp.CheckEntryInPipeline(pipelineName, "processors", name) +} + +func (cp *ConfigParser) Merge(c *confmap.Conf) { + cp.lock.Lock() + defer cp.lock.Unlock() + cp.agentConf.Merge(c) +} + +func (cp *ConfigParser) UpdateProcessors(processors map[string]interface{}) { + updates := cp.Processors() + + for key, params := range processors { + updates[key] = params + } + + updatedProcessors := map[string]interface{}{ + "processors": updates, + } + + updatedProcessorConf := confmap.NewFromStringMap(updatedProcessors) + + cp.Merge(updatedProcessorConf) +} + 
+func (cp *ConfigParser) UpdateProcsInPipeline(pipelineName string, list []interface{}) { + + serviceConf := map[string]interface{}{ + "service": map[string]interface{}{ + "pipelines": map[string]interface{}{ + pipelineName: map[string]interface{}{ + "processors": list, + }, + }, + }, + } + + cp.Merge(confmap.NewFromStringMap(serviceConf)) +} diff --git a/pkg/query-service/app/opamp/otelconfig/config_parser_test.go b/pkg/query-service/app/opamp/otelconfig/config_parser_test.go new file mode 100644 index 0000000000..61ceb1f613 --- /dev/null +++ b/pkg/query-service/app/opamp/otelconfig/config_parser_test.go @@ -0,0 +1,58 @@ +package otelconfig + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/knadh/koanf/parsers/yaml" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/confmap" +) + +func TestServiceConfig(t *testing.T) { + yamlFile, err := ioutil.ReadFile("./testdata/service.yaml") + if err != nil { + fmt.Printf("yamlFile.Get err #%v ", err) + t.Fail() + return + } + + c, err := yaml.Parser().Unmarshal([]byte(yamlFile)) + if err != nil { + fmt.Println("failed to parse config file as yaml", err) + t.Fail() + return + } + + agentConf := confmap.NewFromStringMap(c) + configParser := NewConfigParser(agentConf) + + expected := map[string]interface{}{ + "extensions": []interface{}{"zpages"}, + "pipelines": map[string]interface{}{ + "traces": map[string]interface{}{ + "receivers": []interface{}{"jaeger", "otlp"}, + "processors": []interface{}{ + "signozspanmetrics/prometheus", "batch", + }, + "exporters": []interface{}{ + "clickhousetraces", + }, + }, + "metrics": map[string]interface{}{ + "receivers": []interface{}{ + "otlp", "hostmetrics", + }, + "processors": []interface{}{ + "batch", + }, + "exporters": []interface{}{ + "clickhousemetricswrite", + }, + }, + }, + } + + require.Equal(t, expected, configParser.Service(), "expected same service config after parsing") +} diff --git 
a/pkg/query-service/app/opamp/otelconfig/filterprocessor/config.go b/pkg/query-service/app/opamp/otelconfig/filterprocessor/config.go new file mode 100644 index 0000000000..1d4d93a959 --- /dev/null +++ b/pkg/query-service/app/opamp/otelconfig/filterprocessor/config.go @@ -0,0 +1,11 @@ +package filterprocessor + +type Config struct { + Metrics MetricFilters `mapstructure:"metrics"` +} + +// MetricFilters filters by Metric properties. +type MetricFilters struct { + MetricConditions []string `mapstructure:"metric" yaml:"metric,omitempty"` + DataPointConditions []string `mapstructure:"datapoint" yaml:"datapoint,omitempty"` +} diff --git a/pkg/query-service/app/opamp/otelconfig/otlpreceiver/config.go b/pkg/query-service/app/opamp/otelconfig/otlpreceiver/config.go new file mode 100644 index 0000000000..869afc9f43 --- /dev/null +++ b/pkg/query-service/app/opamp/otelconfig/otlpreceiver/config.go @@ -0,0 +1,6 @@ +package otlpreceiver + +type Protocols struct { + GRPC *GRPCServerSettings `mapstructure:"grpc"` + HTTP *HTTPServerSettings `mapstructure:"http"` +} diff --git a/pkg/query-service/app/opamp/otelconfig/otlpreceiver/grpcSettings.go b/pkg/query-service/app/opamp/otelconfig/otlpreceiver/grpcSettings.go new file mode 100644 index 0000000000..f0f55eda10 --- /dev/null +++ b/pkg/query-service/app/opamp/otelconfig/otlpreceiver/grpcSettings.go @@ -0,0 +1,14 @@ +package otlpreceiver + +type GRPCServerSettings struct { + // Endpoint configures the address for this network connection. + // For TCP and UDP networks, the address has the form "host:port". The host must be a literal IP address, + // or a host name that can be resolved to IP addresses. The port must be a literal port number or a service name. + // If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or + // "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. 
+ Endpoint string `mapstructure:"endpoint"` + + // Transport to use. Known protocols are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only), "udp", "udp4" (IPv4-only), + // "udp6" (IPv6-only), "ip", "ip4" (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket". + Transport string `mapstructure:"transport"` +} diff --git a/pkg/query-service/app/opamp/otelconfig/otlpreceiver/httpSettings.go b/pkg/query-service/app/opamp/otelconfig/otlpreceiver/httpSettings.go new file mode 100644 index 0000000000..64c9a16fe5 --- /dev/null +++ b/pkg/query-service/app/opamp/otelconfig/otlpreceiver/httpSettings.go @@ -0,0 +1,9 @@ +package otlpreceiver + +type HTTPServerSettings struct { + // Endpoint configures the listening address for the server. + Endpoint string `mapstructure:"endpoint" yaml:"endpoint"` + + // TLSSetting struct exposes TLS client configuration. + TLSSetting *TLSServerSetting `mapstructure:"tls" yaml:"tls"` +} diff --git a/pkg/query-service/app/opamp/otelconfig/otlpreceiver/tls.go b/pkg/query-service/app/opamp/otelconfig/otlpreceiver/tls.go new file mode 100644 index 0000000000..39978fcb29 --- /dev/null +++ b/pkg/query-service/app/opamp/otelconfig/otlpreceiver/tls.go @@ -0,0 +1,38 @@ +package otlpreceiver + +import "time" + +type TLSSetting struct { + // Path to the CA cert. For a client this verifies the server certificate. + // For a server this verifies client certificates. If empty uses system root CA. + // (optional) + CAFile string `mapstructure:"ca_file"` + + // Path to the TLS cert to use for TLS required connections. (optional) + CertFile string `mapstructure:"cert_file"` + + // Path to the TLS key to use for TLS required connections. (optional) + KeyFile string `mapstructure:"key_file"` + + // MinVersion sets the minimum TLS version that is acceptable. + // If not set, TLS 1.2 will be used. (optional) + MinVersion string `mapstructure:"min_version"` + + // MaxVersion sets the maximum TLS version that is acceptable. 
+ // If not set, refer to crypto/tls for defaults. (optional) + MaxVersion string `mapstructure:"max_version"` + + // ReloadInterval specifies the duration after which the certificate will be reloaded + // If not set, it will never be reloaded (optional) + ReloadInterval time.Duration `mapstructure:"reload_interval"` +} + +type TLSServerSetting struct { + // squash ensures fields are correctly decoded in embedded struct. + TLSSetting `mapstructure:",squash"` + + // Path to the TLS cert to use by the server to verify a client certificate. (optional) + // This sets the ClientCAs and ClientAuth to RequireAndVerifyClientCert in the TLSConfig. Please refer to + // https://godoc.org/crypto/tls#Config for more information. (optional) + ClientCAFile string `mapstructure:"client_ca_file"` +} diff --git a/pkg/query-service/app/opamp/otelconfig/tailsampler/config.go b/pkg/query-service/app/opamp/otelconfig/tailsampler/config.go new file mode 100644 index 0000000000..d43a8eacb4 --- /dev/null +++ b/pkg/query-service/app/opamp/otelconfig/tailsampler/config.go @@ -0,0 +1,82 @@ +package tailsampler + +import "time" + +type PolicyType string + +type Config struct { + DecisionWait time.Duration `mapstructure:"decision_wait" yaml:"decision_wait"` + NumTraces uint64 `mapstructure:"num_traces" yaml:"num_traces"` + ExpectedNewTracesPerSec uint64 `mapstructure:"expected_new_traces_per_sec" yaml:"expected_new_traces_per_sec"` + PolicyCfgs []PolicyCfg `mapstructure:"policies" yaml:"policies"` + + // read only version number (optional) + Version int +} + +type ProbabilisticCfg struct { + // HashSalt allows one to configure the hashing salts. This is important in scenarios where multiple layers of collectors + // have different sampling rates: if they use the same salt all passing one layer may pass the other even if they have + // different sampling rates, configuring different salts avoids that. 
+ HashSalt string `mapstructure:"hash_salt" yaml:"hash_salt"` + // SamplingPercentage is the percentage rate at which traces are going to be sampled. Defaults to zero, i.e.: no sample. + // Values greater or equal 100 are treated as "sample all traces". + SamplingPercentage float64 `mapstructure:"sampling_percentage" yaml:"sampling_percentage"` +} + +type NumericAttributeCfg struct { + // Tag that the filter is going to be matching against. + Key string `mapstructure:"key" yaml:"key"` + // MinValue is the minimum value of the attribute to be considered a match. + MinValue int64 `mapstructure:"min_value" yaml:"min_value"` + // MaxValue is the maximum value of the attribute to be considered a match. + MaxValue int64 `mapstructure:"max_value" yaml:"max_value"` +} + +type StringAttributeCfg struct { + // Tag that the filter is going to be matching against. + Key string `mapstructure:"key" yaml:"key"` + // Values indicate the set of values or regular expressions to use when matching against attribute values. + // StringAttribute Policy will apply exact value match on Values unless EnabledRegexMatching is true. + Values []string `mapstructure:"values" yaml:"values"` + // EnabledRegexMatching determines whether match attribute values by regexp string. + EnabledRegexMatching bool `mapstructure:"enabled_regex_matching" yaml:"enabled_regex_matching"` + // CacheMaxSize is the maximum number of attribute entries of LRU Cache that stores the matched result + // from the regular expressions defined in Values. + // CacheMaxSize will not be used if EnabledRegexMatching is set to false. + CacheMaxSize int `mapstructure:"cache_max_size" yaml:"cache_max_size"` + // InvertMatch indicates that values or regular expressions must not match against attribute values. + // If InvertMatch is true and Values is equal to 'acme', all other values will be sampled except 'acme'. + // Also, if the specified Key does not match on any resource or span attributes, data will be sampled. 
+ InvertMatch bool `mapstructure:"invert_match" yaml:"invert_match"` +} + +type PolicyFilterCfg struct { + // values: AND | OR + FilterOp string `mapstructure:"filter_op" yaml:"filter_op"` + + StringAttributeCfgs []StringAttributeCfg `mapstructure:"string_attributes" yaml:"string_attributes"` + NumericAttributeCfgs []NumericAttributeCfg `mapstructure:"numeric_attributes" yaml:"numeric_attributes"` +} + +// PolicyCfg identifies policy rules in policy group +type PolicyCfg struct { + // name of the policy + Name string `mapstructure:"name" yaml:"name"` + + // Type of the policy this will be used to match the proper configuration of the policy. + Type PolicyType `mapstructure:"type" yaml:"type"` + + // Set to true for sampling rule (root) and false for conditions + Root bool `mapstructure:"root" yaml:"root"` + + Priority int `mapstructure:"priority" yaml:"priority"` + + // sampling applied when PolicyFilter matches + ProbabilisticCfg `mapstructure:",squash" yaml:"sampling"` + + // filter to activate policy + PolicyFilterCfg `mapstructure:",squash" yaml:"policy_filter"` + + SubPolicies []PolicyCfg `mapstructure:"sub_policies" yaml:"sub_policies"` +} diff --git a/pkg/query-service/app/opamp/otelconfig/testdata/basic.yaml b/pkg/query-service/app/opamp/otelconfig/testdata/basic.yaml new file mode 100644 index 0000000000..d5ef74e00f --- /dev/null +++ b/pkg/query-service/app/opamp/otelconfig/testdata/basic.yaml @@ -0,0 +1,76 @@ +receivers: + otlp/spanmetrics: + protocols: + grpc: + endpoint: "localhost:12345" + otlp: + protocols: + grpc: + http: + jaeger: + protocols: + grpc: + thrift_http: + hostmetrics: + collection_interval: 30s + scrapers: + cpu: + load: + memory: + disk: + filesystem: + network: +processors: + batch: + send_batch_size: 1000 + timeout: 10s + signozspanmetrics/prometheus: + metrics_exporter: prometheus + latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ] + 
dimensions_cache_size: 10000 + dimensions: + - name: service.namespace + default: default + - name: deployment.environment + default: default + # memory_limiter: + # # 80% of maximum memory up to 2G + # limit_mib: 1500 + # # 25% of limit up to 2G + # spike_limit_mib: 512 + # check_interval: 5s + # + # # 50% of the maximum memory + # limit_percentage: 50 + # # 20% of max memory usage spike expected + # spike_limit_percentage: 20 + # queued_retry: + # num_workers: 4 + # queue_size: 100 + # retry_on_failure: true +extensions: + zpages: {} +exporters: + clickhousetraces: + datasource: tcp://localhost:9000/?database=signoz_traces + migrations: exporter/clickhousetracesexporter/migrations + clickhousemetricswrite: + endpoint: tcp://localhost:9000/?database=signoz_metrics + resource_to_telemetry_conversion: + enabled: true + prometheus: + endpoint: "0.0.0.0:8889" +service: + extensions: [zpages] + pipelines: + traces: + receivers: [jaeger, otlp] + processors: [signozspanmetrics/prometheus, batch] + exporters: [clickhousetraces] + metrics: + receivers: [otlp, hostmetrics] + processors: [batch] + exporters: [clickhousemetricswrite] + metrics/spanmetrics: + receivers: [otlp/spanmetrics] + exporters: [prometheus] \ No newline at end of file diff --git a/pkg/query-service/app/opamp/otelconfig/testdata/service.yaml b/pkg/query-service/app/opamp/otelconfig/testdata/service.yaml new file mode 100644 index 0000000000..dd562fba0d --- /dev/null +++ b/pkg/query-service/app/opamp/otelconfig/testdata/service.yaml @@ -0,0 +1,11 @@ +service: + extensions: [zpages] + pipelines: + traces: + receivers: [jaeger, otlp] + processors: [signozspanmetrics/prometheus, batch] + exporters: [clickhousetraces] + metrics: + receivers: [otlp, hostmetrics] + processors: [batch] + exporters: [clickhousemetricswrite] diff --git a/pkg/query-service/app/opamp/pipeline_builder.go b/pkg/query-service/app/opamp/pipeline_builder.go new file mode 100644 index 0000000000..841a9ce5c6 --- /dev/null +++ 
b/pkg/query-service/app/opamp/pipeline_builder.go @@ -0,0 +1,196 @@ +package opamp + +import ( + "fmt" + "sync" + + "go.uber.org/zap" +) + +var lockTracesPipelineSpec sync.RWMutex +var lockMetricsPipelineSpec sync.RWMutex + +type pipelineStatus struct { + Name string + Enabled bool +} + +var tracesPipelineSpec = map[int]pipelineStatus{ + 0: { + Name: "signoz_tail_sampling", + Enabled: false, + }, + 1: { + Name: "batch", + Enabled: true, + }, +} + +var metricsPipelineSpec = map[int]pipelineStatus{ + 0: { + Name: "filter", + Enabled: false, + }, + 1: { + Name: "batch", + Enabled: true, + }, +} + +func updatePipelineSpec(signal string, name string, enabled bool) { + switch signal { + case "metrics": + lockMetricsPipelineSpec.Lock() + defer lockMetricsPipelineSpec.Unlock() + + for i := 0; i < len(metricsPipelineSpec); i++ { + p := metricsPipelineSpec[i] + if p.Name == name { + p.Enabled = enabled + metricsPipelineSpec[i] = p + } + } + case "traces": + lockTracesPipelineSpec.Lock() + defer lockTracesPipelineSpec.Unlock() + + for i := 0; i < len(tracesPipelineSpec); i++ { + p := tracesPipelineSpec[i] + if p.Name == name { + p.Enabled = enabled + tracesPipelineSpec[i] = p + } + } + default: + return + } + +} + +// AddToTracePipeline to enable processor in traces pipeline +func AddToTracePipelineSpec(processor string) { + updatePipelineSpec("traces", processor, true) +} + +// RemoveFromTracePipeline to remove processor from traces pipeline +func RemoveFromTracePipelineSpec(name string) { + updatePipelineSpec("traces", name, false) +} + +// AddToMetricsPipeline to enable processor in traces pipeline +func AddToMetricsPipelineSpec(processor string) { + updatePipelineSpec("metrics", processor, true) +} + +// RemoveFromMetricsPipeline to remove processor from traces pipeline +func RemoveFromMetricsPipelineSpec(name string) { + updatePipelineSpec("metrics", name, false) +} + +func checkDuplicates(pipeline []interface{}) bool { + exists := make(map[string]bool, len(pipeline)) + 
zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline) + for _, processor := range pipeline { + name := processor.(string) + if _, ok := exists[name]; ok { + return true + } + + exists[name] = true + } + return false +} + +func buildPipeline(signal Signal, current []interface{}) ([]interface{}, error) { + var spec map[int]pipelineStatus + + switch signal { + case Metrics: + spec = metricsPipelineSpec + lockMetricsPipelineSpec.Lock() + defer lockMetricsPipelineSpec.Unlock() + case Traces: + spec = tracesPipelineSpec + lockTracesPipelineSpec.Lock() + defer lockTracesPipelineSpec.Unlock() + default: + return nil, fmt.Errorf("invalid signal") + } + + pipeline := current + // create a reverse map of existing config processors and their position + existing := map[string]int{} + for i, p := range current { + name := p.(string) + existing[name] = i + } + + // create mapping from our tracesPipelinePlan (processors managed by us) to position in existing processors (from current config) + // this means, if "batch" holds position 3 in the current effective config, and 2 in our config, the map will be [2]: 3 + specVsExistingMap := map[int]int{} + + // go through plan and map its elements to current positions in effective config + for i, m := range spec { + if loc, ok := existing[m.Name]; ok { + specVsExistingMap[i] = loc + } + } + + lastMatched := -1 + inserts := 0 + + // go through plan again in the increasing order + for i := 0; i < len(spec); i++ { + m := spec[i] + + if loc, ok := specVsExistingMap[i]; ok { + // element from plan already exists in current effective config. + + currentPos := loc + inserts + // if disabled then remove from the pipeline + if !m.Enabled { + zap.S().Debugf("build_pipeline: found a disabled item, removing from pipeline at position", currentPos-1, " ", m.Name) + if currentPos-1 <= 0 { + pipeline = pipeline[currentPos+1:] + } else { + pipeline = append(pipeline[:currentPos-1], pipeline[currentPos+1:]...) 
+				}
+			}
+
+			// capture last position where match was found, this will be used
+			// to insert missing elements next to it
+			lastMatched = currentPos
+
+		} else {
+			if m.Enabled {
+				// track inserts as they shift the elements in pipeline
+				inserts++
+
+				// we use last matched to insert new item. This means, we keep inserting missing processors
+				// right after last matched processor (e.g. insert filters after tail_sampling for existing list of [batch, tail_sampling])
+
+				if lastMatched <= 0 {
+					zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position 0:", m.Name)
+					pipeline = append([]interface{}{m.Name}, pipeline[lastMatched+1:]...)
+				} else {
+					zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position :", lastMatched, " ", m.Name)
+					prior := make([]interface{}, len(pipeline[:lastMatched]))
+					next := make([]interface{}, len(pipeline[lastMatched:]))
+					copy(prior, pipeline[:lastMatched])
+					copy(next, pipeline[lastMatched:])
+
+					pipeline = append(prior, m.Name)
+					pipeline = append(pipeline, next...)
+ } + } + } + } + + if checkDuplicates(pipeline) { + // duplicates are most likely because the processor sequence in effective config conflicts + // with the planned sequence as per planned pipeline + return pipeline, fmt.Errorf("the effective config has an unexpected processor sequence: %v", pipeline) + } + + return pipeline, nil +} diff --git a/pkg/query-service/app/opamp/signal.go b/pkg/query-service/app/opamp/signal.go new file mode 100644 index 0000000000..754dfc33a8 --- /dev/null +++ b/pkg/query-service/app/opamp/signal.go @@ -0,0 +1,9 @@ +package opamp + +type Signal string + +const ( + Metrics Signal = "metrics" + Traces Signal = "traces" + Logs Signal = "logs" +) diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go index 8088c2294e..c3142643f0 100644 --- a/pkg/query-service/app/server.go +++ b/pkg/query-service/app/server.go @@ -18,8 +18,12 @@ import ( "github.com/rs/cors" "github.com/soheilhy/cmux" + "go.signoz.io/signoz/pkg/query-service/agentConf" "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader" "go.signoz.io/signoz/pkg/query-service/app/dashboards" + opamp "go.signoz.io/signoz/pkg/query-service/app/opamp" + opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model" + "go.signoz.io/signoz/pkg/query-service/app/explorer" "go.signoz.io/signoz/pkg/query-service/auth" "go.signoz.io/signoz/pkg/query-service/constants" @@ -142,6 +146,14 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { s.privateHTTP = privateServer + _, err = opAmpModel.InitDB(constants.RELATIONAL_DATASOURCE_PATH) + if err != nil { + return nil, err + } + + if err := agentConf.Initiate(localDB, "sqlite"); err != nil { + return nil, err + } return s, nil } @@ -439,6 +451,37 @@ func (s *Server) Start() error { }() + go func() { + zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", constants.OpAmpWsEndpoint)) + err := opamp.InitalizeServer(constants.OpAmpWsEndpoint, &opAmpModel.AllAgents) + if err != nil { + 
zap.S().Info("opamp ws server failed to start", err) + s.unavailableChannel <- healthcheck.Unavailable + } + }() + + return nil +} + +func (s *Server) Stop() error { + if s.httpServer != nil { + if err := s.httpServer.Shutdown(context.Background()); err != nil { + return err + } + } + + if s.privateHTTP != nil { + if err := s.privateHTTP.Shutdown(context.Background()); err != nil { + return err + } + } + + opamp.StopServer() + + if s.ruleManager != nil { + s.ruleManager.Stop() + } + return nil } diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 5bdc147c60..a18a69ac9a 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -11,6 +11,7 @@ const ( HTTPHostPort = "0.0.0.0:8080" // Address to serve http (query service) PrivateHostPort = "0.0.0.0:8085" // Address to server internal services like alert manager DebugHttpPort = "0.0.0.0:6060" // Address to serve http (pprof) + OpAmpWsEndpoint = "0.0.0.0:4320" // address for opamp websocket ) var ConfigSignozIo = "https://config.signoz.io/api/v1" diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go index 24e9d06cb8..10bfe67306 100644 --- a/pkg/query-service/main.go +++ b/pkg/query-service/main.go @@ -84,7 +84,13 @@ func main() { case status := <-server.HealthCheckStatus(): logger.Info("Received HealthCheck status: ", zap.Int("status", int(status))) case <-signalsChannel: - logger.Fatal("Received OS Interrupt Signal ... ") + logger.Info("Received OS Interrupt Signal ... 
") + err := server.Stop() + if err != nil { + logger.Fatal("Failed to stop server", zap.Error(err)) + } + logger.Info("Server stopped") + return } } From dfef41913f617c159112f0293f8d47f79cd84030 Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Wed, 15 Mar 2023 16:26:46 +0530 Subject: [PATCH 03/38] fix: get last 10 versions in getConfigHistory --- pkg/query-service/agentConf/db.go | 17 +++++++++++------ pkg/query-service/agentConf/manager.go | 4 ++-- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/pkg/query-service/agentConf/db.go b/pkg/query-service/agentConf/db.go index 591a7ee799..c3a84f6b3a 100644 --- a/pkg/query-service/agentConf/db.go +++ b/pkg/query-service/agentConf/db.go @@ -31,11 +31,11 @@ func (r *Repo) initDB(engine string) error { } } -func (r *Repo) GetConfigHistory(ctx context.Context, typ ElementTypeDef) ([]ConfigVersion, error) { +func (r *Repo) GetConfigHistory(ctx context.Context, typ ElementTypeDef, limit int) ([]ConfigVersion, error) { var c []ConfigVersion - err := r.db.SelectContext(ctx, &c, `SELECT - id, + err := r.db.SelectContext(ctx, &c, fmt.Sprintf(`SELECT version, + id, element_type, COALESCE(created_by, -1) as created_by, created_at, @@ -45,9 +45,14 @@ func (r *Repo) GetConfigHistory(ctx context.Context, typ ElementTypeDef) ([]Conf is_valid, disabled, deploy_status, - deploy_result + deploy_result, + last_hash, + last_config FROM agent_config_versions AS v - WHERE element_type = $1`, typ) + WHERE element_type = $1 + ORDER BY created_at desc, version desc + limit %v`, limit), + typ) return c, err } @@ -57,7 +62,7 @@ func (r *Repo) GetConfigVersion(ctx context.Context, typ ElementTypeDef, v int) err := r.db.GetContext(ctx, &c, `SELECT id, version, - element_type, + element_type, COALESCE(created_by, -1) as created_by, created_at, COALESCE((SELECT NAME FROM users diff --git a/pkg/query-service/agentConf/manager.go b/pkg/query-service/agentConf/manager.go index 27478b332a..f189ba9756 100644 --- 
a/pkg/query-service/agentConf/manager.go +++ b/pkg/query-service/agentConf/manager.go @@ -51,8 +51,8 @@ func GetConfigVersion(ctx context.Context, elementType ElementTypeDef, version i return m.GetConfigVersion(ctx, elementType, version) } -func GetConfigHistory(ctx context.Context, typ ElementTypeDef) ([]ConfigVersion, error) { - return m.GetConfigHistory(ctx, typ) +func GetConfigHistory(ctx context.Context, typ ElementTypeDef, limit int) ([]ConfigVersion, error) { + return m.GetConfigHistory(ctx, typ, limit) } // StartNewVersion launches a new config version for given set of elements From 500ab02c4728231e1850560266d809d652b319cb Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Wed, 15 Mar 2023 17:42:24 +0530 Subject: [PATCH 04/38] chore: logs parsing pipeline support in opamp --- pkg/query-service/agentConf/manager.go | 18 ++ pkg/query-service/app/opamp/logspipeline.go | 180 ++++++++++++++++++ .../app/opamp/logspipeline_test.go | 161 ++++++++++++++++ pkg/query-service/app/opamp/opamp_server.go | 95 --------- pkg/query-service/constants/constants.go | 3 + pkg/query-service/model/logparsingpipeline.go | 95 +++++++++ 6 files changed, 457 insertions(+), 95 deletions(-) create mode 100644 pkg/query-service/app/opamp/logspipeline.go create mode 100644 pkg/query-service/app/opamp/logspipeline_test.go create mode 100644 pkg/query-service/model/logparsingpipeline.go diff --git a/pkg/query-service/agentConf/manager.go b/pkg/query-service/agentConf/manager.go index 27478b332a..7544fb7ac1 100644 --- a/pkg/query-service/agentConf/manager.go +++ b/pkg/query-service/agentConf/manager.go @@ -210,3 +210,21 @@ func UpsertSamplingProcessor(ctx context.Context, version int, config *tsp.Confi m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml)) return nil } + +// UpsertLogParsingProcessors updates the agent with log parsing processors +func UpsertLogParsingProcessor(ctx context.Context, 
version int, rawPipelineData []byte, config map[string]interface{}, names []interface{}) error { + if !atomic.CompareAndSwapUint32(&m.lock, 0, 1) { + return fmt.Errorf("agent updater is busy") + } + defer atomic.StoreUint32(&m.lock, 0) + + // send the changes to opamp. + configHash, err := opamp.UpsertLogsParsingProcessor(context.Background(), config, names, m.OnConfigUpdate) + if err != nil { + zap.S().Errorf("failed to call agent config update for log parsing processor:", err) + return err + } + + m.updateDeployStatus(ctx, ElementTypeLogPipelines, version, string(DeployInitiated), "Deployment started", configHash, string(rawPipelineData)) + return nil +} diff --git a/pkg/query-service/app/opamp/logspipeline.go b/pkg/query-service/app/opamp/logspipeline.go new file mode 100644 index 0000000000..e3f4dbeed0 --- /dev/null +++ b/pkg/query-service/app/opamp/logspipeline.go @@ -0,0 +1,180 @@ +package opamp + +import ( + "context" + "crypto/sha256" + "fmt" + "strings" + "sync" + + "github.com/knadh/koanf/parsers/yaml" + "github.com/open-telemetry/opamp-go/protobufs" + model "go.signoz.io/signoz/pkg/query-service/app/opamp/model" + "go.signoz.io/signoz/pkg/query-service/constants" + "go.uber.org/zap" +) + +var lockLogsPipelineSpec sync.RWMutex + +func UpsertLogsParsingProcessor(ctx context.Context, parsingProcessors map[string]interface{}, parsingProcessorsNames []interface{}, callback func(string, string, error)) (string, error) { + confHash := "" + if opAmpServer == nil { + return confHash, fmt.Errorf("opamp server is down, unable to push config to agent at this moment") + } + + agents := opAmpServer.agents.GetAllAgents() + if len(agents) == 0 { + return confHash, fmt.Errorf("no agents available at the moment") + } + + for _, agent := range agents { + config := agent.EffectiveConfig + c, err := yaml.Parser().Unmarshal([]byte(config)) + if err != nil { + return confHash, err + } + + BuildLogParsingProcessors(c, parsingProcessors) + + // get the processor list + logs := 
c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] + processors := logs.(map[string]interface{})["processors"].([]interface{}) + + // build the new processor list + updatedProcessorList, _ := buildLogsProcessors(processors, parsingProcessorsNames) + + // add the new processor to the data + c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"].(map[string]interface{})["processors"] = updatedProcessorList + + updatedConf, err := yaml.Parser().Marshal(c) + if err != nil { + return confHash, err + } + + // zap.S().Infof("sending new config", string(updatedConf)) + hash := sha256.New() + _, err = hash.Write(updatedConf) + if err != nil { + return confHash, err + } + agent.EffectiveConfig = string(updatedConf) + err = agent.Upsert() + if err != nil { + return confHash, err + } + + agent.SendToAgent(&protobufs.ServerToAgent{ + RemoteConfig: &protobufs.AgentRemoteConfig{ + Config: &protobufs.AgentConfigMap{ + ConfigMap: map[string]*protobufs.AgentConfigFile{ + "collector.yaml": { + Body: updatedConf, + ContentType: "application/x-yaml", + }, + }, + }, + ConfigHash: hash.Sum(nil), + }, + }) + + if confHash == "" { + confHash = string(hash.Sum(nil)) + model.ListenToConfigUpdate(agent.ID, confHash, callback) + } + } + + return confHash, nil +} + +// check if the processors already exist +// if yes then update the processor. +// if something doesn't exists then remove it. 
+func BuildLogParsingProcessors(agentConf, parsingProcessors map[string]interface{}) error { + agentProcessors := agentConf["processors"].(map[string]interface{}) + exists := map[string]struct{}{} + for key, params := range parsingProcessors { + agentProcessors[key] = params + exists[key] = struct{}{} + } + // remove the old unwanted processors + for k := range agentProcessors { + if _, ok := exists[k]; !ok && strings.HasPrefix(k, constants.LogsPPLPfx) { + delete(agentProcessors, k) + } + } + agentConf["processors"] = agentProcessors + return nil +} + +func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{}) ([]interface{}, error) { + lockLogsPipelineSpec.Lock() + defer lockLogsPipelineSpec.Unlock() + + exists := map[string]struct{}{} + for _, v := range logsParserPipeline { + exists[v.(string)] = struct{}{} + } + + // removed the old processors which are not used + var pipeline []interface{} + for _, v := range current { + k := v.(string) + if _, ok := exists[k]; ok || !strings.HasPrefix(k, constants.LogsPPLPfx) { + pipeline = append(pipeline, v) + } + } + + // create a reverse map of existing config processors and their position + existing := map[string]int{} + for i, p := range current { + name := p.(string) + existing[name] = i + } + + // create mapping from our tracesPipelinePlan (processors managed by us) to position in existing processors (from current config) + // this means, if "batch" holds position 3 in the current effective config, and 2 in our config, the map will be [2]: 3 + specVsExistingMap := map[int]int{} + + // go through plan and map its elements to current positions in effective config + for i, m := range logsParserPipeline { + if loc, ok := existing[m.(string)]; ok { + specVsExistingMap[i] = loc + } + } + + lastMatched := 0 + + // go through plan again in the increasing order + for i := 0; i < len(logsParserPipeline); i++ { + m := logsParserPipeline[i] + + if loc, ok := specVsExistingMap[i]; ok { + lastMatched = loc + 
1 + } else { + if lastMatched <= 0 { + zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position 0:", m) + pipeline = append([]interface{}{m}, pipeline[lastMatched:]...) + lastMatched++ + } else { + zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position :", lastMatched, " ", m) + + prior := make([]interface{}, len(pipeline[:lastMatched])) + next := make([]interface{}, len(pipeline[lastMatched:])) + + copy(prior, pipeline[:lastMatched]) + copy(next, pipeline[lastMatched:]) + + pipeline = append(prior, m) + pipeline = append(pipeline, next...) + } + } + } + + if checkDuplicates(pipeline) { + // duplicates are most likely because the processor sequence in effective config conflicts + // with the planned sequence as per planned pipeline + return pipeline, fmt.Errorf("the effective config has an unexpected processor sequence: %v", pipeline) + } + + return pipeline, nil +} diff --git a/pkg/query-service/app/opamp/logspipeline_test.go b/pkg/query-service/app/opamp/logspipeline_test.go new file mode 100644 index 0000000000..d612959d19 --- /dev/null +++ b/pkg/query-service/app/opamp/logspipeline_test.go @@ -0,0 +1,161 @@ +package opamp + +import ( + "fmt" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" + "go.signoz.io/signoz/pkg/query-service/constants" +) + +var BuildProcessorTestData = []struct { + Name string + agentConf map[string]interface{} + pipelineProcessor map[string]interface{} + outputConf map[string]interface{} +}{ + { + Name: "Add", + agentConf: map[string]interface{}{ + "processors": map[string]interface{}{ + "batch": struct{}{}, + }, + }, + pipelineProcessor: map[string]interface{}{ + constants.LogsPPLPfx + "_b": struct{}{}, + }, + outputConf: map[string]interface{}{ + "processors": map[string]interface{}{ + constants.LogsPPLPfx + "_b": struct{}{}, + "batch": struct{}{}, + }, + }, + }, + { + Name: "Remove", + agentConf: map[string]interface{}{ + "processors": map[string]interface{}{ + constants.LogsPPLPfx + "_b": struct{}{}, + "batch": struct{}{}, + }, + }, + pipelineProcessor: map[string]interface{}{}, + outputConf: map[string]interface{}{ + "processors": map[string]interface{}{ + "batch": struct{}{}, + }, + }, + }, + { + Name: "remove and upsert 1", + agentConf: map[string]interface{}{ + "processors": map[string]interface{}{ + constants.LogsPPLPfx + "_a": struct{}{}, + constants.LogsPPLPfx + "_b": struct{}{}, + "batch": struct{}{}, + }, + }, + pipelineProcessor: map[string]interface{}{ + constants.LogsPPLPfx + "_b": struct{}{}, + }, + outputConf: map[string]interface{}{ + "processors": map[string]interface{}{ + constants.LogsPPLPfx + "_b": struct{}{}, + "batch": struct{}{}, + }, + }, + }, + { + Name: "remove and upsert 2", + agentConf: map[string]interface{}{ + "processors": map[string]interface{}{ + "memory_limiter": struct{}{}, + constants.LogsPPLPfx + "_a": struct{}{}, + constants.LogsPPLPfx + "_b": struct{}{}, + "batch": struct{}{}, + }, + }, + pipelineProcessor: map[string]interface{}{ + constants.LogsPPLPfx + "_b": struct{}{}, + }, + outputConf: map[string]interface{}{ + "processors": map[string]interface{}{ + "memory_limiter": struct{}{}, + constants.LogsPPLPfx + "_b": struct{}{}, + "batch": 
struct{}{}, + }, + }, + }, +} + +func TestBuildLogParsingProcessors(t *testing.T) { + for _, test := range BuildProcessorTestData { + Convey(test.Name, t, func() { + err := BuildLogParsingProcessors(test.agentConf, test.pipelineProcessor) + So(err, ShouldBeNil) + So(test.agentConf, ShouldResemble, test.outputConf) + }) + } + +} + +var BuildLogsPipelineTestData = []struct { + Name string + currentPipeline []interface{} + logsPipeline []interface{} + expectedPipeline []interface{} +}{ + { + Name: "Add new pipelines", + currentPipeline: []interface{}{"processor1", "processor2"}, + logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b"}, + expectedPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", "processor1", "processor2"}, + }, + { + Name: "Add new pipeline and respect custom processors", + currentPipeline: []interface{}{constants.LogsPPLPfx + "_a", "processor1", constants.LogsPPLPfx + "_b", "processor2"}, + logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c"}, + expectedPipeline: []interface{}{constants.LogsPPLPfx + "_a", "processor1", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c", "processor2"}, + }, + { + Name: "Add new pipeline and respect custom processors in the beginning and middle", + currentPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", "processor2", constants.LogsPPLPfx + "_b", "batch"}, + logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c"}, + expectedPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", "processor2", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c", "batch"}, + }, + { + Name: "Remove old pipeline add add new", + currentPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", "processor1", "processor2"}, + logsPipeline: []interface{}{constants.LogsPPLPfx + "_a"}, + 
expectedPipeline: []interface{}{constants.LogsPPLPfx + "_a", "processor1", "processor2"}, + }, + { + Name: "Remove old pipeline from middle", + currentPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch"}, + logsPipeline: []interface{}{constants.LogsPPLPfx + "_a"}, + expectedPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", "batch"}, + }, + { + Name: "Remove old pipeline from middle and add new pipeline", + currentPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch"}, + logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_c"}, + expectedPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_c", "processor3", "batch"}, + }, + { + Name: "Remove multiple old pipelines from middle and add multiple new ones", + currentPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", "processor2", constants.LogsPPLPfx + "_b", "processor3", constants.LogsPPLPfx + "_c", "processor4", constants.LogsPPLPfx + "_d", "processor5", "batch"}, + logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_a1", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_c1"}, + expectedPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_a1", "processor2", "processor3", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_c1", "processor4", "processor5", "batch"}, + }, +} + +func TestBuildLogsPipeline(t *testing.T) { + for _, test := range BuildLogsPipelineTestData { + Convey(test.Name, t, func() { + v, err := buildLogsProcessors(test.currentPipeline, test.logsPipeline) + So(err, ShouldBeNil) + fmt.Println(test.Name, "\n", test.currentPipeline, "\n", v, "\n", test.expectedPipeline) + So(v, ShouldResemble, test.expectedPipeline) + }) + } +} diff 
--git a/pkg/query-service/app/opamp/opamp_server.go b/pkg/query-service/app/opamp/opamp_server.go index 237b07f121..cee50ba90c 100644 --- a/pkg/query-service/app/opamp/opamp_server.go +++ b/pkg/query-service/app/opamp/opamp_server.go @@ -2,15 +2,10 @@ package opamp import ( "context" - "crypto/sha256" - "strings" - - "github.com/knadh/koanf/parsers/yaml" "github.com/open-telemetry/opamp-go/protobufs" "github.com/open-telemetry/opamp-go/server" "github.com/open-telemetry/opamp-go/server/types" - "go.opentelemetry.io/collector/confmap" model "go.signoz.io/signoz/pkg/query-service/app/opamp/model" "go.uber.org/zap" @@ -112,93 +107,3 @@ func Ready() bool { func Subscribe(agentId string, hash string, f model.OnChangeCallback) { model.ListenToConfigUpdate(agentId, hash, f) } - -func UpsertProcessor(ctx context.Context, processors map[string]interface{}, names []interface{}) error { - x := map[string]interface{}{ - "processors": processors, - } - - newConf := confmap.NewFromStringMap(x) - - agents := opAmpServer.agents.GetAllAgents() - for _, agent := range agents { - config := agent.EffectiveConfig - c, err := yaml.Parser().Unmarshal([]byte(config)) - if err != nil { - return err - } - agentConf := confmap.NewFromStringMap(c) - - err = agentConf.Merge(newConf) - if err != nil { - return err - } - - service := agentConf.Get("service") - - logs := service.(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] - processors := logs.(map[string]interface{})["processors"].([]interface{}) - userProcessors := []interface{}{} - // remove old ones - for _, v := range processors { - if !strings.HasPrefix(v.(string), "logstransform/pipeline_") { - userProcessors = append(userProcessors, v) - } - } - // all user processors are pushed after pipelines - processors = append(names, userProcessors...) 
- - service.(map[string]interface{})["pipelines"].(map[string]interface{})["logs"].(map[string]interface{})["processors"] = processors - - s := map[string]interface{}{ - "service": map[string]interface{}{ - "pipelines": map[string]interface{}{ - "logs": map[string]interface{}{ - "processors": processors, - }, - }, - }, - } - - serviceC := confmap.NewFromStringMap(s) - - err = agentConf.Merge(serviceC) - if err != nil { - return err - } - - // ------ complete adding processor - configR, err := yaml.Parser().Marshal(agentConf.ToStringMap()) - if err != nil { - return err - } - - zap.S().Infof("sending new config", string(configR)) - hash := sha256.New() - _, err = hash.Write(configR) - if err != nil { - return err - } - agent.EffectiveConfig = string(configR) - err = agent.Upsert() - if err != nil { - return err - } - - agent.SendToAgent(&protobufs.ServerToAgent{ - RemoteConfig: &protobufs.AgentRemoteConfig{ - Config: &protobufs.AgentConfigMap{ - ConfigMap: map[string]*protobufs.AgentConfigFile{ - "collector.yaml": { - Body: configR, - ContentType: "application/x-yaml", - }, - }, - }, - ConfigHash: hash.Sum(nil), - }, - }) - } - - return nil -} diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index a18a69ac9a..208089ca8a 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -220,3 +220,6 @@ const ( NumberTagMapCol = "numberTagMap" BoolTagMapCol = "boolTagMap" ) + +// logsPPLPfx is a short constant for logsPipelinePrefix +const LogsPPLPfx = "logstransform/pipeline" diff --git a/pkg/query-service/model/logparsingpipeline.go b/pkg/query-service/model/logparsingpipeline.go new file mode 100644 index 0000000000..3eec51bdc3 --- /dev/null +++ b/pkg/query-service/model/logparsingpipeline.go @@ -0,0 +1,95 @@ +package model + +import ( + "encoding/json" + "time" + + "github.com/pkg/errors" +) + +// Pipeline is stored and also deployed finally to collector config +type Pipeline 
struct { + Id string `json:"id,omitempty" db:"id"` + OrderId int `json:"orderId" db:"order_id"` + Name string `json:"name,omitempty" db:"name"` + Alias string `json:"alias" db:"alias"` + Description *string `json:"description" db:"description"` + Enabled bool `json:"enabled" db:"enabled"` + Filter string `json:"filter" db:"filter"` + + // configuration for pipeline + RawConfig string `db:"config_json" json:"-"` + + Config []PipelineOperator `json:"config"` + + // Updater not required as any change will result in new version + Creator +} + +type Creator struct { + CreatedBy string `json:"createdBy" db:"created_by"` + CreatedAt time.Time `json:"createdAt" db:"created_at"` +} + +type Processor struct { + Operators []PipelineOperator `json:"operators" yaml:"operators"` +} + +type PipelineOperator struct { + Type string `json:"type" yaml:"type"` + ID string `json:"id,omitempty" yaml:"id,omitempty"` + Output string `json:"output,omitempty" yaml:"output,omitempty"` + OnError string `json:"on_error,omitempty" yaml:"on_error,omitempty"` + + // don't need the following in the final config + OrderId int `json:"orderId" yaml:"-"` + Enabled bool `json:"enabled" yaml:"-"` + Name string `json:"name,omitempty" yaml:"-"` + + // optional keys depending on the type + ParseTo string `json:"parse_to,omitempty" yaml:"parse_to,omitempty"` + Pattern string `json:"pattern,omitempty" yaml:"pattern,omitempty"` + Regex string `json:"regex,omitempty" yaml:"regex,omitempty"` + ParseFrom string `json:"parse_from,omitempty" yaml:"parse_from,omitempty"` + Timestamp *TimestampParser `json:"timestamp,omitempty" yaml:"timestamp,omitempty"` + TraceParser *TraceParser `json:"trace_parser,omitempty" yaml:"trace_parser,omitempty"` + Field string `json:"field,omitempty" yaml:"field,omitempty"` + Value string `json:"value,omitempty" yaml:"value,omitempty"` + From string `json:"from,omitempty" yaml:"from,omitempty"` + To string `json:"to,omitempty" yaml:"to,omitempty"` + Expr string `json:"expr,omitempty" 
yaml:"expr,omitempty"` + Routes *[]Route `json:"routes,omitempty" yaml:"routes,omitempty"` + Fields []string `json:"fields,omitempty" yaml:"fields,omitempty"` + Default string `json:"default,omitempty" yaml:"default,omitempty"` +} + +type TimestampParser struct { + Layout string `json:"layout" yaml:"layout"` + LayoutType string `json:"layout_type" yaml:"layout_type"` + ParseFrom string `json:"parse_from" yaml:"parse_from"` +} + +type TraceParser struct { + TraceId *ParseFrom `json:"trace_id,omitempty" yaml:"trace_id,omitempty"` + SpanId *ParseFrom `json:"span_id,omitempty" yaml:"span_id,omitempty"` + TraceFlags *ParseFrom `json:"trace_flags,omitempty" yaml:"trace_flags,omitempty"` +} + +type ParseFrom struct { + ParseFrom string `json:"parse_from" yaml:"parse_from"` +} + +type Route struct { + Output string `json:"output" yaml:"output"` + Expr string `json:"expr" yaml:"expr"` +} + +func (i *Pipeline) ParseRawConfig() error { + c := []PipelineOperator{} + err := json.Unmarshal([]byte(i.RawConfig), &c) + if err != nil { + return errors.Wrap(err, "failed to parse ingestion rule config") + } + i.Config = c + return nil +} From 755d64061e95019c53d4c327eff34d3b53536db6 Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Wed, 15 Mar 2023 17:55:02 +0530 Subject: [PATCH 05/38] fix: minor spelling fixes --- pkg/query-service/app/opamp/logspipeline.go | 6 +++--- pkg/query-service/app/opamp/logspipeline_test.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/query-service/app/opamp/logspipeline.go b/pkg/query-service/app/opamp/logspipeline.go index e3f4dbeed0..c1dffa0980 100644 --- a/pkg/query-service/app/opamp/logspipeline.go +++ b/pkg/query-service/app/opamp/logspipeline.go @@ -34,7 +34,7 @@ func UpsertLogsParsingProcessor(ctx context.Context, parsingProcessors map[strin return confHash, err } - BuildLogParsingProcessors(c, parsingProcessors) + buildLogParsingProcessors(c, parsingProcessors) // get the processor list logs := 
c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] @@ -89,7 +89,7 @@ func UpsertLogsParsingProcessor(ctx context.Context, parsingProcessors map[strin // check if the processors already exist // if yes then update the processor. // if something doesn't exists then remove it. -func BuildLogParsingProcessors(agentConf, parsingProcessors map[string]interface{}) error { +func buildLogParsingProcessors(agentConf, parsingProcessors map[string]interface{}) error { agentProcessors := agentConf["processors"].(map[string]interface{}) exists := map[string]struct{}{} for key, params := range parsingProcessors { @@ -131,7 +131,7 @@ func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{} existing[name] = i } - // create mapping from our tracesPipelinePlan (processors managed by us) to position in existing processors (from current config) + // create mapping from our logsParserPipeline to position in existing processors (from current config) // this means, if "batch" holds position 3 in the current effective config, and 2 in our config, the map will be [2]: 3 specVsExistingMap := map[int]int{} diff --git a/pkg/query-service/app/opamp/logspipeline_test.go b/pkg/query-service/app/opamp/logspipeline_test.go index d612959d19..703d5ecb6e 100644 --- a/pkg/query-service/app/opamp/logspipeline_test.go +++ b/pkg/query-service/app/opamp/logspipeline_test.go @@ -8,7 +8,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/constants" ) -var BuildProcessorTestData = []struct { +var buildProcessorTestData = []struct { Name string agentConf map[string]interface{} pipelineProcessor map[string]interface{} @@ -89,9 +89,9 @@ var BuildProcessorTestData = []struct { } func TestBuildLogParsingProcessors(t *testing.T) { - for _, test := range BuildProcessorTestData { + for _, test := range buildProcessorTestData { Convey(test.Name, t, func() { - err := BuildLogParsingProcessors(test.agentConf, test.pipelineProcessor) + err := 
buildLogParsingProcessors(test.agentConf, test.pipelineProcessor) So(err, ShouldBeNil) So(test.agentConf, ShouldResemble, test.outputConf) }) From e1219ea94219c63cafe8482b97f1f7a5cd94704a Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Thu, 16 Mar 2023 10:20:57 +0530 Subject: [PATCH 06/38] fix: use structs instead of interface --- pkg/query-service/agentConf/manager.go | 2 +- pkg/query-service/app/opamp/logspipeline.go | 79 +++++++++++++++---- .../app/opamp/logspipeline_test.go | 78 +++++++++--------- pkg/query-service/constants/constants.go | 2 +- 4 files changed, 103 insertions(+), 58 deletions(-) diff --git a/pkg/query-service/agentConf/manager.go b/pkg/query-service/agentConf/manager.go index 8abcbacaa7..b26d382070 100644 --- a/pkg/query-service/agentConf/manager.go +++ b/pkg/query-service/agentConf/manager.go @@ -212,7 +212,7 @@ func UpsertSamplingProcessor(ctx context.Context, version int, config *tsp.Confi } // UpsertLogParsingProcessors updates the agent with log parsing processors -func UpsertLogParsingProcessor(ctx context.Context, version int, rawPipelineData []byte, config map[string]interface{}, names []interface{}) error { +func UpsertLogParsingProcessor(ctx context.Context, version int, rawPipelineData []byte, config map[string]interface{}, names []string) error { if !atomic.CompareAndSwapUint32(&m.lock, 0, 1) { return fmt.Errorf("agent updater is busy") } diff --git a/pkg/query-service/app/opamp/logspipeline.go b/pkg/query-service/app/opamp/logspipeline.go index c1dffa0980..f2467e05e0 100644 --- a/pkg/query-service/app/opamp/logspipeline.go +++ b/pkg/query-service/app/opamp/logspipeline.go @@ -3,6 +3,7 @@ package opamp import ( "context" "crypto/sha256" + "encoding/json" "fmt" "strings" "sync" @@ -16,7 +17,7 @@ import ( var lockLogsPipelineSpec sync.RWMutex -func UpsertLogsParsingProcessor(ctx context.Context, parsingProcessors map[string]interface{}, parsingProcessorsNames []interface{}, callback func(string, string, error)) (string, error) { 
+func UpsertLogsParsingProcessor(ctx context.Context, parsingProcessors map[string]interface{}, parsingProcessorsNames []string, callback func(string, string, error)) (string, error) { confHash := "" if opAmpServer == nil { return confHash, fmt.Errorf("opamp server is down, unable to push config to agent at this moment") @@ -36,15 +37,20 @@ func UpsertLogsParsingProcessor(ctx context.Context, parsingProcessors map[strin buildLogParsingProcessors(c, parsingProcessors) - // get the processor list - logs := c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] - processors := logs.(map[string]interface{})["processors"].([]interface{}) + p, err := getOtelPipelinFromConfig(c) + if err != nil { + return confHash, err + } + if p.Pipelines.Logs == nil { + return confHash, fmt.Errorf("logs pipeline doesn't exist") + } // build the new processor list - updatedProcessorList, _ := buildLogsProcessors(processors, parsingProcessorsNames) + updatedProcessorList, _ := buildLogsProcessors(p.Pipelines.Logs.Processors, parsingProcessorsNames) + p.Pipelines.Logs.Processors = updatedProcessorList - // add the new processor to the data - c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"].(map[string]interface{})["processors"] = updatedProcessorList + // add the new processor to the data ( no checks required as the keys will exists) + c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] = p.Pipelines.Logs updatedConf, err := yaml.Parser().Marshal(c) if err != nil { @@ -106,19 +112,44 @@ func buildLogParsingProcessors(agentConf, parsingProcessors map[string]interface return nil } -func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{}) ([]interface{}, error) { +type otelPipeline struct { + Pipelines struct { + Logs *struct { + Exporters []string `json:"exporters" yaml:"exporters"` + Processors []string `json:"processors" yaml:"processors"` + Receivers []string 
`json:"receivers" yaml:"receivers"` + } `json:"logs" yaml:"logs"` + } `json:"pipelines" yaml:"pipelines"` +} + +func getOtelPipelinFromConfig(config map[string]interface{}) (*otelPipeline, error) { + if _, ok := config["service"]; !ok { + return nil, fmt.Errorf("service not found in OTEL config") + } + b, err := json.Marshal(config["service"]) + if err != nil { + return nil, err + } + p := otelPipeline{} + if err := json.Unmarshal(b, &p); err != nil { + return nil, err + } + return &p, nil +} + +func buildLogsProcessors(current []string, logsParserPipeline []string) ([]string, error) { lockLogsPipelineSpec.Lock() defer lockLogsPipelineSpec.Unlock() exists := map[string]struct{}{} for _, v := range logsParserPipeline { - exists[v.(string)] = struct{}{} + exists[v] = struct{}{} } // removed the old processors which are not used - var pipeline []interface{} + var pipeline []string for _, v := range current { - k := v.(string) + k := v if _, ok := exists[k]; ok || !strings.HasPrefix(k, constants.LogsPPLPfx) { pipeline = append(pipeline, v) } @@ -127,7 +158,7 @@ func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{} // create a reverse map of existing config processors and their position existing := map[string]int{} for i, p := range current { - name := p.(string) + name := p existing[name] = i } @@ -137,7 +168,7 @@ func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{} // go through plan and map its elements to current positions in effective config for i, m := range logsParserPipeline { - if loc, ok := existing[m.(string)]; ok { + if loc, ok := existing[m]; ok { specVsExistingMap[i] = loc } } @@ -153,13 +184,13 @@ func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{} } else { if lastMatched <= 0 { zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position 0:", m) - pipeline = append([]interface{}{m}, pipeline[lastMatched:]...) 
+ pipeline = append([]string{m}, pipeline[lastMatched:]...) lastMatched++ } else { zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position :", lastMatched, " ", m) - prior := make([]interface{}, len(pipeline[:lastMatched])) - next := make([]interface{}, len(pipeline[lastMatched:])) + prior := make([]string, len(pipeline[:lastMatched])) + next := make([]string, len(pipeline[lastMatched:])) copy(prior, pipeline[:lastMatched]) copy(next, pipeline[lastMatched:]) @@ -170,7 +201,7 @@ func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{} } } - if checkDuplicates(pipeline) { + if checkDuplicateString(pipeline) { // duplicates are most likely because the processor sequence in effective config conflicts // with the planned sequence as per planned pipeline return pipeline, fmt.Errorf("the effective config has an unexpected processor sequence: %v", pipeline) @@ -178,3 +209,17 @@ func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{} return pipeline, nil } + +func checkDuplicateString(pipeline []string) bool { + exists := make(map[string]bool, len(pipeline)) + zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline) + for _, processor := range pipeline { + name := processor + if _, ok := exists[name]; ok { + return true + } + + exists[name] = true + } + return false +} diff --git a/pkg/query-service/app/opamp/logspipeline_test.go b/pkg/query-service/app/opamp/logspipeline_test.go index 703d5ecb6e..011e33473a 100644 --- a/pkg/query-service/app/opamp/logspipeline_test.go +++ b/pkg/query-service/app/opamp/logspipeline_test.go @@ -22,11 +22,11 @@ var buildProcessorTestData = []struct { }, }, pipelineProcessor: map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, }, outputConf: map[string]interface{}{ "processors": map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, 
"batch": struct{}{}, }, }, @@ -35,7 +35,7 @@ var buildProcessorTestData = []struct { Name: "Remove", agentConf: map[string]interface{}{ "processors": map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, @@ -50,17 +50,17 @@ var buildProcessorTestData = []struct { Name: "remove and upsert 1", agentConf: map[string]interface{}{ "processors": map[string]interface{}{ - constants.LogsPPLPfx + "_a": struct{}{}, - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "a": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, pipelineProcessor: map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, }, outputConf: map[string]interface{}{ "processors": map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, @@ -69,19 +69,19 @@ var buildProcessorTestData = []struct { Name: "remove and upsert 2", agentConf: map[string]interface{}{ "processors": map[string]interface{}{ - "memory_limiter": struct{}{}, - constants.LogsPPLPfx + "_a": struct{}{}, - constants.LogsPPLPfx + "_b": struct{}{}, + "memorylimiter": struct{}{}, + constants.LogsPPLPfx + "a": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, pipelineProcessor: map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, }, outputConf: map[string]interface{}{ "processors": map[string]interface{}{ - "memory_limiter": struct{}{}, - constants.LogsPPLPfx + "_b": struct{}{}, + "memorylimiter": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, @@ -89,7 +89,7 @@ var buildProcessorTestData = []struct { } func TestBuildLogParsingProcessors(t *testing.T) { - for _, test := range buildProcessorTestData { + for , test := range buildProcessorTestData { Convey(test.Name, t, func() { 
err := buildLogParsingProcessors(test.agentConf, test.pipelineProcessor) So(err, ShouldBeNil) @@ -101,56 +101,56 @@ func TestBuildLogParsingProcessors(t *testing.T) { var BuildLogsPipelineTestData = []struct { Name string - currentPipeline []interface{} - logsPipeline []interface{} - expectedPipeline []interface{} + currentPipeline []string + logsPipeline []string + expectedPipeline []string }{ { Name: "Add new pipelines", - currentPipeline: []interface{}{"processor1", "processor2"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b"}, - expectedPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", "processor1", "processor2"}, + currentPipeline: []string{"processor1", "processor2"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b"}, + expectedPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b", "processor1", "processor2"}, }, { Name: "Add new pipeline and respect custom processors", - currentPipeline: []interface{}{constants.LogsPPLPfx + "_a", "processor1", constants.LogsPPLPfx + "_b", "processor2"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c"}, - expectedPipeline: []interface{}{constants.LogsPPLPfx + "_a", "processor1", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c", "processor2"}, + currentPipeline: []string{constants.LogsPPLPfx + "a", "processor1", constants.LogsPPLPfx + "b", "processor2"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c"}, + expectedPipeline: []string{constants.LogsPPLPfx + "a", "processor1", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c", "processor2"}, }, { Name: "Add new pipeline and respect custom processors in the beginning and middle", - currentPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", "processor2", constants.LogsPPLPfx + "_b", "batch"}, - logsPipeline: 
[]interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c"}, - expectedPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", "processor2", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c", "batch"}, + currentPipeline: []string{"processor1", constants.LogsPPLPfx + "a", "processor2", constants.LogsPPLPfx + "b", "batch"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c"}, + expectedPipeline: []string{"processor1", constants.LogsPPLPfx + "a", "processor2", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c", "batch"}, }, { Name: "Remove old pipeline add add new", - currentPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", "processor1", "processor2"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a"}, - expectedPipeline: []interface{}{constants.LogsPPLPfx + "_a", "processor1", "processor2"}, + currentPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b", "processor1", "processor2"}, + logsPipeline: []string{constants.LogsPPLPfx + "a"}, + expectedPipeline: []string{constants.LogsPPLPfx + "a", "processor1", "processor2"}, }, { Name: "Remove old pipeline from middle", - currentPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a"}, - expectedPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", "batch"}, + currentPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "a", "processor3", constants.LogsPPLPfx + "b", "batch"}, + logsPipeline: []string{constants.LogsPPLPfx + "a"}, + expectedPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "a", "processor3", "batch"}, }, { Name: "Remove old pipeline from middle and add new pipeline", - currentPipeline: []interface{}{"processor1", 
"processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_c"}, - expectedPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_c", "processor3", "batch"}, + currentPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "a", "processor3", constants.LogsPPLPfx + "b", "batch"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "c"}, + expectedPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "c", "processor3", "batch"}, }, { Name: "Remove multiple old pipelines from middle and add multiple new ones", - currentPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", "processor2", constants.LogsPPLPfx + "_b", "processor3", constants.LogsPPLPfx + "_c", "processor4", constants.LogsPPLPfx + "_d", "processor5", "batch"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_a1", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_c1"}, - expectedPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_a1", "processor2", "processor3", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_c1", "processor4", "processor5", "batch"}, + currentPipeline: []string{"processor1", constants.LogsPPLPfx + "a", "processor2", constants.LogsPPLPfx + "b", "processor3", constants.LogsPPLPfx + "c", "processor4", constants.LogsPPLPfx + "d", "processor5", "batch"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "a1", constants.LogsPPLPfx + "c", constants.LogsPPLPfx + "c1"}, + expectedPipeline: []string{"processor1", constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "a1", "processor2", "processor3", constants.LogsPPLPfx + "c", constants.LogsPPLPfx + "c1", "processor4", "processor5", "batch"}, }, } func TestBuildLogsPipeline(t 
*testing.T) { - for _, test := range BuildLogsPipelineTestData { + for , test := range BuildLogsPipelineTestData { Convey(test.Name, t, func() { v, err := buildLogsProcessors(test.currentPipeline, test.logsPipeline) So(err, ShouldBeNil) diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 208089ca8a..191e7c6e5f 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -222,4 +222,4 @@ const ( ) // logsPPLPfx is a short constant for logsPipelinePrefix -const LogsPPLPfx = "logstransform/pipeline" +const LogsPPLPfx = "logstransform/pipeline_" From bac717e9e6891715d980252135d922e0ce76b7e9 Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Thu, 16 Mar 2023 10:20:57 +0530 Subject: [PATCH 07/38] fix: use structs instead of interface --- pkg/query-service/agentConf/manager.go | 2 +- pkg/query-service/app/opamp/logspipeline.go | 79 +++++++++++++++---- .../app/opamp/logspipeline_test.go | 78 +++++++++--------- pkg/query-service/constants/constants.go | 2 +- 4 files changed, 103 insertions(+), 58 deletions(-) diff --git a/pkg/query-service/agentConf/manager.go b/pkg/query-service/agentConf/manager.go index 8abcbacaa7..b26d382070 100644 --- a/pkg/query-service/agentConf/manager.go +++ b/pkg/query-service/agentConf/manager.go @@ -212,7 +212,7 @@ func UpsertSamplingProcessor(ctx context.Context, version int, config *tsp.Confi } // UpsertLogParsingProcessors updates the agent with log parsing processors -func UpsertLogParsingProcessor(ctx context.Context, version int, rawPipelineData []byte, config map[string]interface{}, names []interface{}) error { +func UpsertLogParsingProcessor(ctx context.Context, version int, rawPipelineData []byte, config map[string]interface{}, names []string) error { if !atomic.CompareAndSwapUint32(&m.lock, 0, 1) { return fmt.Errorf("agent updater is busy") } diff --git a/pkg/query-service/app/opamp/logspipeline.go 
b/pkg/query-service/app/opamp/logspipeline.go index c1dffa0980..f2467e05e0 100644 --- a/pkg/query-service/app/opamp/logspipeline.go +++ b/pkg/query-service/app/opamp/logspipeline.go @@ -3,6 +3,7 @@ package opamp import ( "context" "crypto/sha256" + "encoding/json" "fmt" "strings" "sync" @@ -16,7 +17,7 @@ import ( var lockLogsPipelineSpec sync.RWMutex -func UpsertLogsParsingProcessor(ctx context.Context, parsingProcessors map[string]interface{}, parsingProcessorsNames []interface{}, callback func(string, string, error)) (string, error) { +func UpsertLogsParsingProcessor(ctx context.Context, parsingProcessors map[string]interface{}, parsingProcessorsNames []string, callback func(string, string, error)) (string, error) { confHash := "" if opAmpServer == nil { return confHash, fmt.Errorf("opamp server is down, unable to push config to agent at this moment") @@ -36,15 +37,20 @@ func UpsertLogsParsingProcessor(ctx context.Context, parsingProcessors map[strin buildLogParsingProcessors(c, parsingProcessors) - // get the processor list - logs := c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] - processors := logs.(map[string]interface{})["processors"].([]interface{}) + p, err := getOtelPipelinFromConfig(c) + if err != nil { + return confHash, err + } + if p.Pipelines.Logs == nil { + return confHash, fmt.Errorf("logs pipeline doesn't exist") + } // build the new processor list - updatedProcessorList, _ := buildLogsProcessors(processors, parsingProcessorsNames) + updatedProcessorList, _ := buildLogsProcessors(p.Pipelines.Logs.Processors, parsingProcessorsNames) + p.Pipelines.Logs.Processors = updatedProcessorList - // add the new processor to the data - c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"].(map[string]interface{})["processors"] = updatedProcessorList + // add the new processor to the data ( no checks required as the keys will exists) + 
c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] = p.Pipelines.Logs updatedConf, err := yaml.Parser().Marshal(c) if err != nil { @@ -106,19 +112,44 @@ func buildLogParsingProcessors(agentConf, parsingProcessors map[string]interface return nil } -func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{}) ([]interface{}, error) { +type otelPipeline struct { + Pipelines struct { + Logs *struct { + Exporters []string `json:"exporters" yaml:"exporters"` + Processors []string `json:"processors" yaml:"processors"` + Receivers []string `json:"receivers" yaml:"receivers"` + } `json:"logs" yaml:"logs"` + } `json:"pipelines" yaml:"pipelines"` +} + +func getOtelPipelinFromConfig(config map[string]interface{}) (*otelPipeline, error) { + if _, ok := config["service"]; !ok { + return nil, fmt.Errorf("service not found in OTEL config") + } + b, err := json.Marshal(config["service"]) + if err != nil { + return nil, err + } + p := otelPipeline{} + if err := json.Unmarshal(b, &p); err != nil { + return nil, err + } + return &p, nil +} + +func buildLogsProcessors(current []string, logsParserPipeline []string) ([]string, error) { lockLogsPipelineSpec.Lock() defer lockLogsPipelineSpec.Unlock() exists := map[string]struct{}{} for _, v := range logsParserPipeline { - exists[v.(string)] = struct{}{} + exists[v] = struct{}{} } // removed the old processors which are not used - var pipeline []interface{} + var pipeline []string for _, v := range current { - k := v.(string) + k := v if _, ok := exists[k]; ok || !strings.HasPrefix(k, constants.LogsPPLPfx) { pipeline = append(pipeline, v) } @@ -127,7 +158,7 @@ func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{} // create a reverse map of existing config processors and their position existing := map[string]int{} for i, p := range current { - name := p.(string) + name := p existing[name] = i } @@ -137,7 +168,7 @@ func buildLogsProcessors(current []interface{}, 
logsParserPipeline []interface{} // go through plan and map its elements to current positions in effective config for i, m := range logsParserPipeline { - if loc, ok := existing[m.(string)]; ok { + if loc, ok := existing[m]; ok { specVsExistingMap[i] = loc } } @@ -153,13 +184,13 @@ func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{} } else { if lastMatched <= 0 { zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position 0:", m) - pipeline = append([]interface{}{m}, pipeline[lastMatched:]...) + pipeline = append([]string{m}, pipeline[lastMatched:]...) lastMatched++ } else { zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position :", lastMatched, " ", m) - prior := make([]interface{}, len(pipeline[:lastMatched])) - next := make([]interface{}, len(pipeline[lastMatched:])) + prior := make([]string, len(pipeline[:lastMatched])) + next := make([]string, len(pipeline[lastMatched:])) copy(prior, pipeline[:lastMatched]) copy(next, pipeline[lastMatched:]) @@ -170,7 +201,7 @@ func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{} } } - if checkDuplicates(pipeline) { + if checkDuplicateString(pipeline) { // duplicates are most likely because the processor sequence in effective config conflicts // with the planned sequence as per planned pipeline return pipeline, fmt.Errorf("the effective config has an unexpected processor sequence: %v", pipeline) @@ -178,3 +209,17 @@ func buildLogsProcessors(current []interface{}, logsParserPipeline []interface{} return pipeline, nil } + +func checkDuplicateString(pipeline []string) bool { + exists := make(map[string]bool, len(pipeline)) + zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline) + for _, processor := range pipeline { + name := processor + if _, ok := exists[name]; ok { + return true + } + + exists[name] = true + } + return false +} diff --git a/pkg/query-service/app/opamp/logspipeline_test.go 
b/pkg/query-service/app/opamp/logspipeline_test.go index 703d5ecb6e..011e33473a 100644 --- a/pkg/query-service/app/opamp/logspipeline_test.go +++ b/pkg/query-service/app/opamp/logspipeline_test.go @@ -22,11 +22,11 @@ var buildProcessorTestData = []struct { }, }, pipelineProcessor: map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, }, outputConf: map[string]interface{}{ "processors": map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, @@ -35,7 +35,7 @@ var buildProcessorTestData = []struct { Name: "Remove", agentConf: map[string]interface{}{ "processors": map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, @@ -50,17 +50,17 @@ var buildProcessorTestData = []struct { Name: "remove and upsert 1", agentConf: map[string]interface{}{ "processors": map[string]interface{}{ - constants.LogsPPLPfx + "_a": struct{}{}, - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "a": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, pipelineProcessor: map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, }, outputConf: map[string]interface{}{ "processors": map[string]interface{}{ - constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, @@ -69,19 +69,19 @@ var buildProcessorTestData = []struct { Name: "remove and upsert 2", agentConf: map[string]interface{}{ "processors": map[string]interface{}{ - "memory_limiter": struct{}{}, - constants.LogsPPLPfx + "_a": struct{}{}, - constants.LogsPPLPfx + "_b": struct{}{}, + "memorylimiter": struct{}{}, + constants.LogsPPLPfx + "a": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, pipelineProcessor: map[string]interface{}{ - 
constants.LogsPPLPfx + "_b": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, }, outputConf: map[string]interface{}{ "processors": map[string]interface{}{ - "memory_limiter": struct{}{}, - constants.LogsPPLPfx + "_b": struct{}{}, + "memorylimiter": struct{}{}, + constants.LogsPPLPfx + "b": struct{}{}, "batch": struct{}{}, }, }, @@ -89,7 +89,7 @@ var buildProcessorTestData = []struct { } func TestBuildLogParsingProcessors(t *testing.T) { - for _, test := range buildProcessorTestData { + for , test := range buildProcessorTestData { Convey(test.Name, t, func() { err := buildLogParsingProcessors(test.agentConf, test.pipelineProcessor) So(err, ShouldBeNil) @@ -101,56 +101,56 @@ func TestBuildLogParsingProcessors(t *testing.T) { var BuildLogsPipelineTestData = []struct { Name string - currentPipeline []interface{} - logsPipeline []interface{} - expectedPipeline []interface{} + currentPipeline []string + logsPipeline []string + expectedPipeline []string }{ { Name: "Add new pipelines", - currentPipeline: []interface{}{"processor1", "processor2"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b"}, - expectedPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", "processor1", "processor2"}, + currentPipeline: []string{"processor1", "processor2"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b"}, + expectedPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b", "processor1", "processor2"}, }, { Name: "Add new pipeline and respect custom processors", - currentPipeline: []interface{}{constants.LogsPPLPfx + "_a", "processor1", constants.LogsPPLPfx + "_b", "processor2"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c"}, - expectedPipeline: []interface{}{constants.LogsPPLPfx + "_a", "processor1", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c", "processor2"}, + currentPipeline: 
[]string{constants.LogsPPLPfx + "a", "processor1", constants.LogsPPLPfx + "b", "processor2"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c"}, + expectedPipeline: []string{constants.LogsPPLPfx + "a", "processor1", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c", "processor2"}, }, { Name: "Add new pipeline and respect custom processors in the beginning and middle", - currentPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", "processor2", constants.LogsPPLPfx + "_b", "batch"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c"}, - expectedPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", "processor2", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_c", "batch"}, + currentPipeline: []string{"processor1", constants.LogsPPLPfx + "a", "processor2", constants.LogsPPLPfx + "b", "batch"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c"}, + expectedPipeline: []string{"processor1", constants.LogsPPLPfx + "a", "processor2", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c", "batch"}, }, { Name: "Remove old pipeline add add new", - currentPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b", "processor1", "processor2"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a"}, - expectedPipeline: []interface{}{constants.LogsPPLPfx + "_a", "processor1", "processor2"}, + currentPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b", "processor1", "processor2"}, + logsPipeline: []string{constants.LogsPPLPfx + "a"}, + expectedPipeline: []string{constants.LogsPPLPfx + "a", "processor1", "processor2"}, }, { Name: "Remove old pipeline from middle", - currentPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch"}, - logsPipeline: 
[]interface{}{constants.LogsPPLPfx + "_a"}, - expectedPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", "batch"}, + currentPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "a", "processor3", constants.LogsPPLPfx + "b", "batch"}, + logsPipeline: []string{constants.LogsPPLPfx + "a"}, + expectedPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "a", "processor3", "batch"}, }, { Name: "Remove old pipeline from middle and add new pipeline", - currentPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_c"}, - expectedPipeline: []interface{}{"processor1", "processor2", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_c", "processor3", "batch"}, + currentPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "a", "processor3", constants.LogsPPLPfx + "b", "batch"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "c"}, + expectedPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "c", "processor3", "batch"}, }, { Name: "Remove multiple old pipelines from middle and add multiple new ones", - currentPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", "processor2", constants.LogsPPLPfx + "_b", "processor3", constants.LogsPPLPfx + "_c", "processor4", constants.LogsPPLPfx + "_d", "processor5", "batch"}, - logsPipeline: []interface{}{constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_a1", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_c1"}, - expectedPipeline: []interface{}{"processor1", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_a1", "processor2", "processor3", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_c1", "processor4", "processor5", "batch"}, + currentPipeline: []string{"processor1", 
constants.LogsPPLPfx + "a", "processor2", constants.LogsPPLPfx + "b", "processor3", constants.LogsPPLPfx + "c", "processor4", constants.LogsPPLPfx + "d", "processor5", "batch"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "a1", constants.LogsPPLPfx + "c", constants.LogsPPLPfx + "c1"}, + expectedPipeline: []string{"processor1", constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "a1", "processor2", "processor3", constants.LogsPPLPfx + "c", constants.LogsPPLPfx + "c1", "processor4", "processor5", "batch"}, }, } func TestBuildLogsPipeline(t *testing.T) { - for _, test := range BuildLogsPipelineTestData { + for , test := range BuildLogsPipelineTestData { Convey(test.Name, t, func() { v, err := buildLogsProcessors(test.currentPipeline, test.logsPipeline) So(err, ShouldBeNil) diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 208089ca8a..191e7c6e5f 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -222,4 +222,4 @@ const ( ) // logsPPLPfx is a short constant for logsPipelinePrefix -const LogsPPLPfx = "logstransform/pipeline" +const LogsPPLPfx = "logstransform/pipeline_" From 7367f8dd4b58b5550fcb540fa535c9cbee12c687 Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Thu, 16 Mar 2023 10:24:20 +0530 Subject: [PATCH 08/38] fix: tests fixed --- pkg/query-service/app/opamp/logspipeline_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/query-service/app/opamp/logspipeline_test.go b/pkg/query-service/app/opamp/logspipeline_test.go index 011e33473a..26dec4e34d 100644 --- a/pkg/query-service/app/opamp/logspipeline_test.go +++ b/pkg/query-service/app/opamp/logspipeline_test.go @@ -27,7 +27,7 @@ var buildProcessorTestData = []struct { outputConf: map[string]interface{}{ "processors": map[string]interface{}{ constants.LogsPPLPfx + "b": struct{}{}, - "batch": struct{}{}, + "batch": struct{}{}, }, }, }, 
@@ -36,7 +36,7 @@ var buildProcessorTestData = []struct { agentConf: map[string]interface{}{ "processors": map[string]interface{}{ constants.LogsPPLPfx + "b": struct{}{}, - "batch": struct{}{}, + "batch": struct{}{}, }, }, pipelineProcessor: map[string]interface{}{}, @@ -52,7 +52,7 @@ var buildProcessorTestData = []struct { "processors": map[string]interface{}{ constants.LogsPPLPfx + "a": struct{}{}, constants.LogsPPLPfx + "b": struct{}{}, - "batch": struct{}{}, + "batch": struct{}{}, }, }, pipelineProcessor: map[string]interface{}{ @@ -61,7 +61,7 @@ var buildProcessorTestData = []struct { outputConf: map[string]interface{}{ "processors": map[string]interface{}{ constants.LogsPPLPfx + "b": struct{}{}, - "batch": struct{}{}, + "batch": struct{}{}, }, }, }, @@ -72,7 +72,7 @@ var buildProcessorTestData = []struct { "memorylimiter": struct{}{}, constants.LogsPPLPfx + "a": struct{}{}, constants.LogsPPLPfx + "b": struct{}{}, - "batch": struct{}{}, + "batch": struct{}{}, }, }, pipelineProcessor: map[string]interface{}{ @@ -82,14 +82,14 @@ var buildProcessorTestData = []struct { "processors": map[string]interface{}{ "memorylimiter": struct{}{}, constants.LogsPPLPfx + "b": struct{}{}, - "batch": struct{}{}, + "batch": struct{}{}, }, }, }, } func TestBuildLogParsingProcessors(t *testing.T) { - for , test := range buildProcessorTestData { + for _, test := range buildProcessorTestData { Convey(test.Name, t, func() { err := buildLogParsingProcessors(test.agentConf, test.pipelineProcessor) So(err, ShouldBeNil) @@ -150,7 +150,7 @@ var BuildLogsPipelineTestData = []struct { } func TestBuildLogsPipeline(t *testing.T) { - for , test := range BuildLogsPipelineTestData { + for _, test := range BuildLogsPipelineTestData { Convey(test.Name, t, func() { v, err := buildLogsProcessors(test.currentPipeline, test.logsPipeline) So(err, ShouldBeNil) From 91c3abae37b24d1ce837e9baf5e05c8628fc4afd Mon Sep 17 00:00:00 2001 From: Palash Gupta Date: Fri, 17 Mar 2023 15:12:31 +0530 Subject: [PATCH 
09/38] feat: editor is updated (#2464) --- frontend/src/components/Editor/index.tsx | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/frontend/src/components/Editor/index.tsx b/frontend/src/components/Editor/index.tsx index a58df3778e..e24fab3c80 100644 --- a/frontend/src/components/Editor/index.tsx +++ b/frontend/src/components/Editor/index.tsx @@ -1,6 +1,6 @@ import MEditor, { EditorProps } from '@monaco-editor/react'; import { useIsDarkMode } from 'hooks/useDarkMode'; -import React from 'react'; +import React, { useMemo } from 'react'; function Editor({ value, @@ -11,16 +11,24 @@ function Editor({ options, }: MEditorProps): JSX.Element { const isDarkMode = useIsDarkMode(); + + const onChangeHandler = (newValue?: string): void => { + if (typeof newValue === 'string' && onChange) onChange(newValue); + }; + + const editorOptions = useMemo( + () => ({ fontSize: 16, automaticLayout: true, readOnly, ...options }), + [options, readOnly], + ); + return ( { - if (typeof newValue === 'string') onChange(newValue); - }} + onChange={onChangeHandler} /> ); } @@ -28,7 +36,7 @@ function Editor({ interface MEditorProps { value: string; language?: string; - onChange: (value: string) => void; + onChange?: (value: string) => void; readOnly?: boolean; height?: string; options?: EditorProps['options']; @@ -39,6 +47,7 @@ Editor.defaultProps = { readOnly: false, height: '40vh', options: {}, + onChange: (): void => {}, }; export default Editor; From 1ddda19c8edb4de8d719ecb1a3066db6fa6de589 Mon Sep 17 00:00:00 2001 From: Palash Gupta Date: Fri, 17 Mar 2023 15:21:02 +0530 Subject: [PATCH 10/38] feat: table view is updated for body field (#2465) --- .../container/LogDetailedView/TableView.tsx | 53 +++++++++++++++---- .../container/LogDetailedView/util.test.ts | 47 ++++++++++++++++ .../src/container/LogDetailedView/utils.ts | 11 ++++ 3 files changed, 100 insertions(+), 11 deletions(-) create mode 100644 
frontend/src/container/LogDetailedView/util.test.ts create mode 100644 frontend/src/container/LogDetailedView/utils.ts diff --git a/frontend/src/container/LogDetailedView/TableView.tsx b/frontend/src/container/LogDetailedView/TableView.tsx index 92b15d1b57..eea61f2a32 100644 --- a/frontend/src/container/LogDetailedView/TableView.tsx +++ b/frontend/src/container/LogDetailedView/TableView.tsx @@ -1,14 +1,18 @@ import { blue, orange } from '@ant-design/colors'; import { Input } from 'antd'; +import { ColumnsType } from 'antd/es/table'; +import Editor from 'components/Editor'; import AddToQueryHOC from 'components/Logs/AddToQueryHOC'; import CopyClipboardHOC from 'components/Logs/CopyClipboardHOC'; import { ResizeTable } from 'components/ResizeTable'; import flatten from 'flat'; import { fieldSearchFilter } from 'lib/logs/fieldSearch'; +import { isEmpty } from 'lodash-es'; import React, { useMemo, useState } from 'react'; import { ILog } from 'types/api/logs/log'; import ActionItem from './ActionItem'; +import { recursiveParseJSON } from './utils'; // Fields which should be restricted from adding it to query const RESTRICTED_FIELDS = ['timestamp']; @@ -41,10 +45,10 @@ function TableView({ logData }: TableViewProps): JSX.Element | null { return null; } - const columns = [ + const columns: ColumnsType = [ { title: 'Action', - width: 100, + width: 15, render: (fieldData: Record): JSX.Element | null => { const fieldKey = fieldData.field.split('.').slice(-1); if (!RESTRICTED_FIELDS.includes(fieldKey[0])) { @@ -57,7 +61,8 @@ function TableView({ logData }: TableViewProps): JSX.Element | null { title: 'Field', dataIndex: 'field', key: 'field', - width: 100, + width: 30, + ellipsis: true, render: (field: string): JSX.Element => { const fieldKey = field.split('.').slice(-1); const renderedField = {field}; @@ -78,16 +83,36 @@ function TableView({ logData }: TableViewProps): JSX.Element | null { key: 'value', width: 80, ellipsis: false, - render: (field: never): JSX.Element => ( 
- - {field} - - ), + render: (field, record): JSX.Element => { + if (record.field === 'body') { + const parsedBody = recursiveParseJSON(field); + if (!isEmpty(parsedBody)) { + return ( + + ); + } + } + + return ( + + {field} + + ); + }, }, ]; return ( -
+ <> setFieldSearchInput(e.target.value)} /> -
+ ); } +interface DataType { + key: string; + field: string; + value: string; +} + export default TableView; diff --git a/frontend/src/container/LogDetailedView/util.test.ts b/frontend/src/container/LogDetailedView/util.test.ts new file mode 100644 index 0000000000..5d6459ea47 --- /dev/null +++ b/frontend/src/container/LogDetailedView/util.test.ts @@ -0,0 +1,47 @@ +import { recursiveParseJSON } from './utils'; + +describe('recursiveParseJSON', () => { + it('should return an empty object if the input is not valid JSON', () => { + const result = recursiveParseJSON('not valid JSON'); + expect(result).toEqual({}); + }); + + it('should return the parsed JSON object for valid JSON input', () => { + const jsonString = '{"name": "John", "age": 30}'; + const result = recursiveParseJSON(jsonString); + expect(result).toEqual({ name: 'John', age: 30 }); + }); + + it('should recursively parse nested JSON objects', () => { + const jsonString = + '{"name": "John", "age": 30, "address": {"street": "123 Main St", "city": "Anytown", "state": "CA"}}'; + const result = recursiveParseJSON(jsonString); + expect(result).toEqual({ + name: 'John', + age: 30, + address: { + street: '123 Main St', + city: 'Anytown', + state: 'CA', + }, + }); + }); + + it('should recursively parse nested JSON arrays', () => { + const jsonString = '[1, 2, [3, 4], {"foo": "bar"}]'; + const result = recursiveParseJSON(jsonString); + expect(result).toEqual([1, 2, [3, 4], { foo: 'bar' }]); + }); + + it('should recursively parse deeply nested JSON objects', () => { + const jsonString = '{"foo": {"bar": {"baz": {"qux": {"value": 42}}}}}'; + const result = recursiveParseJSON(jsonString); + expect(result).toEqual({ foo: { bar: { baz: { qux: { value: 42 } } } } }); + }); + + it('should handle JSON input that contains escaped characters', () => { + const jsonString = '{"name": "John\\", \\"Doe", "age": 30}'; + const result = recursiveParseJSON(jsonString); + expect(result).toEqual({ name: 'John", "Doe', age: 30 }); + 
}); +}); diff --git a/frontend/src/container/LogDetailedView/utils.ts b/frontend/src/container/LogDetailedView/utils.ts new file mode 100644 index 0000000000..00a89c96d1 --- /dev/null +++ b/frontend/src/container/LogDetailedView/utils.ts @@ -0,0 +1,11 @@ +export const recursiveParseJSON = (obj: string): Record => { + try { + const value = JSON.parse(obj); + if (typeof value === 'string') { + return recursiveParseJSON(value); + } + return value; + } catch (e) { + return {}; + } +}; From eb4ac18162c5f7f6589ce4639be746636af75fbf Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Fri, 17 Mar 2023 17:39:28 +0530 Subject: [PATCH 11/38] feat: processor builder updated with new logic and tests --- pkg/query-service/app/opamp/logspipeline.go | 40 ++++++++--------- .../app/opamp/logspipeline_test.go | 45 +++++++++++++++++++ 2 files changed, 64 insertions(+), 21 deletions(-) diff --git a/pkg/query-service/app/opamp/logspipeline.go b/pkg/query-service/app/opamp/logspipeline.go index f2467e05e0..36f4a1473b 100644 --- a/pkg/query-service/app/opamp/logspipeline.go +++ b/pkg/query-service/app/opamp/logspipeline.go @@ -157,7 +157,7 @@ func buildLogsProcessors(current []string, logsParserPipeline []string) ([]strin // create a reverse map of existing config processors and their position existing := map[string]int{} - for i, p := range current { + for i, p := range pipeline { name := p existing[name] = i } @@ -165,49 +165,47 @@ func buildLogsProcessors(current []string, logsParserPipeline []string) ([]strin // create mapping from our logsParserPipeline to position in existing processors (from current config) // this means, if "batch" holds position 3 in the current effective config, and 2 in our config, the map will be [2]: 3 specVsExistingMap := map[int]int{} + existingVsSpec := map[int]int{} // go through plan and map its elements to current positions in effective config for i, m := range logsParserPipeline { if loc, ok := existing[m]; ok { specVsExistingMap[i] = loc + 
existingVsSpec[loc] = i } } lastMatched := 0 + newPipeline := []string{} - // go through plan again in the increasing order for i := 0; i < len(logsParserPipeline); i++ { m := logsParserPipeline[i] - if loc, ok := specVsExistingMap[i]; ok { + for j := lastMatched; j < loc; j++ { + if strings.HasPrefix(pipeline[j], constants.LogsPPLPfx) { + delete(specVsExistingMap, existingVsSpec[j]) + } else { + newPipeline = append(newPipeline, pipeline[j]) + } + } + newPipeline = append(newPipeline, pipeline[loc]) lastMatched = loc + 1 } else { - if lastMatched <= 0 { - zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position 0:", m) - pipeline = append([]string{m}, pipeline[lastMatched:]...) - lastMatched++ - } else { - zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position :", lastMatched, " ", m) - - prior := make([]string, len(pipeline[:lastMatched])) - next := make([]string, len(pipeline[lastMatched:])) - - copy(prior, pipeline[:lastMatched]) - copy(next, pipeline[lastMatched:]) - - pipeline = append(prior, m) - pipeline = append(pipeline, next...) - } + newPipeline = append(newPipeline, m) } + + } + if lastMatched < len(pipeline) { + newPipeline = append(newPipeline, pipeline[lastMatched:]...) 
} - if checkDuplicateString(pipeline) { + if checkDuplicateString(newPipeline) { // duplicates are most likely because the processor sequence in effective config conflicts // with the planned sequence as per planned pipeline return pipeline, fmt.Errorf("the effective config has an unexpected processor sequence: %v", pipeline) } - return pipeline, nil + return newPipeline, nil } func checkDuplicateString(pipeline []string) bool { diff --git a/pkg/query-service/app/opamp/logspipeline_test.go b/pkg/query-service/app/opamp/logspipeline_test.go index 26dec4e34d..eef08870dd 100644 --- a/pkg/query-service/app/opamp/logspipeline_test.go +++ b/pkg/query-service/app/opamp/logspipeline_test.go @@ -117,6 +117,12 @@ var BuildLogsPipelineTestData = []struct { logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c"}, expectedPipeline: []string{constants.LogsPPLPfx + "a", "processor1", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c", "processor2"}, }, + { + Name: "Add new pipeline and respect custom processors", + currentPipeline: []string{constants.LogsPPLPfx + "a", "processor1", constants.LogsPPLPfx + "b", "processor2"}, + logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c", constants.LogsPPLPfx + "d"}, + expectedPipeline: []string{constants.LogsPPLPfx + "a", "processor1", constants.LogsPPLPfx + "b", constants.LogsPPLPfx + "c", constants.LogsPPLPfx + "d", "processor2"}, + }, { Name: "Add new pipeline and respect custom processors in the beginning and middle", currentPipeline: []string{"processor1", constants.LogsPPLPfx + "a", "processor2", constants.LogsPPLPfx + "b", "batch"}, @@ -147,6 +153,45 @@ var BuildLogsPipelineTestData = []struct { logsPipeline: []string{constants.LogsPPLPfx + "a", constants.LogsPPLPfx + "a1", constants.LogsPPLPfx + "c", constants.LogsPPLPfx + "c1"}, expectedPipeline: []string{"processor1", constants.LogsPPLPfx + "a", constants.LogsPPLPfx + 
"a1", "processor2", "processor3", constants.LogsPPLPfx + "c", constants.LogsPPLPfx + "c1", "processor4", "processor5", "batch"}, }, + + // working + { + Name: "rearrange pipelines", + currentPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch"}, + logsPipeline: []string{constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_a"}, + expectedPipeline: []string{"processor1", "processor2", "processor3", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_a", "batch"}, + }, + { + Name: "rearrange pipelines with new processor", + currentPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch"}, + logsPipeline: []string{constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_c"}, + expectedPipeline: []string{"processor1", "processor2", "processor3", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_c", "batch"}, + // expectedPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "_b", "processor3", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_c", "batch"}, + }, + { + Name: "delete processor", + currentPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch"}, + logsPipeline: []string{}, + expectedPipeline: []string{"processor1", "processor2", "processor3", "batch"}, + }, + { + Name: "last to first", + currentPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", "processor4", constants.LogsPPLPfx + "_b", "batch", constants.LogsPPLPfx + "_c"}, + logsPipeline: []string{constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b"}, + expectedPipeline: []string{"processor1", "processor2", "processor3", "processor4", "batch", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_b"}, + }, + { + 
Name: "multiple rearrange pipelines", + currentPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch", constants.LogsPPLPfx + "_c", "processor4", "processor5", constants.LogsPPLPfx + "_d", "processor6", "processor7"}, + logsPipeline: []string{constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_d", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_e"}, + expectedPipeline: []string{"processor1", "processor2", "processor3", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_a", "batch", "processor4", "processor5", constants.LogsPPLPfx + "_d", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_e", "processor6", "processor7"}, + }, + { + Name: "multiple rearrange with new pipelines", + currentPipeline: []string{"processor1", "processor2", constants.LogsPPLPfx + "_a", "processor3", constants.LogsPPLPfx + "_b", "batch", constants.LogsPPLPfx + "_c", "processor4", "processor5", constants.LogsPPLPfx + "_d", "processor6", "processor7"}, + logsPipeline: []string{constants.LogsPPLPfx + "_z", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_a", constants.LogsPPLPfx + "_d", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_e"}, + expectedPipeline: []string{constants.LogsPPLPfx + "_z", "processor1", "processor2", "processor3", constants.LogsPPLPfx + "_b", constants.LogsPPLPfx + "_a", "batch", "processor4", "processor5", constants.LogsPPLPfx + "_d", constants.LogsPPLPfx + "_c", constants.LogsPPLPfx + "_e", "processor6", "processor7"}, + }, } func TestBuildLogsPipeline(t *testing.T) { From 481792d4cab7ff73ff19a5f1bbec23300fc1efda Mon Sep 17 00:00:00 2001 From: Chintan Sudani <46838508+techchintan@users.noreply.github.com> Date: Mon, 20 Mar 2023 18:46:20 +0530 Subject: [PATCH 12/38] fix: create/edit panel shows a blank page (#2473) --- frontend/src/components/TimePreferenceDropDown/index.tsx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/frontend/src/components/TimePreferenceDropDown/index.tsx b/frontend/src/components/TimePreferenceDropDown/index.tsx index ff6d31bcc1..3ce9795f15 100644 --- a/frontend/src/components/TimePreferenceDropDown/index.tsx +++ b/frontend/src/components/TimePreferenceDropDown/index.tsx @@ -32,8 +32,9 @@ function TimePreference({ return ( - - + + + ); } From d95148359789c5403de97a6e71b5ba2c577dff9d Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Tue, 21 Mar 2023 11:47:04 +0530 Subject: [PATCH 13/38] fix: substitute nan negative rate from couter resets (#2449) --- .../app/metrics/query_builder.go | 14 ++++++--- .../app/metrics/query_builder_test.go | 31 ++++++++++++++++--- 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/pkg/query-service/app/metrics/query_builder.go b/pkg/query-service/app/metrics/query_builder.go index 784b727514..c57cdf49ca 100644 --- a/pkg/query-service/app/metrics/query_builder.go +++ b/pkg/query-service/app/metrics/query_builder.go @@ -44,6 +44,9 @@ var AggregateOperatorToSQLFunc = map[model.AggregateOperator]string{ model.RATE_MIN: "min", } +// See https://github.com/SigNoz/signoz/issues/2151#issuecomment-1467249056 +var rateWithoutNegative = `if (runningDifference(value) < 0 OR runningDifference(ts) < 0, nan, runningDifference(value)/runningDifference(ts))` + var SupportedFunctions = []string{"exp", "log", "ln", "exp2", "log2", "exp10", "log10", "sqrt", "cbrt", "erf", "erfc", "lgamma", "tgamma", "sin", "cos", "tan", "asin", "acos", "atan", "degrees", "radians"} func GoValuateFuncs() map[string]govaluate.ExpressionFunction { @@ -200,7 +203,7 @@ func BuildMetricQuery(qp *model.QueryRangeParamsV2, mq *model.MetricQuery, table subQuery := fmt.Sprintf( queryTmpl, "any(labels) as labels, "+groupTags, qp.Step, op, filterSubQuery, groupBy, groupTags, ) // labels will be same so any should be fine - query := `SELECT %s ts, runningDifference(value)/runningDifference(ts) as value FROM(%s)` + query := `SELECT %s ts, ` + rateWithoutNegative + 
` as value FROM(%s)` query = fmt.Sprintf(query, "labels as fullLabels,", subQuery) return query, nil @@ -211,14 +214,14 @@ func BuildMetricQuery(qp *model.QueryRangeParamsV2, mq *model.MetricQuery, table subQuery := fmt.Sprintf( queryTmpl, rateGroupTags, qp.Step, op, filterSubQuery, rateGroupBy, rateGroupTags, ) // labels will be same so any should be fine - query := `SELECT %s ts, runningDifference(value)/runningDifference(ts) as value FROM(%s) OFFSET 1` + query := `SELECT %s ts, ` + rateWithoutNegative + `as value FROM(%s)` query = fmt.Sprintf(query, groupTags, subQuery) query = fmt.Sprintf(`SELECT %s ts, sum(value) as value FROM (%s) GROUP BY %s ORDER BY %s ts`, groupTags, query, groupBy, groupTags) return query, nil case model.RATE_SUM, model.RATE_MAX, model.RATE_AVG, model.RATE_MIN: op := fmt.Sprintf("%s(value)", AggregateOperatorToSQLFunc[mq.AggregateOperator]) subQuery := fmt.Sprintf(queryTmpl, groupTags, qp.Step, op, filterSubQuery, groupBy, groupTags) - query := `SELECT %s ts, runningDifference(value)/runningDifference(ts) as value FROM(%s) OFFSET 1` + query := `SELECT %s ts, ` + rateWithoutNegative + `as value FROM(%s)` query = fmt.Sprintf(query, groupTags, subQuery) return query, nil case model.P05, model.P10, model.P20, model.P25, model.P50, model.P75, model.P90, model.P95, model.P99: @@ -232,9 +235,10 @@ func BuildMetricQuery(qp *model.QueryRangeParamsV2, mq *model.MetricQuery, table subQuery := fmt.Sprintf( queryTmpl, rateGroupTags, qp.Step, op, filterSubQuery, rateGroupBy, rateGroupTags, ) // labels will be same so any should be fine - query := `SELECT %s ts, runningDifference(value)/runningDifference(ts) as value FROM(%s) OFFSET 1` + query := `SELECT %s ts, ` + rateWithoutNegative + ` as value FROM(%s)` query = fmt.Sprintf(query, groupTags, subQuery) - query = fmt.Sprintf(`SELECT %s ts, sum(value) as value FROM (%s) GROUP BY %s ORDER BY %s ts`, groupTags, query, groupBy, groupTags) + // filter out NaN values from the rate query as histogramQuantile 
doesn't support NaN values + query = fmt.Sprintf(`SELECT %s ts, sum(value) as value FROM (%s) GROUP BY %s HAVING isNaN(value) = 0 ORDER BY %s ts`, groupTags, query, groupBy, groupTags) value := AggregateOperatorToPercentile[mq.AggregateOperator] query = fmt.Sprintf(`SELECT %s ts, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) as value FROM (%s) GROUP BY %s ORDER BY %s ts`, groupTagsWithoutLe, value, query, groupByWithoutLe, groupTagsWithoutLe) diff --git a/pkg/query-service/app/metrics/query_builder_test.go b/pkg/query-service/app/metrics/query_builder_test.go index 92bc60c5b0..c749224689 100644 --- a/pkg/query-service/app/metrics/query_builder_test.go +++ b/pkg/query-service/app/metrics/query_builder_test.go @@ -28,7 +28,30 @@ func TestBuildQuery(t *testing.T) { queries := PrepareBuilderMetricQueries(q, "table").Queries So(len(queries), ShouldEqual, 1) So(queries["A"], ShouldContainSubstring, "WHERE metric_name = 'name'") - So(queries["A"], ShouldContainSubstring, "runningDifference(value)/runningDifference(ts)") + So(queries["A"], ShouldContainSubstring, rateWithoutNegative) + }) + + Convey("TestSimpleQueryWithHistQuantile", t, func() { + q := &model.QueryRangeParamsV2{ + Start: 1650991982000, + End: 1651078382000, + Step: 60, + CompositeMetricQuery: &model.CompositeMetricQuery{ + BuilderQueries: map[string]*model.MetricQuery{ + "A": { + QueryName: "A", + MetricName: "name", + AggregateOperator: model.HIST_QUANTILE_99, + Expression: "A", + }, + }, + }, + } + queries := PrepareBuilderMetricQueries(q, "table").Queries + So(len(queries), ShouldEqual, 1) + So(queries["A"], ShouldContainSubstring, "WHERE metric_name = 'name'") + So(queries["A"], ShouldContainSubstring, rateWithoutNegative) + So(queries["A"], ShouldContainSubstring, "HAVING isNaN(value) = 0") }) } @@ -57,7 +80,7 @@ func TestBuildQueryWithFilters(t *testing.T) { So(len(queries), ShouldEqual, 1) So(queries["A"], ShouldContainSubstring, "WHERE metric_name = 'name' 
AND JSONExtractString(labels, 'a') != 'b'") - So(queries["A"], ShouldContainSubstring, "runningDifference(value)/runningDifference(ts)") + So(queries["A"], ShouldContainSubstring, rateWithoutNegative) So(queries["A"], ShouldContainSubstring, "not match(JSONExtractString(labels, 'code'), 'ERROR_*')") }) } @@ -91,7 +114,7 @@ func TestBuildQueryWithMultipleQueries(t *testing.T) { queries := PrepareBuilderMetricQueries(q, "table").Queries So(len(queries), ShouldEqual, 2) So(queries["A"], ShouldContainSubstring, "WHERE metric_name = 'name' AND JSONExtractString(labels, 'in') IN ['a','b','c']") - So(queries["A"], ShouldContainSubstring, "runningDifference(value)/runningDifference(ts)") + So(queries["A"], ShouldContainSubstring, rateWithoutNegative) }) } @@ -128,7 +151,7 @@ func TestBuildQueryWithMultipleQueriesAndFormula(t *testing.T) { So(len(queries), ShouldEqual, 3) So(queries["C"], ShouldContainSubstring, "SELECT A.ts as ts, A.value / B.value") So(queries["C"], ShouldContainSubstring, "WHERE metric_name = 'name' AND JSONExtractString(labels, 'in') IN ['a','b','c']") - So(queries["C"], ShouldContainSubstring, "runningDifference(value)/runningDifference(ts)") + So(queries["C"], ShouldContainSubstring, rateWithoutNegative) }) } From d29dfa0751cfe773053d30d4c5e85cc4898e4766 Mon Sep 17 00:00:00 2001 From: Ankit Anand <83692067+ankit01-oss@users.noreply.github.com> Date: Tue, 21 Mar 2023 16:21:49 +0530 Subject: [PATCH 14/38] Update README.md (#2475) Updated product screenshots, bullet points for features --- README.md | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 324b8e2d69..532abcf0bc 100644 --- a/README.md +++ b/README.md @@ -35,14 +35,31 @@ SigNoz helps developers monitor applications and troubleshoot problems in their 👉 Filter and query logs, build dashboards and alerts based on attributes in logs 
-![screenzy-1670570187181](https://user-images.githubusercontent.com/504541/206646629-829fdafe-70e2-4503-a9c4-1301b7918586.png) -
-![screenzy-1670570193901](https://user-images.githubusercontent.com/504541/206646676-a676fdeb-331c-4847-aea9-d1cabf7c47e1.png) -
-![screenzy-1670570199026](https://user-images.githubusercontent.com/504541/206646754-28c5534f-0377-428c-9c6e-5c7c0d9dd22d.png) -
-![screenzy-1670569888865](https://user-images.githubusercontent.com/504541/206645819-1e865a56-71b4-4fde-80cc-fbdb137a4da5.png) +👉 Record exceptions automatically in Python, Java, Ruby, and Javascript +👉 Easy to set alerts with DIY query builder + + +### Application Metrics + +application_metrics + +### Distributed Tracing +distributed_tracing_2 2 + +distributed_tracing_1 + +### Logs Management + +logs_management + +### Infrastructure Monitoring + +infrastructure_monitoring + +### Alerts + +alerts_management

@@ -65,6 +82,10 @@ Come say Hi to us on [Slack](https://signoz.io/slack) 👋 - See exact request trace to figure out issues in downstream services, slow DB queries, call to 3rd party services like payment gateways, etc - Filter traces by service name, operation, latency, error, tags/annotations. - Run aggregates on trace data (events/spans) to get business relevant metrics. e.g. You can get error rate and 99th percentile latency of `customer_type: gold` or `deployment_version: v2` or `external_call: paypal` +- Native support for OpenTelemetry Logs, advanced log query builder, and automatic log collection from k8s cluster +- Lightening quick log analytics ([Logs Perf. Benchmark](https://signoz.io/blog/logs-performance-benchmark/)) +- End-to-End visibility into infrastructure performance, ingest metrics from all kinds of host environments +- Easy to set alerts with DIY query builder

From 27c48674d4bb7081f11ba83033a6691431e27a81 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Tue, 21 Mar 2023 22:53:56 +0530 Subject: [PATCH 15/38] fix: update query range params (#2453) --- pkg/query-service/model/v3/v3.go | 56 ++++++++++++++++++++++++++------ 1 file changed, 46 insertions(+), 10 deletions(-) diff --git a/pkg/query-service/model/v3/v3.go b/pkg/query-service/model/v3/v3.go index e097fdd6f1..1221e46a1b 100644 --- a/pkg/query-service/model/v3/v3.go +++ b/pkg/query-service/model/v3/v3.go @@ -229,7 +229,6 @@ type FilterAttributeKeyResponse struct { type AttributeKeyType string const ( - AttributeKeyTypeColumn AttributeKeyType = "column" AttributeKeyTypeTag AttributeKeyType = "tag" AttributeKeyTypeResource AttributeKeyType = "resource" ) @@ -238,6 +237,29 @@ type AttributeKey struct { Key string `json:"key"` DataType AttributeKeyDataType `json:"dataType"` Type AttributeKeyType `json:"type"` + IsColumn bool `json:"isColumn"` +} + +func (a AttributeKey) Validate() error { + switch a.DataType { + case AttributeKeyDataTypeBool, AttributeKeyDataTypeNumber, AttributeKeyDataTypeString: + break + default: + return fmt.Errorf("invalid attribute dataType: %s", a.DataType) + } + + switch a.Type { + case AttributeKeyTypeResource, AttributeKeyTypeTag: + break + default: + return fmt.Errorf("invalid attribute type: %s", a.Type) + } + + if a.Key == "" { + return fmt.Errorf("key is empty") + } + + return nil } type FilterAttributeValueResponse struct { @@ -345,9 +367,9 @@ type BuilderQuery struct { QueryName string `json:"queryName"` DataSource DataSource `json:"dataSource"` AggregateOperator AggregateOperator `json:"aggregateOperator"` - AggregateAttribute string `json:"aggregateAttribute,omitempty"` + AggregateAttribute AttributeKey `json:"aggregateAttribute,omitempty"` Filters *FilterSet `json:"filters,omitempty"` - GroupBy []string `json:"groupBy,omitempty"` + GroupBy []AttributeKey `json:"groupBy,omitempty"` Expression string `json:"expression"` Disabled bool 
`json:"disabled"` Having []Having `json:"having,omitempty"` @@ -356,7 +378,7 @@ type BuilderQuery struct { PageSize uint64 `json:"pageSize"` OrderBy []OrderBy `json:"orderBy,omitempty"` ReduceTo ReduceToOperator `json:"reduceTo,omitempty"` - SelectColumns []string `json:"selectColumns,omitempty"` + SelectColumns []AttributeKey `json:"selectColumns,omitempty"` } func (b *BuilderQuery) Validate() error { @@ -376,7 +398,7 @@ func (b *BuilderQuery) Validate() error { if err := b.AggregateOperator.Validate(); err != nil { return fmt.Errorf("aggregate operator is invalid: %w", err) } - if b.AggregateAttribute == "" && b.AggregateOperator.RequireAttribute() { + if b.AggregateAttribute == (AttributeKey{}) && b.AggregateOperator.RequireAttribute() { return fmt.Errorf("aggregate attribute is required") } } @@ -388,11 +410,20 @@ func (b *BuilderQuery) Validate() error { } if b.GroupBy != nil { for _, groupBy := range b.GroupBy { - if groupBy == "" { - return fmt.Errorf("group by cannot be empty") + if groupBy.Validate() != nil { + return fmt.Errorf("group by is invalid") } } } + + if b.SelectColumns != nil { + for _, selectColumn := range b.SelectColumns { + if selectColumn.Validate() != nil { + return fmt.Errorf("select column is invalid") + } + } + } + if b.Expression == "" { return fmt.Errorf("expression is required") } @@ -411,13 +442,18 @@ func (f *FilterSet) Validate() error { if f.Operator != "" && f.Operator != "AND" && f.Operator != "OR" { return fmt.Errorf("operator must be AND or OR") } + for _, item := range f.Items { + if err := item.Key.Validate(); err != nil { + return fmt.Errorf("filter item key is invalid: %w", err) + } + } return nil } type FilterItem struct { - Key string `json:"key"` - Value interface{} `json:"value"` - Operator string `json:"op"` + Key AttributeKey `json:"key"` + Value interface{} `json:"value"` + Operator string `json:"op"` } type OrderBy struct { From da23d9e08763dcdfa5ae608529ae715a95fbec2b Mon Sep 17 00:00:00 2001 From: Ankit Anand 
<83692067+ankit01-oss@users.noreply.github.com> Date: Wed, 22 Mar 2023 10:23:21 +0530 Subject: [PATCH 16/38] Update README.md (#2480) Added pic for exceptions monitoring, added shadow on app metrics image. Co-authored-by: Pranay Prateek --- README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 532abcf0bc..70779f3de5 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,8 @@ SigNoz helps developers monitor applications and troubleshoot problems in their ### Application Metrics -application_metrics +![application_metrics](https://user-images.githubusercontent.com/83692067/226637410-900dbc5e-6705-4b11-a10c-bd0faeb2a92f.png) + ### Distributed Tracing distributed_tracing_2 2 @@ -57,6 +58,11 @@ SigNoz helps developers monitor applications and troubleshoot problems in their infrastructure_monitoring +### Exceptions Monitoring + +![exceptions_light](https://user-images.githubusercontent.com/83692067/226637967-4188d024-3ac9-4799-be95-f5ea9c45436f.png) + + ### Alerts alerts_management From 97bfee48e1135700068a1e588ddd8eedd5aa019c Mon Sep 17 00:00:00 2001 From: GitStart <1501599+gitstart@users.noreply.github.com> Date: Wed, 22 Mar 2023 11:33:36 +0530 Subject: [PATCH 17/38] fix: slider deprecation warning from antd (#2478) Co-authored-by: gitstart Co-authored-by: Chintan Sudani <46838508+techchintan@users.noreply.github.com> Co-authored-by: Palash Gupta --- .../container/Trace/Filters/Panel/PanelBody/Duration/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/index.tsx b/frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/index.tsx index 6fa6c9ec53..1524d4060a 100644 --- a/frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/index.tsx +++ b/frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/index.tsx @@ -210,7 +210,7 @@ function Duration(): JSX.Element { min={Number(getMs(String(preLocalMinDuration.current 
|| 0)))} max={Number(getMs(String(preLocalMaxDuration.current || 0)))} range - tipFormatter={TipComponent} + tooltip={{ formatter: TipComponent }} onChange={([min, max]): void => { onRangeSliderHandler([String(min), String(max)]); }} From da4cbf6c2f8804d1fcff194c235a74ef83502a41 Mon Sep 17 00:00:00 2001 From: GitStart <1501599+gitstart@users.noreply.github.com> Date: Wed, 22 Mar 2023 12:01:37 +0530 Subject: [PATCH 18/38] fix: tabs deprecation warning from antd (#2479) Co-authored-by: gitstart Co-authored-by: Chintan Sudani <46838508+techchintan@users.noreply.github.com> Co-authored-by: Palash Gupta --- frontend/src/components/RouteTab/index.tsx | 26 ++-- .../container/FormAlertRules/QuerySection.tsx | 36 +++-- frontend/src/container/Licenses/index.tsx | 26 ++-- .../src/container/LogDetailedView/index.tsx | 22 ++- .../QueryBuilder/QueryBuilder.tsx | 5 +- .../NewDashboard/DashboardSettings/index.tsx | 18 +-- .../LeftContainer/QuerySection/index.tsx | 145 +++++++++--------- .../TraceDetail/SelectedSpanDetails/index.tsx | 48 +++--- frontend/src/pages/AlertList/index.tsx | 38 ++--- 9 files changed, 184 insertions(+), 180 deletions(-) diff --git a/frontend/src/components/RouteTab/index.tsx b/frontend/src/components/RouteTab/index.tsx index 1192f2f8e2..0059f5b84c 100644 --- a/frontend/src/components/RouteTab/index.tsx +++ b/frontend/src/components/RouteTab/index.tsx @@ -2,8 +2,6 @@ import { Tabs, TabsProps } from 'antd'; import history from 'lib/history'; import React from 'react'; -const { TabPane } = Tabs; - function RouteTab({ routes, activeKey, @@ -22,29 +20,23 @@ function RouteTab({ } }; + const items = routes.map(({ Component, name, route }) => ({ + label: name, + key: name, + tabKey: route, + children: , + })); + return ( - {routes.map( - ({ Component, name, route }): JSX.Element => ( - - - - ), - )} - + /> ); } diff --git a/frontend/src/container/FormAlertRules/QuerySection.tsx b/frontend/src/container/FormAlertRules/QuerySection.tsx index f02299dbc8..ab39c51083 
100644 --- a/frontend/src/container/FormAlertRules/QuerySection.tsx +++ b/frontend/src/container/FormAlertRules/QuerySection.tsx @@ -23,7 +23,6 @@ import PromqlSection from './PromqlSection'; import { FormContainer, QueryButton, StepHeading } from './styles'; import { toIMetricsBuilderQuery } from './utils'; -const { TabPane } = Tabs; function QuerySection({ queryCategory, setQueryCategory, @@ -282,6 +281,24 @@ function QuerySection({ runQuery(); }; + const tabs = [ + { + label: t('tab_qb'), + key: EQueryType.QUERY_BUILDER.toString(), + disabled: true, + }, + { + label: t('tab_chquery'), + key: EQueryType.CLICKHOUSE.toString(), + }, + ]; + + const items = [ + { label: t('tab_qb'), key: EQueryType.QUERY_BUILDER.toString() }, + { label: t('tab_chquery'), key: EQueryType.CLICKHOUSE.toString() }, + { label: t('tab_promql'), key: EQueryType.PROM.toString() }, + ]; + const renderTabs = (typ: AlertTypes): JSX.Element | null => { switch (typ) { case AlertTypes.TRACES_BASED_ALERT: @@ -303,14 +320,8 @@ function QuerySection({ )} } - > - - - + items={tabs} + /> ); case AlertTypes.METRICS_BASED_ALERT: default: @@ -330,11 +341,8 @@ function QuerySection({ )} } - > - - - - + items={items} + /> ); } }; diff --git a/frontend/src/container/Licenses/index.tsx b/frontend/src/container/Licenses/index.tsx index b326a5b0e7..d7dc4ab22b 100644 --- a/frontend/src/container/Licenses/index.tsx +++ b/frontend/src/container/Licenses/index.tsx @@ -8,8 +8,6 @@ import { useQuery } from 'react-query'; import ApplyLicenseForm from './ApplyLicenseForm'; import ListLicenses from './ListLicenses'; -const { TabPane } = Tabs; - function Licenses(): JSX.Element { const { t } = useTranslation(['licenses']); const { data, isError, isLoading, refetch } = useQuery({ @@ -28,17 +26,21 @@ function Licenses(): JSX.Element { const allValidLicense = data?.payload?.filter((license) => license.isCurrent) || []; - return ( - - - - - + const tabs = [ + { + label: t('tab_current_license'), + key: 'licenses', + 
children: , + }, + { + label: t('tab_license_history'), + key: 'history', + children: , + }, + ]; - - - - + return ( + ); } diff --git a/frontend/src/container/LogDetailedView/index.tsx b/frontend/src/container/LogDetailedView/index.tsx index 930dfbf0d8..98ab0909a4 100644 --- a/frontend/src/container/LogDetailedView/index.tsx +++ b/frontend/src/container/LogDetailedView/index.tsx @@ -24,6 +24,19 @@ function LogDetailedView(): JSX.Element { }); }; + const items = [ + { + label: 'Table', + key: '1', + children: detailedLog && , + }, + { + label: 'JSON', + key: '2', + children: detailedLog && , + }, + ]; + return ( - - - {detailedLog && } - - - {detailedLog && } - - + ); } diff --git a/frontend/src/container/LogsSearchFilter/SearchFields/QueryBuilder/QueryBuilder.tsx b/frontend/src/container/LogsSearchFilter/SearchFields/QueryBuilder/QueryBuilder.tsx index 4db70b7db2..9aff0b6dd7 100644 --- a/frontend/src/container/LogsSearchFilter/SearchFields/QueryBuilder/QueryBuilder.tsx +++ b/frontend/src/container/LogsSearchFilter/SearchFields/QueryBuilder/QueryBuilder.tsx @@ -27,8 +27,9 @@ function QueryConditionField({ return ( + ); +} diff --git a/frontend/src/container/QueryBuilder/components/QueryLabel/index.ts b/frontend/src/container/QueryBuilder/components/QueryLabel/index.ts new file mode 100644 index 0000000000..7403bb9ce5 --- /dev/null +++ b/frontend/src/container/QueryBuilder/components/QueryLabel/index.ts @@ -0,0 +1 @@ +export { QueryLabel } from './QueryLabel'; diff --git a/frontend/src/container/QueryBuilder/components/index.ts b/frontend/src/container/QueryBuilder/components/index.ts new file mode 100644 index 0000000000..a5ced11d5b --- /dev/null +++ b/frontend/src/container/QueryBuilder/components/index.ts @@ -0,0 +1,2 @@ +export { ListMarker } from './ListMarker'; +export { QueryLabel } from './QueryLabel'; diff --git a/frontend/src/types/common/queryBuilder.ts b/frontend/src/types/common/queryBuilder.ts new file mode 100644 index 0000000000..f1190b857f --- 
/dev/null +++ b/frontend/src/types/common/queryBuilder.ts @@ -0,0 +1,5 @@ +export enum DataSource { + METRICS = 'metrics', + TRACES = 'traces', + LOGS = 'logs', +} diff --git a/frontend/src/types/common/select.ts b/frontend/src/types/common/select.ts new file mode 100644 index 0000000000..0bc6b8c47b --- /dev/null +++ b/frontend/src/types/common/select.ts @@ -0,0 +1,4 @@ +export type SelectOption = { + value: Value; + label: Label; +}; From 9d20c2f78707ba12ed8409375e2f6b3c37626724 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Tue, 28 Mar 2023 00:15:15 +0530 Subject: [PATCH 27/38] feat: add resource tags to ListErrors API (#2487) --- .../app/clickhouseReader/reader.go | 42 ++++--- pkg/query-service/app/http_handler.go | 4 +- pkg/query-service/app/parser.go | 76 +++++------- pkg/query-service/constants/constants.go | 6 - pkg/query-service/model/queryParams.go | 109 +++++++++++++----- 5 files changed, 139 insertions(+), 98 deletions(-) diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index d3ebee0492..e3e5fc47d7 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -1464,13 +1464,13 @@ func createTagQueryFromTagQueryParams(queryParams []model.TagQueryParam) []model tags := []model.TagQuery{} for _, tag := range queryParams { if len(tag.StringValues) > 0 { - tags = append(tags, model.NewTagQueryString(tag.Key, tag.StringValues, tag.Operator)) + tags = append(tags, model.NewTagQueryString(tag)) } if len(tag.NumberValues) > 0 { - tags = append(tags, model.NewTagQueryNumber(tag.Key, tag.NumberValues, tag.Operator)) + tags = append(tags, model.NewTagQueryNumber(tag)) } if len(tag.BoolValues) > 0 { - tags = append(tags, model.NewTagQueryBool(tag.Key, tag.BoolValues, tag.Operator)) + tags = append(tags, model.NewTagQueryBool(tag)) } } return tags @@ -1494,18 +1494,7 @@ func buildQueryWithTagParams(ctx context.Context, tags []model.TagQuery) 
(string for _, item := range tags { var subQuery string var argsSubQuery []interface{} - tagMapType := "" - switch item.(type) { - case model.TagQueryString: - tagMapType = constants.StringTagMapCol - case model.TagQueryNumber: - tagMapType = constants.NumberTagMapCol - case model.TagQueryBool: - tagMapType = constants.BoolTagMapCol - default: - // type not supported error - return "", nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("type not supported")} - } + tagMapType := item.GetTagMapColumn() switch item.GetOperator() { case model.EqualOperator: subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, "=") @@ -2698,6 +2687,17 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li query = query + " AND exceptionType ilike @exceptionType" args = append(args, clickhouse.Named("exceptionType", "%"+queryParams.ExceptionType+"%")) } + + // create TagQuery from TagQueryParams + tags := createTagQueryFromTagQueryParams(queryParams.Tags) + subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags) + query += subQuery + args = append(args, argsSubQuery...) + + if errStatus != nil { + zap.S().Error("Error in processing tags: ", errStatus) + return nil, errStatus + } query = query + " GROUP BY groupID" if len(queryParams.ServiceName) != 0 { query = query + ", serviceName" @@ -2747,6 +2747,18 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C query = query + " AND exceptionType ilike @exceptionType" args = append(args, clickhouse.Named("exceptionType", "%"+queryParams.ExceptionType+"%")) } + + // create TagQuery from TagQueryParams + tags := createTagQueryFromTagQueryParams(queryParams.Tags) + subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags) + query += subQuery + args = append(args, argsSubQuery...) 
+ + if errStatus != nil { + zap.S().Error("Error in processing tags: ", errStatus) + return 0, errStatus + } + err := r.db.QueryRow(ctx, query, args...).Scan(&errorCount) zap.S().Info(query) diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 7a6097638a..a617201233 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -328,8 +328,8 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) { router.HandleFunc("/api/v1/getFilteredSpans/aggregates", am.ViewAccess(aH.getFilteredSpanAggregates)).Methods(http.MethodPost) router.HandleFunc("/api/v1/getTagValues", am.ViewAccess(aH.getTagValues)).Methods(http.MethodPost) - router.HandleFunc("/api/v1/listErrors", am.ViewAccess(aH.listErrors)).Methods(http.MethodGet) - router.HandleFunc("/api/v1/countErrors", am.ViewAccess(aH.countErrors)).Methods(http.MethodGet) + router.HandleFunc("/api/v1/listErrors", am.ViewAccess(aH.listErrors)).Methods(http.MethodPost) + router.HandleFunc("/api/v1/countErrors", am.ViewAccess(aH.countErrors)).Methods(http.MethodPost) router.HandleFunc("/api/v1/errorFromErrorID", am.ViewAccess(aH.getErrorFromErrorID)).Methods(http.MethodGet) router.HandleFunc("/api/v1/errorFromGroupID", am.ViewAccess(aH.getErrorFromGroupID)).Methods(http.MethodGet) router.HandleFunc("/api/v1/nextPrevErrorIDs", am.ViewAccess(aH.getNextPrevErrorIDs)).Methods(http.MethodGet) diff --git a/pkg/query-service/app/parser.go b/pkg/query-service/app/parser.go index a35f3138de..5c4393ae7a 100644 --- a/pkg/query-service/app/parser.go +++ b/pkg/query-service/app/parser.go @@ -494,76 +494,54 @@ func parseListErrorsRequest(r *http.Request) (*model.ListErrorsParams, error) { var allowedOrderParams = []string{"exceptionType", "exceptionCount", "firstSeen", "lastSeen", "serviceName"} var allowedOrderDirections = []string{"ascending", "descending"} - startTime, err := parseTime("start", r) - if err != nil { - return nil, err - } - 
endTime, err := parseTimeMinusBuffer("end", r) + var postData *model.ListErrorsParams + err := json.NewDecoder(r.Body).Decode(&postData) + if err != nil { return nil, err } - order := r.URL.Query().Get("order") - if len(order) > 0 && !DoesExistInSlice(order, allowedOrderDirections) { - return nil, errors.New(fmt.Sprintf("given order: %s is not allowed in query", order)) - } - orderParam := r.URL.Query().Get("orderParam") - if len(order) > 0 && !DoesExistInSlice(orderParam, allowedOrderParams) { - return nil, errors.New(fmt.Sprintf("given orderParam: %s is not allowed in query", orderParam)) - } - limit := r.URL.Query().Get("limit") - offset := r.URL.Query().Get("offset") - - if len(offset) == 0 || len(limit) == 0 { - return nil, fmt.Errorf("offset or limit param cannot be empty from the query") - } - - limitInt, err := strconv.Atoi(limit) + postData.Start, err = parseTimeStr(postData.StartStr, "start") if err != nil { - return nil, errors.New("limit param is not in correct format") + return nil, err } - offsetInt, err := strconv.Atoi(offset) + postData.End, err = parseTimeMinusBufferStr(postData.EndStr, "end") if err != nil { - return nil, errors.New("offset param is not in correct format") + return nil, err } - serviceName := r.URL.Query().Get("serviceName") - exceptionType := r.URL.Query().Get("exceptionType") - - params := &model.ListErrorsParams{ - Start: startTime, - End: endTime, - OrderParam: orderParam, - Order: order, - Limit: int64(limitInt), - Offset: int64(offsetInt), - ServiceName: serviceName, - ExceptionType: exceptionType, + if postData.Limit == 0 { + return nil, fmt.Errorf("limit param cannot be empty from the query") } - return params, nil + if len(postData.Order) > 0 && !DoesExistInSlice(postData.Order, allowedOrderDirections) { + return nil, errors.New(fmt.Sprintf("given order: %s is not allowed in query", postData.Order)) + } + + if len(postData.Order) > 0 && !DoesExistInSlice(postData.OrderParam, allowedOrderParams) { + return nil, 
errors.New(fmt.Sprintf("given orderParam: %s is not allowed in query", postData.OrderParam)) + } + + return postData, nil } func parseCountErrorsRequest(r *http.Request) (*model.CountErrorsParams, error) { - startTime, err := parseTime("start", r) + var postData *model.CountErrorsParams + err := json.NewDecoder(r.Body).Decode(&postData) + if err != nil { return nil, err } - endTime, err := parseTimeMinusBuffer("end", r) + + postData.Start, err = parseTimeStr(postData.StartStr, "start") if err != nil { return nil, err } - serviceName := r.URL.Query().Get("serviceName") - exceptionType := r.URL.Query().Get("exceptionType") - - params := &model.CountErrorsParams{ - Start: startTime, - End: endTime, - ServiceName: serviceName, - ExceptionType: exceptionType, + postData.End, err = parseTimeMinusBufferStr(postData.EndStr, "end") + if err != nil { + return nil, err } - - return params, nil + return postData, nil } func parseGetErrorRequest(r *http.Request) (*model.GetErrorParams, error) { diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index d433163aa7..ce03e364e1 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -219,11 +219,5 @@ var ReservedColumnTargetAliases = map[string]struct{}{ "value": {}, } -const ( - StringTagMapCol = "stringTagMap" - NumberTagMapCol = "numberTagMap" - BoolTagMapCol = "boolTagMap" -) - // logsPPLPfx is a short constant for logsPipelinePrefix const LogsPPLPfx = "logstransform/pipeline_" diff --git a/pkg/query-service/model/queryParams.go b/pkg/query-service/model/queryParams.go index 4730b1fabf..a94f57ad02 100644 --- a/pkg/query-service/model/queryParams.go +++ b/pkg/query-service/model/queryParams.go @@ -122,6 +122,13 @@ const ( LOGS ) +const ( + StringTagMapCol = "stringTagMap" + NumberTagMapCol = "numberTagMap" + BoolTagMapCol = "boolTagMap" + ResourceTagMapCol = "resourceTagsMap" +) + type QueryRangeParamsV2 struct { DataSource DataSource 
`json:"dataSource"` Start int64 `json:"start"` @@ -187,6 +194,7 @@ type GetServiceOverviewParams struct { type TagQueryParam struct { Key string `json:"key"` + TagType TagType `json:"tagType"` StringValues []string `json:"stringValues"` BoolValues []bool `json:"boolValues"` NumberValues []float64 `json:"numberValues"` @@ -212,23 +220,34 @@ const ( NotStartsWithOperator Operator = "NotStartsWith" ) +type TagType string + +const ( + ResourceAttributeTagType TagType = "ResourceAttribute" + SpanAttributeTagType TagType = "SpanAttribute" +) + type TagQuery interface { GetKey() string GetValues() []interface{} GetOperator() Operator + GetTagType() TagType + GetTagMapColumn() string } type TagQueryString struct { key string values []string operator Operator + tagType TagType } -func NewTagQueryString(key string, values []string, operator Operator) TagQueryString { +func NewTagQueryString(tag TagQueryParam) TagQueryString { return TagQueryString{ - key: key, - values: values, - operator: operator, + key: tag.Key, + values: tag.StringValues, + operator: tag.Operator, + tagType: tag.TagType, } } @@ -248,17 +267,31 @@ func (tqs TagQueryString) GetOperator() Operator { return tqs.operator } +func (tqs TagQueryString) GetTagType() TagType { + return tqs.tagType +} + +func (tqs TagQueryString) GetTagMapColumn() string { + if tqs.GetTagType() == ResourceAttributeTagType { + return ResourceTagMapCol + } else { + return StringTagMapCol + } +} + type TagQueryBool struct { key string values []bool operator Operator + tagType TagType } -func NewTagQueryBool(key string, values []bool, operator Operator) TagQueryBool { +func NewTagQueryBool(tag TagQueryParam) TagQueryBool { return TagQueryBool{ - key: key, - values: values, - operator: operator, + key: tag.Key, + values: tag.BoolValues, + operator: tag.Operator, + tagType: tag.TagType, } } @@ -278,17 +311,27 @@ func (tqb TagQueryBool) GetOperator() Operator { return tqb.operator } +func (tqb TagQueryBool) GetTagType() TagType { + return 
tqb.tagType +} + +func (tqb TagQueryBool) GetTagMapColumn() string { + return BoolTagMapCol +} + type TagQueryNumber struct { key string values []float64 operator Operator + tagType TagType } -func NewTagQueryNumber(key string, values []float64, operator Operator) TagQueryNumber { +func NewTagQueryNumber(tag TagQueryParam) TagQueryNumber { return TagQueryNumber{ - key: key, - values: values, - operator: operator, + key: tag.Key, + values: tag.NumberValues, + operator: tag.Operator, + tagType: tag.TagType, } } @@ -308,6 +351,14 @@ func (tqn TagQueryNumber) GetOperator() Operator { return tqn.operator } +func (tqn TagQueryNumber) GetTagType() TagType { + return tqn.tagType +} + +func (tqn TagQueryNumber) GetTagMapColumn() string { + return NumberTagMapCol +} + type GetFilteredSpansParams struct { TraceID []string `json:"traceID"` ServiceName []string `json:"serviceName"` @@ -414,17 +465,17 @@ type TagFilterParams struct { End *time.Time } -type TagType string +type TagDataType string const ( - TagTypeString TagType = "string" - TagTypeNumber TagType = "number" - TagTypeBool TagType = "bool" + TagTypeString TagDataType = "string" + TagTypeNumber TagDataType = "number" + TagTypeBool TagDataType = "bool" ) type TagKey struct { - Key string `json:"key"` - Type TagType `json:"type"` + Key string `json:"key"` + Type TagDataType `json:"type"` } type TTLParams struct { @@ -439,21 +490,27 @@ type GetTTLParams struct { } type ListErrorsParams struct { + StartStr string `json:"start"` + EndStr string `json:"end"` Start *time.Time End *time.Time - Limit int64 - OrderParam string - Order string - Offset int64 - ServiceName string - ExceptionType string + Limit int64 `json:"limit"` + OrderParam string `json:"orderParam"` + Order string `json:"order"` + Offset int64 `json:"offset"` + ServiceName string `json:"serviceName"` + ExceptionType string `json:"exceptionType"` + Tags []TagQueryParam `json:"tags"` } type CountErrorsParams struct { + StartStr string `json:"start"` + EndStr 
string `json:"end"` Start *time.Time End *time.Time - ServiceName string - ExceptionType string + ServiceName string `json:"serviceName"` + ExceptionType string `json:"exceptionType"` + Tags []TagQueryParam `json:"tags"` } type GetErrorParams struct { From 192d3881a1dfab46c8cb4478ca3294d2ad8ccff1 Mon Sep 17 00:00:00 2001 From: Chintan Sudani <46838508+techchintan@users.noreply.github.com> Date: Tue, 28 Mar 2023 21:22:06 +0530 Subject: [PATCH 28/38] fix: commented unwanted sidebar menu option (#2513) * fix: Removed Strict mode to stop render twice * fix: commented unwanted sidebar menuoption --- frontend/src/container/SideNav/menuItems.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/frontend/src/container/SideNav/menuItems.ts b/frontend/src/container/SideNav/menuItems.ts index 5be9c9b9a1..2724878ffc 100644 --- a/frontend/src/container/SideNav/menuItems.ts +++ b/frontend/src/container/SideNav/menuItems.ts @@ -29,12 +29,12 @@ const menus: SidebarMenu[] = [ to: ROUTES.LOGS, name: 'Logs', // tags: ['Beta'], - children: [ - { - key: ROUTES.LOGS, - label: 'Search', - }, - ], + // children: [ + // { + // key: ROUTES.LOGS, + // label: 'Search', + // }, + // ], }, { Icon: DashboardFilled, From c4944370ce832e4abf1a54c7816f0129a7deb92e Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Tue, 28 Mar 2023 22:15:46 +0530 Subject: [PATCH 29/38] feat: support environment filtering in service map (#2481) --- .../app/clickhouseReader/options.go | 2 +- .../app/clickhouseReader/reader.go | 13 ++-- pkg/query-service/app/services/map.go | 63 +++++++++++++++++++ 3 files changed, 72 insertions(+), 6 deletions(-) create mode 100644 pkg/query-service/app/services/map.go diff --git a/pkg/query-service/app/clickhouseReader/options.go b/pkg/query-service/app/clickhouseReader/options.go index da5a87ecd2..56dc36ff98 100644 --- a/pkg/query-service/app/clickhouseReader/options.go +++ b/pkg/query-service/app/clickhouseReader/options.go @@ -26,7 +26,7 @@ const ( 
defaultDurationTable string = "distributed_durationSort" defaultUsageExplorerTable string = "distributed_usage_explorer" defaultSpansTable string = "distributed_signoz_spans" - defaultDependencyGraphTable string = "distributed_dependency_graph_minutes" + defaultDependencyGraphTable string = "distributed_dependency_graph_minutes_v2" defaultTopLevelOperationsTable string = "distributed_top_level_operations" defaultLogsDB string = "signoz_logs" defaultLogsTable string = "distributed_logs" diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index e3e5fc47d7..c0545ef80a 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -41,6 +41,7 @@ import ( promModel "github.com/prometheus/common/model" "go.signoz.io/signoz/pkg/query-service/app/logs" + "go.signoz.io/signoz/pkg/query-service/app/services" "go.signoz.io/signoz/pkg/query-service/constants" am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager" "go.signoz.io/signoz/pkg/query-service/interfaces" @@ -1996,20 +1997,22 @@ func (r *ClickHouseReader) GetDependencyGraph(ctx context.Context, queryParams * sum(total_count)/ @duration AS callRate, sum(error_count)/sum(total_count) * 100 as errorRate FROM %s.%s - WHERE toUInt64(toDateTime(timestamp)) >= @start AND toUInt64(toDateTime(timestamp)) <= @end - GROUP BY - src, - dest`, + WHERE toUInt64(toDateTime(timestamp)) >= @start AND toUInt64(toDateTime(timestamp)) <= @end`, r.TraceDB, r.dependencyGraphTable, ) + tags := createTagQueryFromTagQueryParams(queryParams.Tags) + filterQuery, filterArgs := services.BuildServiceMapQuery(tags) + query += filterQuery + " GROUP BY src, dest;" + args = append(args, filterArgs...) + zap.S().Debug(query, args) err := r.db.Select(ctx, &response, query, args...) 
if err != nil { zap.S().Error("Error in processing sql query: ", err) - return nil, fmt.Errorf("Error in processing sql query") + return nil, fmt.Errorf("error in processing sql query %w", err) } return &response, nil diff --git a/pkg/query-service/app/services/map.go b/pkg/query-service/app/services/map.go new file mode 100644 index 0000000000..ff0a7c2b3c --- /dev/null +++ b/pkg/query-service/app/services/map.go @@ -0,0 +1,63 @@ +package services + +import ( + "fmt" + "strings" + + "github.com/ClickHouse/clickhouse-go/v2" + "go.signoz.io/signoz/pkg/query-service/model" +) + +var ( + columns = map[string]struct{}{ + "deployment_environment": {}, + "k8s_cluster_name": {}, + "k8s_namespace_name": {}, + } +) + +func BuildServiceMapQuery(tags []model.TagQuery) (string, []interface{}) { + var filterQuery string + var namedArgs []interface{} + for _, tag := range tags { + key := strings.ReplaceAll(tag.GetKey(), ".", "_") + operator := tag.GetOperator() + value := tag.GetValues() + + if _, ok := columns[key]; !ok { + continue + } + + switch operator { + case model.InOperator: + filterQuery += fmt.Sprintf(" AND %s IN @%s", key, key) + namedArgs = append(namedArgs, clickhouse.Named(key, value)) + case model.NotInOperator: + filterQuery += fmt.Sprintf(" AND %s NOT IN @%s", key, key) + namedArgs = append(namedArgs, clickhouse.Named(key, value)) + case model.EqualOperator: + filterQuery += fmt.Sprintf(" AND %s = @%s", key, key) + namedArgs = append(namedArgs, clickhouse.Named(key, value)) + case model.NotEqualOperator: + filterQuery += fmt.Sprintf(" AND %s != @%s", key, key) + namedArgs = append(namedArgs, clickhouse.Named(key, value)) + case model.ContainsOperator: + filterQuery += fmt.Sprintf(" AND %s LIKE @%s", key, key) + namedArgs = append(namedArgs, clickhouse.Named(key, fmt.Sprintf("%%%s%%", value))) + case model.NotContainsOperator: + filterQuery += fmt.Sprintf(" AND %s NOT LIKE @%s", key, key) + namedArgs = append(namedArgs, clickhouse.Named(key, fmt.Sprintf("%%%s%%", 
value))) + case model.StartsWithOperator: + filterQuery += fmt.Sprintf(" AND %s LIKE @%s", key, key) + namedArgs = append(namedArgs, clickhouse.Named(key, fmt.Sprintf("%s%%", value))) + case model.NotStartsWithOperator: + filterQuery += fmt.Sprintf(" AND %s NOT LIKE @%s", key, key) + namedArgs = append(namedArgs, clickhouse.Named(key, fmt.Sprintf("%s%%", value))) + case model.ExistsOperator: + filterQuery += fmt.Sprintf(" AND %s IS NOT NULL", key) + case model.NotExistsOperator: + filterQuery += fmt.Sprintf(" AND %s IS NULL", key) + } + } + return filterQuery, namedArgs +} From 12e56932eed1f0925a3903c9e0f529134a0c9438 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Wed, 29 Mar 2023 07:32:47 +0530 Subject: [PATCH 30/38] fix: exception detail broken APIs due to resourceTagsMap (#2514) --- pkg/query-service/app/clickhouseReader/reader.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index c0545ef80a..abfa0c8dfa 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -2781,7 +2781,7 @@ func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams } var getErrorWithSpanReponse []model.ErrorWithSpan - query := fmt.Sprintf("SELECT * FROM %s.%s WHERE timestamp = @timestamp AND groupID = @groupID AND errorID = @errorID LIMIT 1", r.TraceDB, r.errorTable) + query := fmt.Sprintf("SELECT errorID, exceptionType, exceptionStacktrace, exceptionEscaped, exceptionMessage, timestamp, spanID, traceID, serviceName, groupID FROM %s.%s WHERE timestamp = @timestamp AND groupID = @groupID AND errorID = @errorID LIMIT 1", r.TraceDB, r.errorTable) args := []interface{}{clickhouse.Named("errorID", queryParams.ErrorID), clickhouse.Named("groupID", queryParams.GroupID), clickhouse.Named("timestamp", strconv.FormatInt(queryParams.Timestamp.UnixNano(), 10))} err := r.db.Select(ctx, 
&getErrorWithSpanReponse, query, args...) @@ -2804,7 +2804,7 @@ func (r *ClickHouseReader) GetErrorFromGroupID(ctx context.Context, queryParams var getErrorWithSpanReponse []model.ErrorWithSpan - query := fmt.Sprintf("SELECT * FROM %s.%s WHERE timestamp = @timestamp AND groupID = @groupID LIMIT 1", r.TraceDB, r.errorTable) + query := fmt.Sprintf("SELECT errorID, exceptionType, exceptionStacktrace, exceptionEscaped, exceptionMessage, timestamp, spanID, traceID, serviceName, groupID FROM %s.%s WHERE timestamp = @timestamp AND groupID = @groupID LIMIT 1", r.TraceDB, r.errorTable) args := []interface{}{clickhouse.Named("groupID", queryParams.GroupID), clickhouse.Named("timestamp", strconv.FormatInt(queryParams.Timestamp.UnixNano(), 10))} err := r.db.Select(ctx, &getErrorWithSpanReponse, query, args...) From 99ed314fc9b7fa4b8ce5406c85bdef30cfbed0ae Mon Sep 17 00:00:00 2001 From: Palash Gupta Date: Wed, 29 Mar 2023 14:45:58 +0530 Subject: [PATCH 31/38] feat: resource attribute is added in the exception (#2491) * feat: resource attribute is added in the exception * fix: build is fixed * chore: methods is updated to post * fix: build is fixed * fix: listErrors, countErrors API request body * chore: type of the function is updated * chore: convertRawQueriesToTraceSelectedTags is updated * fix: resource attribute is updated * chore: selected tags is updated * feat: key is updated --------- Co-authored-by: Vishal Sharma --- frontend/src/AppRoutes/index.tsx | 45 ++-- frontend/src/api/errors/getAll.ts | 17 +- frontend/src/api/errors/getErrorCounts.ts | 13 +- frontend/src/api/trace/getSpans.ts | 2 +- frontend/src/api/trace/getSpansAggregate.ts | 2 +- frontend/src/container/AllError/index.tsx | 9 +- .../ResourceAttributesFilter/QueryChip.tsx | 32 --- .../ResourceAttributesFilter.Machine.ts | 61 ----- ...esourceAttributesFilter.Machine.typegen.ts | 32 --- .../ResourceAttributesFilter/index.tsx | 219 ------------------ .../ResourceAttributesFilter/types.ts | 11 - 
.../ResourceAttributesFilter/utils.ts | 64 ----- .../MetricsApplication/Tabs/DBCall.tsx | 21 +- .../MetricsApplication/Tabs/External.tsx | 21 +- .../MetricsApplication/Tabs/Overview.tsx | 27 +-- .../MetricsApplication/TopOperationsTable.tsx | 10 +- .../container/MetricsApplication/index.tsx | 2 +- .../MetricTagKey.machine.typegen.ts | 30 +-- .../ResourceAttributesFilter.tsx | 77 ++++++ .../components/QueryChip/QueryChip.tsx | 23 ++ .../components/QueryChip/index.ts | 3 + .../components/QueryChip/types.ts | 6 + .../ResourceAttributesFilter/index.ts | 3 + .../ResourceAttributesFilter/styles.ts | 6 +- frontend/src/container/SideNav/index.tsx | 9 +- .../useResourceAttribute/ResourceProvider.tsx | 181 +++++++++++++++ .../src/hooks/useResourceAttribute/context.ts | 7 + .../src/hooks/useResourceAttribute/index.ts | 7 + .../src/hooks/useResourceAttribute/machine.ts | 61 +++++ .../useResourceAttribute/machine.typegen.ts | 37 +++ .../src/hooks/useResourceAttribute/types.ts | 32 +++ .../useResourceAttribute.tsx | 9 + .../src/hooks/useResourceAttribute/utils.ts | 161 +++++++++++++ frontend/src/lib/resourceAttributes.ts | 76 ------ frontend/src/pages/AllErrors/index.tsx | 28 ++- .../src/pages/MetricApplication/index.tsx | 14 +- frontend/src/pages/Metrics/index.tsx | 23 +- .../metrics/setResourceAttributeQueries.ts | 55 ----- frontend/src/store/reducers/metric.ts | 16 -- frontend/src/types/actions/metrics.ts | 17 +- frontend/src/types/api/errors/getAll.ts | 2 + .../src/types/api/errors/getErrorCounts.ts | 2 + frontend/src/types/reducer/metrics.ts | 3 - 43 files changed, 754 insertions(+), 722 deletions(-) delete mode 100644 frontend/src/container/MetricsApplication/ResourceAttributesFilter/QueryChip.tsx delete mode 100644 frontend/src/container/MetricsApplication/ResourceAttributesFilter/ResourceAttributesFilter.Machine.ts delete mode 100644 frontend/src/container/MetricsApplication/ResourceAttributesFilter/ResourceAttributesFilter.Machine.typegen.ts delete mode 100644 
frontend/src/container/MetricsApplication/ResourceAttributesFilter/index.tsx delete mode 100644 frontend/src/container/MetricsApplication/ResourceAttributesFilter/types.ts delete mode 100644 frontend/src/container/MetricsApplication/ResourceAttributesFilter/utils.ts create mode 100644 frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx create mode 100644 frontend/src/container/ResourceAttributesFilter/components/QueryChip/QueryChip.tsx create mode 100644 frontend/src/container/ResourceAttributesFilter/components/QueryChip/index.ts create mode 100644 frontend/src/container/ResourceAttributesFilter/components/QueryChip/types.ts create mode 100644 frontend/src/container/ResourceAttributesFilter/index.ts rename frontend/src/container/{MetricsApplication => }/ResourceAttributesFilter/styles.ts (77%) create mode 100644 frontend/src/hooks/useResourceAttribute/ResourceProvider.tsx create mode 100644 frontend/src/hooks/useResourceAttribute/context.ts create mode 100644 frontend/src/hooks/useResourceAttribute/index.ts create mode 100644 frontend/src/hooks/useResourceAttribute/machine.ts create mode 100644 frontend/src/hooks/useResourceAttribute/machine.typegen.ts create mode 100644 frontend/src/hooks/useResourceAttribute/types.ts create mode 100644 frontend/src/hooks/useResourceAttribute/useResourceAttribute.tsx create mode 100644 frontend/src/hooks/useResourceAttribute/utils.ts delete mode 100644 frontend/src/lib/resourceAttributes.ts delete mode 100644 frontend/src/store/actions/metrics/setResourceAttributeQueries.ts diff --git a/frontend/src/AppRoutes/index.tsx b/frontend/src/AppRoutes/index.tsx index 832e557e49..edfe843882 100644 --- a/frontend/src/AppRoutes/index.tsx +++ b/frontend/src/AppRoutes/index.tsx @@ -4,6 +4,7 @@ import Spinner from 'components/Spinner'; import AppLayout from 'container/AppLayout'; import { useThemeConfig } from 'hooks/useDarkMode'; import { NotificationProvider } from 'hooks/useNotifications'; +import { ResourceProvider 
} from 'hooks/useResourceAttribute'; import history from 'lib/history'; import { QueryBuilderProvider } from 'providers/QueryBuilder'; import React, { Suspense } from 'react'; @@ -17,30 +18,32 @@ function App(): JSX.Element { return ( - - + + - - - }> - - {routes.map(({ path, component, exact }) => ( - - ))} + + + + }> + + {routes.map(({ path, component, exact }) => ( + + ))} - - - - - + + + + + + - - + + ); } diff --git a/frontend/src/api/errors/getAll.ts b/frontend/src/api/errors/getAll.ts index 7014e52a56..8d6793ee87 100644 --- a/frontend/src/api/errors/getAll.ts +++ b/frontend/src/api/errors/getAll.ts @@ -1,7 +1,6 @@ import axios from 'api'; import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; import { AxiosError } from 'axios'; -import createQueryParams from 'lib/createQueryParams'; import { ErrorResponse, SuccessResponse } from 'types/api'; import { PayloadProps, Props } from 'types/api/errors/getAll'; @@ -9,11 +8,17 @@ const getAll = async ( props: Props, ): Promise | ErrorResponse> => { try { - const response = await axios.get( - `/listErrors?${createQueryParams({ - ...props, - })}`, - ); + const response = await axios.post(`/listErrors`, { + start: `${props.start}`, + end: `${props.end}`, + order: props.order, + orderParam: props.orderParam, + limit: props.limit, + offset: props.offset, + exceptionType: props.exceptionType, + serviceName: props.serviceName, + tags: props.tags, + }); return { statusCode: 200, diff --git a/frontend/src/api/errors/getErrorCounts.ts b/frontend/src/api/errors/getErrorCounts.ts index 4992a6d391..977eeb226f 100644 --- a/frontend/src/api/errors/getErrorCounts.ts +++ b/frontend/src/api/errors/getErrorCounts.ts @@ -1,7 +1,6 @@ import axios from 'api'; import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; import { AxiosError } from 'axios'; -import createQueryParams from 'lib/createQueryParams'; import { ErrorResponse, SuccessResponse } from 'types/api'; import { PayloadProps, Props } from 
'types/api/errors/getErrorCounts'; @@ -9,11 +8,13 @@ const getErrorCounts = async ( props: Props, ): Promise | ErrorResponse> => { try { - const response = await axios.get( - `/countErrors?${createQueryParams({ - ...props, - })}`, - ); + const response = await axios.post(`/countErrors`, { + start: `${props.start}`, + end: `${props.end}`, + exceptionType: props.exceptionType, + serviceName: props.serviceName, + tags: props.tags, + }); return { statusCode: 200, diff --git a/frontend/src/api/trace/getSpans.ts b/frontend/src/api/trace/getSpans.ts index 8b56caa46d..261b2652c6 100644 --- a/frontend/src/api/trace/getSpans.ts +++ b/frontend/src/api/trace/getSpans.ts @@ -10,7 +10,7 @@ const getSpans = async ( ): Promise | ErrorResponse> => { try { const updatedSelectedTags = props.selectedTags.map((e) => ({ - Key: e.Key, + Key: `${e.Key}.(string)`, Operator: e.Operator, StringValues: e.StringValues, NumberValues: e.NumberValues, diff --git a/frontend/src/api/trace/getSpansAggregate.ts b/frontend/src/api/trace/getSpansAggregate.ts index cfa1f7e31f..7f245605fc 100644 --- a/frontend/src/api/trace/getSpansAggregate.ts +++ b/frontend/src/api/trace/getSpansAggregate.ts @@ -28,7 +28,7 @@ const getSpanAggregate = async ( }); const updatedSelectedTags = props.selectedTags.map((e) => ({ - Key: e.Key, + Key: `${e.Key}.(string)`, Operator: e.Operator, StringValues: e.StringValues, NumberValues: e.NumberValues, diff --git a/frontend/src/container/AllError/index.tsx b/frontend/src/container/AllError/index.tsx index 64d83e70ec..c3b0580f44 100644 --- a/frontend/src/container/AllError/index.tsx +++ b/frontend/src/container/AllError/index.tsx @@ -18,6 +18,8 @@ import { ResizeTable } from 'components/ResizeTable'; import ROUTES from 'constants/routes'; import dayjs from 'dayjs'; import { useNotifications } from 'hooks/useNotifications'; +import useResourceAttribute from 'hooks/useResourceAttribute'; +import { convertRawQueriesToTraceSelectedTags } from 'hooks/useResourceAttribute/utils'; 
import useUrlQuery from 'hooks/useUrlQuery'; import createQueryParams from 'lib/createQueryParams'; import history from 'lib/history'; @@ -93,9 +95,11 @@ function AllErrors(): JSX.Element { ], ); + const { queries } = useResourceAttribute(); + const [{ isLoading, data }, errorCountResponse] = useQueries([ { - queryKey: ['getAllErrors', updatedPath, maxTime, minTime], + queryKey: ['getAllErrors', updatedPath, maxTime, minTime, queries], queryFn: (): Promise | ErrorResponse> => getAll({ end: maxTime, @@ -106,6 +110,7 @@ function AllErrors(): JSX.Element { orderParam: getUpdatedParams, exceptionType: getUpdatedExceptionType, serviceName: getUpdatedServiceName, + tags: convertRawQueriesToTraceSelectedTags(queries), }), enabled: !loading, }, @@ -116,6 +121,7 @@ function AllErrors(): JSX.Element { minTime, getUpdatedExceptionType, getUpdatedServiceName, + queries, ], queryFn: (): Promise> => getErrorCounts({ @@ -123,6 +129,7 @@ function AllErrors(): JSX.Element { start: minTime, exceptionType: getUpdatedExceptionType, serviceName: getUpdatedServiceName, + tags: convertRawQueriesToTraceSelectedTags(queries), }), enabled: !loading, }, diff --git a/frontend/src/container/MetricsApplication/ResourceAttributesFilter/QueryChip.tsx b/frontend/src/container/MetricsApplication/ResourceAttributesFilter/QueryChip.tsx deleted file mode 100644 index 09c5d27471..0000000000 --- a/frontend/src/container/MetricsApplication/ResourceAttributesFilter/QueryChip.tsx +++ /dev/null @@ -1,32 +0,0 @@ -import { convertMetricKeyToTrace } from 'lib/resourceAttributes'; -import React from 'react'; - -import { QueryChipContainer, QueryChipItem } from './styles'; -import { IResourceAttributeQuery } from './types'; - -interface IQueryChipProps { - queryData: IResourceAttributeQuery; - onClose: (id: string) => void; - disabled: boolean; -} - -export default function QueryChip({ - queryData, - onClose, - disabled, -}: IQueryChipProps): JSX.Element { - return ( - - 
{convertMetricKeyToTrace(queryData.tagKey)} - {queryData.operator} - { - if (!disabled) onClose(queryData.id); - }} - > - {queryData.tagValue.join(', ')} - - - ); -} diff --git a/frontend/src/container/MetricsApplication/ResourceAttributesFilter/ResourceAttributesFilter.Machine.ts b/frontend/src/container/MetricsApplication/ResourceAttributesFilter/ResourceAttributesFilter.Machine.ts deleted file mode 100644 index 3b9078f76b..0000000000 --- a/frontend/src/container/MetricsApplication/ResourceAttributesFilter/ResourceAttributesFilter.Machine.ts +++ /dev/null @@ -1,61 +0,0 @@ -import { createMachine } from 'xstate'; - -export const ResourceAttributesFilterMachine = - /** @xstate-layout N4IgpgJg5mDOIC5QBECGsAWAjA9qgThAAQDKYBAxhkQIIB2xAYgJYA2ALmPgHQAqqUANJgAngGIAcgFEAGr0SgADjljN2zHHQUgAHogAcAFgAM3AOz6ATAEYAzJdsA2Y4cOWAnABoQIxAFpDR2tuQ319AFYTcKdbFycAX3jvNExcAmIySmp6JjZOHn4hUTFNACFWAFd8bWVVdU1tPQQzY1MXY2tDdzNHM3dHd0NvXwR7biMTa313S0i+63DE5PRsPEJScnwqWgYiFg4uPgFhcQAlKRIpeSQQWrUNLRumx3Czbg8TR0sbS31jfUcw38fW47gBHmm4XCVms3SWIBSq3SGyyO1yBx4AHlFFxUOwcPhJLJrkoVPcGk9ENYFuF3i5YR0wtEHECEAEgiEmV8zH1DLYzHZ4Yi0utMltsrt9vluNjcfjCWVKtUbnd6o9QE1rMYBtxbGFvsZ3NrZj1WdYOfotUZLX0XEFHEKViKMpttjk9nlDrL8HiCWJzpcSbcyWrGoh3NCQj0zK53P1ph1WeFLLqnJZ2s5vmZLA6kginWsXaj3VLDoUAGqoSpgEp0cpVGohh5hhDWDy0sz8zruakzamWVm-Qyg362V5-AZOayO1KFlHitEejFHKCV6v+i5XRt1ZuU1s52zjNOOaZfdOWIY+RDZ0Hc6ZmKEXqyLPPCudit2Sz08ACSEFYNbSHI27kuquiIOEjiONwjJgrM3RWJYZisgEIJgnYPTmuEdi2OaiR5nQOAQHA2hvsiH4Sui0qFCcIGhnuLSmP0YJuJ2xjJsmKELG8XZTK0tjdHG06vgW5GupRS7St6vrKqSO4UhqVL8TBWp8o4eqdl0A5Xmy3G6gK56-B4uERDOSKiuJi6lgUAhrhUYB0buimtrEKZBDYrxaS0OZca8+ltheybOI4hivGZzrzp+VGHH+AGOQp4EIHy+ghNYnawtG4TsbYvk8QKfHGAJfQ9uF76WSW37xWBTSGJ0qXpd0vRZdEKGPqC2YeO2-zfO4+HxEAA */ - createMachine({ - tsTypes: {} as import('./ResourceAttributesFilter.Machine.typegen').Typegen0, - initial: 'Idle', - states: { - TagKey: { - on: { - NEXT: { - actions: 'onSelectOperator', - target: 'Operator', - }, - onBlur: { - actions: 'onBlurPurge', - target: 'Idle', - }, - 
RESET: { - target: 'Idle', - }, - }, - }, - Operator: { - on: { - NEXT: { - actions: 'onSelectTagValue', - target: 'TagValue', - }, - onBlur: { - actions: 'onBlurPurge', - target: 'Idle', - }, - RESET: { - target: 'Idle', - }, - }, - }, - TagValue: { - on: { - onBlur: { - actions: ['onValidateQuery', 'onBlurPurge'], - target: 'Idle', - }, - RESET: { - target: 'Idle', - }, - }, - }, - Idle: { - on: { - NEXT: { - actions: 'onSelectTagKey', - description: 'Select Category', - target: 'TagKey', - }, - }, - }, - }, - id: 'Dashboard Search And Filter', - }); diff --git a/frontend/src/container/MetricsApplication/ResourceAttributesFilter/ResourceAttributesFilter.Machine.typegen.ts b/frontend/src/container/MetricsApplication/ResourceAttributesFilter/ResourceAttributesFilter.Machine.typegen.ts deleted file mode 100644 index e7f7ee3de7..0000000000 --- a/frontend/src/container/MetricsApplication/ResourceAttributesFilter/ResourceAttributesFilter.Machine.typegen.ts +++ /dev/null @@ -1,32 +0,0 @@ -// This file was automatically generated. 
Edits will be overwritten - -export interface Typegen0 { - '@@xstate/typegen': true; - eventsCausingActions: { - onSelectOperator: 'NEXT'; - onBlurPurge: 'onBlur'; - onSelectTagValue: 'NEXT'; - onValidateQuery: 'onBlur'; - onSelectTagKey: 'NEXT'; - }; - internalEvents: { - 'xstate.init': { type: 'xstate.init' }; - }; - invokeSrcNameMap: {}; - missingImplementations: { - actions: - | 'onSelectOperator' - | 'onBlurPurge' - | 'onSelectTagValue' - | 'onValidateQuery' - | 'onSelectTagKey'; - services: never; - guards: never; - delays: never; - }; - eventsCausingServices: {}; - eventsCausingGuards: {}; - eventsCausingDelays: {}; - matchesStates: 'TagKey' | 'Operator' | 'TagValue' | 'Idle'; - tags: never; -} diff --git a/frontend/src/container/MetricsApplication/ResourceAttributesFilter/index.tsx b/frontend/src/container/MetricsApplication/ResourceAttributesFilter/index.tsx deleted file mode 100644 index b8bed255f7..0000000000 --- a/frontend/src/container/MetricsApplication/ResourceAttributesFilter/index.tsx +++ /dev/null @@ -1,219 +0,0 @@ -import { CloseCircleFilled } from '@ant-design/icons'; -import { useMachine } from '@xstate/react'; -import { Button, Select, Spin } from 'antd'; -import ROUTES from 'constants/routes'; -import history from 'lib/history'; -import { convertMetricKeyToTrace } from 'lib/resourceAttributes'; -import { map } from 'lodash-es'; -import React, { useEffect, useState } from 'react'; -import { useDispatch, useSelector } from 'react-redux'; -import { ResetInitialData } from 'store/actions/metrics/resetInitialData'; -import { SetResourceAttributeQueries } from 'store/actions/metrics/setResourceAttributeQueries'; -import { AppState } from 'store/reducers'; -import MetricReducer from 'types/reducer/metrics'; -import { v4 as uuid } from 'uuid'; - -import QueryChip from './QueryChip'; -import { ResourceAttributesFilterMachine } from './ResourceAttributesFilter.Machine'; -import { QueryChipItem, SearchContainer } from './styles'; -import { IOption, 
IResourceAttributeQuery } from './types'; -import { createQuery, GetTagKeys, GetTagValues, OperatorSchema } from './utils'; - -function ResourceAttributesFilter(): JSX.Element | null { - const dispatch = useDispatch(); - const [disabled, setDisabled] = useState( - !(history.location.pathname === ROUTES.APPLICATION), - ); - - useEffect(() => { - const unListen = history.listen(({ pathname }) => { - setDisabled(!(pathname === ROUTES.APPLICATION)); - }); - return (): void => { - if (!history.location.pathname.startsWith(`${ROUTES.APPLICATION}/`)) { - dispatch(ResetInitialData()); - } - unListen(); - }; - }, [dispatch]); - - const { resourceAttributeQueries } = useSelector( - (state) => state.metrics, - ); - const [loading, setLoading] = useState(true); - const [selectedValues, setSelectedValues] = useState([]); - const [staging, setStaging] = useState([]); - const [queries, setQueries] = useState([]); - const [optionsData, setOptionsData] = useState<{ - mode: undefined | 'tags' | 'multiple'; - options: IOption[]; - }>({ - mode: undefined, - options: [], - }); - - const dispatchQueries = (updatedQueries: IResourceAttributeQuery[]): void => { - dispatch(SetResourceAttributeQueries(updatedQueries)); - }; - const handleLoading = (isLoading: boolean): void => { - setLoading(isLoading); - if (isLoading) { - setOptionsData({ mode: undefined, options: [] }); - } - }; - const [state, send] = useMachine(ResourceAttributesFilterMachine, { - actions: { - onSelectTagKey: () => { - handleLoading(true); - GetTagKeys() - .then((tagKeys) => setOptionsData({ options: tagKeys, mode: undefined })) - .finally(() => { - handleLoading(false); - }); - }, - onSelectOperator: () => { - setOptionsData({ options: OperatorSchema, mode: undefined }); - }, - onSelectTagValue: () => { - handleLoading(true); - - GetTagValues(staging[0]) - .then((tagValuesOptions) => - setOptionsData({ options: tagValuesOptions, mode: 'multiple' }), - ) - .finally(() => { - handleLoading(false); - }); - }, - 
onBlurPurge: () => { - setSelectedValues([]); - setStaging([]); - }, - onValidateQuery: (): void => { - if (staging.length < 2 || selectedValues.length === 0) { - return; - } - - const generatedQuery = createQuery([...staging, selectedValues]); - if (generatedQuery) { - dispatchQueries([...queries, generatedQuery]); - } - }, - }, - }); - - useEffect(() => { - setQueries(resourceAttributeQueries); - }, [resourceAttributeQueries]); - - const handleFocus = (): void => { - if (state.value === 'Idle') { - send('NEXT'); - } - }; - - const handleBlur = (): void => { - send('onBlur'); - }; - const handleChange = (value: never): void => { - if (!optionsData.mode) { - setStaging((prevStaging) => [...prevStaging, value]); - setSelectedValues([]); - send('NEXT'); - return; - } - - setSelectedValues([...value]); - }; - - const handleClose = (id: string): void => { - dispatchQueries(queries.filter((queryData) => queryData.id !== id)); - }; - - const handleClearAll = (): void => { - send('RESET'); - dispatchQueries([]); - setStaging([]); - setSelectedValues([]); - }; - const disabledAndEmpty = !!( - !queries.length && - !staging.length && - !selectedValues.length && - disabled - ); - const disabledOrEmpty = !!( - queries.length || - staging.length || - selectedValues.length || - disabled - ); - - if (disabledAndEmpty) { - return null; - } - - return ( - -
- {map( - queries, - (query): JSX.Element => ( - - ), - )} - {map(staging, (item, idx) => ( - - {idx === 0 ? convertMetricKeyToTrace(item) : item} - - ))} -
- {!disabled && ( - + Loading...{' '} + + ) : ( + + No resource attributes available to filter. Please refer docs to send + attributes. + + ) + } + /> + + {queries.length || staging.length || selectedQuery.length ? ( +