From 1e1624ed4cb85f01f555289506f5fd85b4dbab87 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Thu, 14 Mar 2024 12:07:47 +0530 Subject: [PATCH 01/33] fix: [GH-3932]: do not retry API's in case of 4XX status code (#4376) * fix: [GH-3932]: do not retry API's in case of 400 status code * feat: do not retry 4XX response status --- frontend/src/index.tsx | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/frontend/src/index.tsx b/frontend/src/index.tsx index b95631c107..570db8c1da 100644 --- a/frontend/src/index.tsx +++ b/frontend/src/index.tsx @@ -3,6 +3,7 @@ import 'styles.scss'; import * as Sentry from '@sentry/react'; import AppRoutes from 'AppRoutes'; +import { AxiosError } from 'axios'; import { ThemeProvider } from 'hooks/useDarkMode'; import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback'; import { createRoot } from 'react-dom/client'; @@ -16,6 +17,17 @@ const queryClient = new QueryClient({ defaultOptions: { queries: { refetchOnWindowFocus: false, + retry(failureCount, error): boolean { + if ( + // in case of manually throwing errors please make sure to send error.response.status + error instanceof AxiosError && + error.response?.status && + (error.response?.status >= 400 || error.response?.status <= 499) + ) { + return false; + } + return failureCount < 2; + }, }, }, }); From 2a62982885a9b92619096149f198a3b4529ee525 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Thu, 14 Mar 2024 13:33:35 +0530 Subject: [PATCH 02/33] feat: support case insensitive operators (#4379) --- .../QueryBuilder/filters/QueryBuilderSearch/utils.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts index c549a6fd62..ec7eba3973 100644 --- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts +++ 
b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearch/utils.ts @@ -5,7 +5,7 @@ import { parse } from 'papaparse'; import { orderByValueDelimiter } from '../OrderByFilter/utils'; // eslint-disable-next-line no-useless-escape -export const tagRegexp = /^\s*(.*?)\s*(IN|NOT_IN|LIKE|NOT_LIKE|REGEX|NOT_REGEX|=|!=|EXISTS|NOT_EXISTS|CONTAINS|NOT_CONTAINS|>=|>|<=|<|HAS|NHAS)\s*(.*)$/g; +export const tagRegexp = /^\s*(.*?)\s*(\bIN\b|\bNOT_IN\b|\bLIKE\b|\bNOT_LIKE\b|\bREGEX\b|\bNOT_REGEX\b|=|!=|\bEXISTS\b|\bNOT_EXISTS\b|\bCONTAINS\b|\bNOT_CONTAINS\b|>=|>|<=|<|\bHAS\b|\bNHAS\b)\s*(.*)$/gi; export function isInNInOperator(value: string): boolean { return value === OPERATORS.IN || value === OPERATORS.NIN; @@ -25,8 +25,8 @@ export function getTagToken(tag: string): ITagToken { const [, matchTagKey, matchTagOperator, matchTagValue] = match; return { tagKey: matchTagKey, - tagOperator: matchTagOperator, - tagValue: isInNInOperator(matchTagOperator) + tagOperator: matchTagOperator.toUpperCase(), + tagValue: isInNInOperator(matchTagOperator.toUpperCase()) ? parse(matchTagValue).data.flat() : matchTagValue, } as ITagToken; From cf2203956211d5ba1e4b119000127aad5a2395cb Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Fri, 15 Mar 2024 01:26:31 +0530 Subject: [PATCH 03/33] Revert "Explorer Toolbar maximised and minimised (#4656)" (#4705) This reverts commit aadb962b6ccf716375f71791c4215c833f4406f5. 
--- frontend/src/constants/localStorage.ts | 1 - .../ExplorerOptions/ExplorerOptionWrapper.tsx | 56 ----- .../ExplorerOptions.styles.scss | 4 +- .../ExplorerOptions/ExplorerOptions.tsx | 220 +++++++----------- .../ExplorerOptionsDroppableArea.styles.scss | 55 ----- .../ExplorerOptionsDroppableArea.tsx | 83 ------- .../src/container/ExplorerOptions/utils.ts | 52 ----- .../src/container/LogsExplorerViews/index.tsx | 4 +- frontend/src/pages/TracesExplorer/index.tsx | 6 +- 9 files changed, 88 insertions(+), 393 deletions(-) delete mode 100644 frontend/src/container/ExplorerOptions/ExplorerOptionWrapper.tsx delete mode 100644 frontend/src/container/ExplorerOptions/ExplorerOptionsDroppableArea.styles.scss delete mode 100644 frontend/src/container/ExplorerOptions/ExplorerOptionsDroppableArea.tsx diff --git a/frontend/src/constants/localStorage.ts b/frontend/src/constants/localStorage.ts index 0ba6cac302..296735b286 100644 --- a/frontend/src/constants/localStorage.ts +++ b/frontend/src/constants/localStorage.ts @@ -16,5 +16,4 @@ export enum LOCALSTORAGE { CHAT_SUPPORT = 'CHAT_SUPPORT', IS_IDENTIFIED_USER = 'IS_IDENTIFIED_USER', DASHBOARD_VARIABLES = 'DASHBOARD_VARIABLES', - SHOW_EXPLORER_TOOLBAR = 'SHOW_EXPLORER_TOOLBAR', } diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptionWrapper.tsx b/frontend/src/container/ExplorerOptions/ExplorerOptionWrapper.tsx deleted file mode 100644 index bdb300c404..0000000000 --- a/frontend/src/container/ExplorerOptions/ExplorerOptionWrapper.tsx +++ /dev/null @@ -1,56 +0,0 @@ -import { DndContext, DragEndEvent } from '@dnd-kit/core'; -import { useEffect, useState } from 'react'; - -import ExplorerOptions, { ExplorerOptionsProps } from './ExplorerOptions'; -import { - getExplorerToolBarVisibility, - setExplorerToolBarVisibility, -} from './utils'; - -type ExplorerOptionsWrapperProps = Omit< - ExplorerOptionsProps, - 'isExplorerOptionDrop' ->; - -function ExplorerOptionWrapper({ - disabled, - query, - isLoading, - onExport, - 
sourcepage, -}: ExplorerOptionsWrapperProps): JSX.Element { - const [isExplorerOptionHidden, setIsExplorerOptionHidden] = useState(false); - - useEffect(() => { - const toolbarVisibility = getExplorerToolBarVisibility(sourcepage); - setIsExplorerOptionHidden(!toolbarVisibility); - // eslint-disable-next-line react-hooks/exhaustive-deps - }, []); - - const handleDragEnd = (event: DragEndEvent): void => { - const { active, over } = event; - if ( - over !== null && - active.id === 'explorer-options-draggable' && - over.id === 'explorer-options-droppable' - ) { - setIsExplorerOptionHidden(true); - setExplorerToolBarVisibility(false, sourcepage); - } - }; - return ( - - - - ); -} - -export default ExplorerOptionWrapper; diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss b/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss index 9f4441904d..d76d18bb4f 100644 --- a/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss +++ b/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss @@ -3,7 +3,7 @@ } .explorer-update { position: fixed; - bottom: 24px; + bottom: 16px; left: calc(50% - 225px); display: flex; align-items: center; @@ -47,7 +47,7 @@ box-shadow: 4px 4px 16px 4px rgba(0, 0, 0, 0.25); backdrop-filter: blur(20px); position: fixed; - bottom: 24px; + bottom: 16px; left: calc(50% + 240px); transform: translate(calc(-50% - 120px), 0); transition: left 0.2s linear; diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx b/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx index ab26e03abc..8322c694d6 100644 --- a/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx +++ b/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx @@ -1,7 +1,5 @@ -/* eslint-disable react/jsx-props-no-spreading */ import './ExplorerOptions.styles.scss'; -import { useDraggable } from '@dnd-kit/core'; import { Color } from '@signozhq/design-tokens'; import { Button, @@ -33,15 +31,7 @@ import { 
useHandleExplorerTabChange } from 'hooks/useHandleExplorerTabChange'; import { useNotifications } from 'hooks/useNotifications'; import { mapCompositeQueryFromQuery } from 'lib/newQueryBuilder/queryBuilderMappers/mapCompositeQueryFromQuery'; import { Check, ConciergeBell, Disc3, Plus, X, XCircle } from 'lucide-react'; -import { - CSSProperties, - Dispatch, - SetStateAction, - useCallback, - useMemo, - useRef, - useState, -} from 'react'; +import { CSSProperties, useCallback, useMemo, useRef, useState } from 'react'; import { useSelector } from 'react-redux'; import { useHistory } from 'react-router-dom'; import { AppState } from 'store/reducers'; @@ -51,7 +41,6 @@ import { DataSource } from 'types/common/queryBuilder'; import AppReducer from 'types/reducer/app'; import { USER_ROLES } from 'types/roles'; -import ExplorerOptionsDroppableArea from './ExplorerOptionsDroppableArea'; import { DATASOURCE_VS_ROUTES, generateRGBAFromHex, @@ -68,8 +57,6 @@ function ExplorerOptions({ onExport, query, sourcepage, - isExplorerOptionHidden = false, - setIsExplorerOptionHidden, }: ExplorerOptionsProps): JSX.Element { const [isExport, setIsExport] = useState(false); const [isSaveModalOpen, setIsSaveModalOpen] = useState(false); @@ -79,7 +66,6 @@ function ExplorerOptions({ const history = useHistory(); const ref = useRef(null); const isDarkMode = useIsDarkMode(); - const [isDragEnabled, setIsDragEnabled] = useState(false); const onModalToggle = useCallback((value: boolean) => { setIsExport(value); @@ -271,31 +257,11 @@ function ExplorerOptions({ [isDarkMode], ); - const { - attributes, - listeners, - setNodeRef, - transform, - isDragging, - } = useDraggable({ - id: 'explorer-options-draggable', - disabled: isDragEnabled, - }); - const isEditDeleteSupported = allowedRoles.includes(role as string); - const style: React.CSSProperties | undefined = transform - ? 
{ - transform: `translate3d(${transform.x - 338}px, ${transform.y}px, 0)`, - width: `${400 - transform.y * 6}px`, - maxWidth: '440px', // initial width of the explorer options - overflow: 'hidden', - } - : undefined; - return ( <> - {isQueryUpdated && !isExplorerOptionHidden && !isDragging && ( + {isQueryUpdated && (
)} - {!isExplorerOptionHidden && ( -
-
- - showSearch - placeholder="Select a view" - loading={viewsIsLoading || isRefetching} - value={viewName || undefined} - onSelect={handleSelect} - style={{ - minWidth: 170, - }} - dropdownStyle={dropdownStyle} - className="views-dropdown" - allowClear={{ - clearIcon: , - }} - onDropdownVisibleChange={(open): void => { - setIsDragEnabled(open); - }} - onClear={handleClearSelect} - ref={ref} - > - {viewsData?.data?.data?.map((view) => { - const extraData = - view.extraData !== '' ? JSON.parse(view.extraData) : ''; - let bgColor = getRandomColor(); - if (extraData !== '') { - bgColor = extraData.color; - } - return ( - -
- {' '} - {view.name} -
-
- ); - })} - +
+
+ + showSearch + placeholder="Select a view" + loading={viewsIsLoading || isRefetching} + value={viewName || undefined} + onSelect={handleSelect} + style={{ + minWidth: 170, + }} + dropdownStyle={dropdownStyle} + className="views-dropdown" + allowClear={{ + clearIcon: , + }} + onClear={handleClearSelect} + ref={ref} + > + {viewsData?.data?.data?.map((view) => { + const extraData = + view.extraData !== '' ? JSON.parse(view.extraData) : ''; + let bgColor = getRandomColor(); + if (extraData !== '') { + bgColor = extraData.color; + } + return ( + +
+ {' '} + {view.name} +
+
+ ); + })} + - -
- -
- -
- - - - - - - -
+
- )} - +
+ +
+ + + + + + + +
+
>; } -ExplorerOptions.defaultProps = { - isLoading: false, - isExplorerOptionHidden: false, - setIsExplorerOptionHidden: undefined, -}; +ExplorerOptions.defaultProps = { isLoading: false }; export default ExplorerOptions; diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptionsDroppableArea.styles.scss b/frontend/src/container/ExplorerOptions/ExplorerOptionsDroppableArea.styles.scss deleted file mode 100644 index e092229bb9..0000000000 --- a/frontend/src/container/ExplorerOptions/ExplorerOptionsDroppableArea.styles.scss +++ /dev/null @@ -1,55 +0,0 @@ -.explorer-option-droppable-container { - position: fixed; - bottom: 0; - width: -webkit-fill-available; - height: 24px; - display: flex; - justify-content: center; - border-radius: 10px 10px 0px 0px; - // box-shadow: 0px 4px 16px 0px rgba(0, 0, 0, 0.25); - // backdrop-filter: blur(20px); - - .explorer-actions-btn { - display: flex; - gap: 8px; - margin-right: 8px; - - .action-btn { - display: flex; - justify-content: center; - align-items: center; - border-radius: 10px 10px 0px 0px; - box-shadow: 0px 4px 16px 0px rgba(0, 0, 0, 0.25); - backdrop-filter: blur(20px); - height: 24px !important; - border: none; - } - } - - .explorer-show-btn { - border-radius: 10px 10px 0px 0px; - border: 1px solid var(--bg-slate-400); - background: rgba(22, 24, 29, 0.40); - box-shadow: 0px 4px 16px 0px rgba(0, 0, 0, 0.25); - backdrop-filter: blur(20px); - align-self: center; - padding: 8px 12px; - height: 24px !important; - - .menu-bar { - border-radius: 50px; - background: var(--bg-slate-200); - height: 4px; - width: 50px; - } - } -} - -.lightMode { - .explorer-option-droppable-container { - - .explorer-show-btn { - background: var(--bg-vanilla-400); - } - } -} \ No newline at end of file diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptionsDroppableArea.tsx b/frontend/src/container/ExplorerOptions/ExplorerOptionsDroppableArea.tsx deleted file mode 100644 index 33bef7c984..0000000000 --- 
a/frontend/src/container/ExplorerOptions/ExplorerOptionsDroppableArea.tsx +++ /dev/null @@ -1,83 +0,0 @@ -/* eslint-disable no-nested-ternary */ -import './ExplorerOptionsDroppableArea.styles.scss'; - -import { useDroppable } from '@dnd-kit/core'; -import { Color } from '@signozhq/design-tokens'; -import { Button, Tooltip } from 'antd'; -import { Disc3, X } from 'lucide-react'; -import { Dispatch, SetStateAction } from 'react'; -import { DataSource } from 'types/common/queryBuilder'; - -import { setExplorerToolBarVisibility } from './utils'; - -interface DroppableAreaProps { - isQueryUpdated: boolean; - isExplorerOptionHidden?: boolean; - sourcepage: DataSource; - setIsExplorerOptionHidden?: Dispatch>; - handleClearSelect: () => void; - onUpdateQueryHandler: () => void; -} - -function ExplorerOptionsDroppableArea({ - isQueryUpdated, - isExplorerOptionHidden, - sourcepage, - setIsExplorerOptionHidden, - handleClearSelect, - onUpdateQueryHandler, -}: DroppableAreaProps): JSX.Element { - const { setNodeRef } = useDroppable({ - id: 'explorer-options-droppable', - }); - - const handleShowExplorerOption = (): void => { - if (setIsExplorerOptionHidden) { - setIsExplorerOptionHidden(false); - setExplorerToolBarVisibility(true, sourcepage); - } - }; - - return ( -
- {isExplorerOptionHidden && ( - <> - {isQueryUpdated && ( -
- -
- )} - - - )} -
- ); -} - -ExplorerOptionsDroppableArea.defaultProps = { - isExplorerOptionHidden: undefined, - setIsExplorerOptionHidden: undefined, -}; - -export default ExplorerOptionsDroppableArea; diff --git a/frontend/src/container/ExplorerOptions/utils.ts b/frontend/src/container/ExplorerOptions/utils.ts index d94e64161e..e3ac710609 100644 --- a/frontend/src/container/ExplorerOptions/utils.ts +++ b/frontend/src/container/ExplorerOptions/utils.ts @@ -1,6 +1,5 @@ import { Color } from '@signozhq/design-tokens'; import { showErrorNotification } from 'components/ExplorerCard/utils'; -import { LOCALSTORAGE } from 'constants/localStorage'; import { QueryParams } from 'constants/query'; import ROUTES from 'constants/routes'; import { mapQueryDataFromApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataFromApi'; @@ -68,54 +67,3 @@ export const generateRGBAFromHex = (hex: string, opacity: number): string => hex.slice(3, 5), 16, )}, ${parseInt(hex.slice(5, 7), 16)}, ${opacity})`; - -export const getExplorerToolBarVisibility = (dataSource: string): boolean => { - try { - const showExplorerToolbar = localStorage.getItem( - LOCALSTORAGE.SHOW_EXPLORER_TOOLBAR, - ); - if (showExplorerToolbar === null) { - const parsedShowExplorerToolbar: { - [DataSource.LOGS]: boolean; - [DataSource.TRACES]: boolean; - [DataSource.METRICS]: boolean; - } = { - [DataSource.METRICS]: true, - [DataSource.TRACES]: true, - [DataSource.LOGS]: true, - }; - localStorage.setItem( - LOCALSTORAGE.SHOW_EXPLORER_TOOLBAR, - JSON.stringify(parsedShowExplorerToolbar), - ); - return true; - } - const parsedShowExplorerToolbar = JSON.parse(showExplorerToolbar || '{}'); - return parsedShowExplorerToolbar[dataSource]; - } catch (error) { - console.error(error); - return false; - } -}; - -export const setExplorerToolBarVisibility = ( - value: boolean, - dataSource: string, -): void => { - try { - const showExplorerToolbar = localStorage.getItem( - LOCALSTORAGE.SHOW_EXPLORER_TOOLBAR, - ); - if (showExplorerToolbar) { 
- const parsedShowExplorerToolbar = JSON.parse(showExplorerToolbar); - parsedShowExplorerToolbar[dataSource] = value; - localStorage.setItem( - LOCALSTORAGE.SHOW_EXPLORER_TOOLBAR, - JSON.stringify(parsedShowExplorerToolbar), - ); - return; - } - } catch (error) { - console.error(error); - } -}; diff --git a/frontend/src/container/LogsExplorerViews/index.tsx b/frontend/src/container/LogsExplorerViews/index.tsx index 45b33d01af..a12fd80997 100644 --- a/frontend/src/container/LogsExplorerViews/index.tsx +++ b/frontend/src/container/LogsExplorerViews/index.tsx @@ -14,7 +14,7 @@ import { PANEL_TYPES, } from 'constants/queryBuilder'; import { DEFAULT_PER_PAGE_VALUE } from 'container/Controls/config'; -import ExplorerOptionWrapper from 'container/ExplorerOptions/ExplorerOptionWrapper'; +import ExplorerOptions from 'container/ExplorerOptions/ExplorerOptions'; import GoToTop from 'container/GoToTop'; import LogsExplorerChart from 'container/LogsExplorerChart'; import LogsExplorerList from 'container/LogsExplorerList'; @@ -634,7 +634,7 @@ function LogsExplorerViews({ - - From 0365fa5421e72630bb8376e3ba4b6f023ae27037 Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Fri, 15 Mar 2024 12:19:07 +0530 Subject: [PATCH 04/33] feat: handled inactive tab handling by removing the display flex override (#4708) Co-authored-by: Sagar Rajput --- .../src/pages/LogsModulePage/LogsModulePage.styles.scss | 8 +------- frontend/src/pages/Pipelines/Pipelines.styles.scss | 4 ++++ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss b/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss index 9465594ccb..acba2781df 100644 --- a/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss +++ b/frontend/src/pages/LogsModulePage/LogsModulePage.styles.scss @@ -1,10 +1,4 @@ .logs-module-container { - // margin: 0 -1rem; // as we have added a margin of 0 1rem components 
container, have to adjust the margin with negative to style the logs explorer as we want - - // .ant-tabs-content-holder { - // margin: 0 -1rem; - // } - flex: 1; display: flex; flex-direction: column; @@ -54,4 +48,4 @@ } } } -} \ No newline at end of file +} diff --git a/frontend/src/pages/Pipelines/Pipelines.styles.scss b/frontend/src/pages/Pipelines/Pipelines.styles.scss index 8521aab75e..78578006ee 100644 --- a/frontend/src/pages/Pipelines/Pipelines.styles.scss +++ b/frontend/src/pages/Pipelines/Pipelines.styles.scss @@ -2,4 +2,8 @@ .ant-tabs-content { padding: 0 16px; } + + .ant-tabs-tabpane-hidden { + display: none !important; + } } From 60946b5e9df56c80ee75c0b8cb095e94f2c4d840 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Fri, 15 Mar 2024 12:28:03 +0530 Subject: [PATCH 05/33] feat: remove disabled in case of dashboard locked (#4709) --- .../VariableItem.tsx | 151 ++++++++---------- 1 file changed, 71 insertions(+), 80 deletions(-) diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx index 339210f956..9f54305b1e 100644 --- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx @@ -2,14 +2,13 @@ import './DashboardVariableSelection.styles.scss'; import { orange } from '@ant-design/colors'; import { WarningOutlined } from '@ant-design/icons'; -import { Input, Popover, Select, Tooltip, Typography } from 'antd'; +import { Input, Popover, Select, Typography } from 'antd'; import dashboardVariablesQuery from 'api/dashboard/variables/dashboardVariablesQuery'; import { REACT_QUERY_KEY } from 'constants/reactQueryKeys'; import { commaValuesParser } from 'lib/dashbaordVariables/customCommaValuesParser'; import sortValues from 'lib/dashbaordVariables/sortVariableValues'; import { debounce } from 'lodash-es'; import map from 
'lodash-es/map'; -import { useDashboard } from 'providers/Dashboard/Dashboard'; import { memo, useEffect, useMemo, useState } from 'react'; import { useQuery } from 'react-query'; import { IDashboardVariable } from 'types/api/dashboard/getAll'; @@ -52,7 +51,6 @@ function VariableItem({ onValueUpdate, lastUpdatedVar, }: VariableItemProps): JSX.Element { - const { isDashboardLocked } = useDashboard(); const [optionsData, setOptionsData] = useState<(string | number | boolean)[]>( [], ); @@ -222,84 +220,77 @@ function VariableItem({ }, [variableData.type, variableData.customValue]); return ( - - - - ${variableData.name} - - - {variableData.type === 'TEXTBOX' ? ( - + + ${variableData.name} + + + {variableData.type === 'TEXTBOX' ? ( + { + debouncedHandleChange(e.target.value || ''); + }} + style={{ + width: + 50 + ((variableData.selectedValue?.toString()?.length || 0) * 7 || 50), + }} + /> + ) : ( + !errorMessage && + optionsData && ( + - {enableSelectAll && ( - - ALL - - )} - {map(optionsData, (option) => ( - - {option.toString()} - - ))} - - ) - )} - {variableData.type !== 'TEXTBOX' && errorMessage && ( - - {errorMessage}} - > - - - - )} - - - + placeholder="Select value" + placement="bottomRight" + mode={mode} + dropdownMatchSelectWidth={false} + style={SelectItemStyle} + loading={isLoading} + showSearch + data-testid="variable-select" + className="variable-select" + getPopupContainer={popupContainer} + > + {enableSelectAll && ( + + ALL + + )} + {map(optionsData, (option) => ( + + {option.toString()} + + ))} + + ) + )} + {variableData.type !== 'TEXTBOX' && errorMessage && ( + + {errorMessage}} + > + + + + )} + + ); } From 07747e73d6380fbc5e637662b0e80b10efd6683a Mon Sep 17 00:00:00 2001 From: Yunus M Date: Fri, 15 Mar 2024 13:25:06 +0530 Subject: [PATCH 06/33] fix: context filter input overflow issue, min height for logs list view (#4710) --- .../QueryBuilderSearchWrapper.styles.scss | 19 +++++++++++-------- .../LogsExplorerList.style.scss | 1 + 2 files changed, 12 
insertions(+), 8 deletions(-) diff --git a/frontend/src/components/LogDetail/QueryBuilderSearchWrapper.styles.scss b/frontend/src/components/LogDetail/QueryBuilderSearchWrapper.styles.scss index e3da355621..2a6822dc00 100644 --- a/frontend/src/components/LogDetail/QueryBuilderSearchWrapper.styles.scss +++ b/frontend/src/components/LogDetail/QueryBuilderSearchWrapper.styles.scss @@ -1,10 +1,13 @@ .query-builder-search-wrapper { - margin-top: 10px; - height: 46px; - border: 1px solid var(--bg-slate-400); - border-bottom: none; + margin-top: 10px; + border: 1px solid var(--bg-slate-400); + border-bottom: none; - .ant-select-selector { - border: none !important; - } -} \ No newline at end of file + .ant-select-selector { + border: none !important; + + input { + font-size: 12px; + } + } +} diff --git a/frontend/src/container/LogsExplorerList/LogsExplorerList.style.scss b/frontend/src/container/LogsExplorerList/LogsExplorerList.style.scss index 29dc383b7d..be6b8d627f 100644 --- a/frontend/src/container/LogsExplorerList/LogsExplorerList.style.scss +++ b/frontend/src/container/LogsExplorerList/LogsExplorerList.style.scss @@ -8,4 +8,5 @@ line-height: 18px; letter-spacing: -0.005em; text-align: left; + min-height: 500px; } From 4c2174958fbaff1eae65ce09f25910c102475eab Mon Sep 17 00:00:00 2001 From: Yunus M Date: Fri, 15 Mar 2024 13:38:43 +0530 Subject: [PATCH 07/33] chore: remove share invite link message (#4691) --- frontend/public/locales/en-GB/organizationsettings.json | 3 +-- frontend/public/locales/en/organizationsettings.json | 3 +-- .../OrganizationSettings/PendingInvitesContainer/index.tsx | 3 --- frontend/webpack.config.prod.js | 1 + 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/frontend/public/locales/en-GB/organizationsettings.json b/frontend/public/locales/en-GB/organizationsettings.json index deae9666ee..74654d9b46 100644 --- a/frontend/public/locales/en-GB/organizationsettings.json +++ b/frontend/public/locales/en-GB/organizationsettings.json @@ 
-14,6 +14,5 @@ "delete_domain_message": "Are you sure you want to delete this domain?", "delete_domain": "Delete Domain", "add_domain": "Add Domains", - "saml_settings": "Your SAML settings have been saved, please login from incognito window to confirm that it has been set up correctly", - "invite_link_share_manually": "After inviting members, please copy the invite link and send them the link manually" + "saml_settings": "Your SAML settings have been saved, please login from incognito window to confirm that it has been set up correctly" } diff --git a/frontend/public/locales/en/organizationsettings.json b/frontend/public/locales/en/organizationsettings.json index deae9666ee..74654d9b46 100644 --- a/frontend/public/locales/en/organizationsettings.json +++ b/frontend/public/locales/en/organizationsettings.json @@ -14,6 +14,5 @@ "delete_domain_message": "Are you sure you want to delete this domain?", "delete_domain": "Delete Domain", "add_domain": "Add Domains", - "saml_settings": "Your SAML settings have been saved, please login from incognito window to confirm that it has been set up correctly", - "invite_link_share_manually": "After inviting members, please copy the invite link and send them the link manually" + "saml_settings": "Your SAML settings have been saved, please login from incognito window to confirm that it has been set up correctly" } diff --git a/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx b/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx index 7395102d4c..3e9276f596 100644 --- a/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx +++ b/frontend/src/container/OrganizationSettings/PendingInvitesContainer/index.tsx @@ -279,9 +279,6 @@ function PendingInvitesContainer(): JSX.Element { - - {t('invite_link_share_manually')} - + +
+ } + > + - - - + <> +
+
+
max lines per row
+
+ + +
- - )} +
+
{!addNewColumn &&
} diff --git a/frontend/src/container/LiveLogs/LiveLogsList/index.tsx b/frontend/src/container/LiveLogs/LiveLogsList/index.tsx index 7be2927445..39a39ab990 100644 --- a/frontend/src/container/LiveLogs/LiveLogsList/index.tsx +++ b/frontend/src/container/LiveLogs/LiveLogsList/index.tsx @@ -71,6 +71,7 @@ function LiveLogsList({ logs }: LiveLogsListProps): JSX.Element { key={log.id} logData={log} selectedFields={selectedFields} + linesPerRow={options.maxLines} onAddToQuery={onAddToQuery} onSetActiveLog={onSetActiveLog} /> diff --git a/frontend/src/container/LogsExplorerList/index.tsx b/frontend/src/container/LogsExplorerList/index.tsx index 21b03cf413..fc5a1f6800 100644 --- a/frontend/src/container/LogsExplorerList/index.tsx +++ b/frontend/src/container/LogsExplorerList/index.tsx @@ -90,6 +90,7 @@ function LogsExplorerList({ onAddToQuery={onAddToQuery} onSetActiveLog={onSetActiveLog} activeLog={activeLog} + linesPerRow={options.maxLines} /> ); }, diff --git a/frontend/src/container/LogsTable/index.tsx b/frontend/src/container/LogsTable/index.tsx index c87d4232f6..b10c3503dd 100644 --- a/frontend/src/container/LogsTable/index.tsx +++ b/frontend/src/container/LogsTable/index.tsx @@ -74,6 +74,7 @@ function LogsTable(props: LogsTableProps): JSX.Element { key={log.id} logData={log} selectedFields={selected} + linesPerRow={linesPerRow} onAddToQuery={onAddToQuery} onSetActiveLog={onSetActiveLog} /> From 5745727031fad9bcfe78e3f326ef3be552ae1a9e Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Fri, 22 Mar 2024 14:59:43 +0530 Subject: [PATCH 20/33] fix: [SIG-565]: design feedback for integrations (#4723) * fix: [SIG-565]: design feedback for integrations * feat: added dotted line in the test connection modal * feat: handle the URL change for integration details page to support back navigation * feat: added ghost loading states * feat: added margin for details header * feat: added margin for details header * feat: increase the list sizes to 20 * fix: handle icons * fix: remove 
unused classes --- frontend/public/Icons/cable-car.svg | 1 + frontend/public/Icons/configure.svg | 1 + frontend/public/Icons/group.svg | 1 + .../src/assets/Integrations/ConfigureIcon.tsx | 23 ++++++++++ frontend/src/constants/query.ts | 1 + .../IntegrationDetailContent.tsx | 9 ++-- .../Configure.tsx | 26 ++++++------ .../DataCollected.tsx | 4 +- .../IntegrationDetailContentTabs.styles.scss | 9 +++- .../IntegrationDetailHeader.tsx | 42 +++++++++++++++---- .../IntegrationDetailPage.styles.scss | 28 +++++++++++-- .../IntegrationDetailPage.tsx | 5 ++- .../src/pages/Integrations/Integrations.tsx | 29 +++++++++++-- 13 files changed, 140 insertions(+), 39 deletions(-) create mode 100644 frontend/public/Icons/cable-car.svg create mode 100644 frontend/public/Icons/configure.svg create mode 100644 frontend/public/Icons/group.svg create mode 100644 frontend/src/assets/Integrations/ConfigureIcon.tsx diff --git a/frontend/public/Icons/cable-car.svg b/frontend/public/Icons/cable-car.svg new file mode 100644 index 0000000000..0c7318debd --- /dev/null +++ b/frontend/public/Icons/cable-car.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Icons/configure.svg b/frontend/public/Icons/configure.svg new file mode 100644 index 0000000000..088dfa9447 --- /dev/null +++ b/frontend/public/Icons/configure.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Icons/group.svg b/frontend/public/Icons/group.svg new file mode 100644 index 0000000000..e293cebcd0 --- /dev/null +++ b/frontend/public/Icons/group.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/src/assets/Integrations/ConfigureIcon.tsx b/frontend/src/assets/Integrations/ConfigureIcon.tsx new file mode 100644 index 0000000000..84ddef5de0 --- /dev/null +++ b/frontend/src/assets/Integrations/ConfigureIcon.tsx @@ -0,0 +1,23 @@ +import { Color } from '@signozhq/design-tokens'; +import { useIsDarkMode } from 'hooks/useDarkMode'; + +function ConfigureIcon(): JSX.Element { 
+ const isDarkMode = useIsDarkMode(); + return ( + + + + + + + ); +} + +export default ConfigureIcon; diff --git a/frontend/src/constants/query.ts b/frontend/src/constants/query.ts index d3bd2729d1..31ec5fcd20 100644 --- a/frontend/src/constants/query.ts +++ b/frontend/src/constants/query.ts @@ -27,5 +27,6 @@ export enum QueryParams { viewName = 'viewName', viewKey = 'viewKey', expandedWidgetId = 'expandedWidgetId', + integration = 'integration', pagination = 'pagination', } diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx index 6083489b58..ec81d51db6 100644 --- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx +++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContent.tsx @@ -1,7 +1,8 @@ import './IntegrationDetailPage.styles.scss'; import { Button, Tabs, TabsProps, Typography } from 'antd'; -import { Drum, Hammer, Table2 } from 'lucide-react'; +import ConfigureIcon from 'assets/Integrations/ConfigureIcon'; +import { CableCar, Group } from 'lucide-react'; import { IntegrationDetailedProps } from 'types/api/integrations/types'; import Configure from './IntegrationDetailContentTabs/Configure'; @@ -24,7 +25,7 @@ function IntegrationDetailContent( @@ -43,7 +44,7 @@ function IntegrationDetailContent( @@ -56,7 +57,7 @@ function IntegrationDetailContent( diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx index ede3b41137..92a5e0c823 100644 --- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx +++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/Configure.tsx @@ -1,6 +1,6 @@ import './IntegrationDetailContentTabs.styles.scss'; -import { Button, 
Tooltip, Typography } from 'antd'; +import { Button, Typography } from 'antd'; import cx from 'classnames'; import { MarkdownRenderer } from 'components/MarkdownRenderer/MarkdownRenderer'; import { useState } from 'react'; @@ -21,18 +21,18 @@ function Configure(props: ConfigurationProps): JSX.Element {
{configuration.map((config, index) => ( - - - + ))}
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx index a3c387dc3a..1c605ec863 100644 --- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx +++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/DataCollected.tsx @@ -59,7 +59,7 @@ function DataCollected(props: DataCollectedProps): JSX.Element { index % 2 === 0 ? 'table-row-dark' : '' } dataSource={logsData} - pagination={{ pageSize: 3 }} + pagination={{ pageSize: 20 }} className="logs-section-table" />
@@ -74,7 +74,7 @@ function DataCollected(props: DataCollectedProps): JSX.Element { index % 2 === 0 ? 'table-row-dark' : '' } dataSource={metricsData} - pagination={{ pageSize: 3 }} + pagination={{ pageSize: 20 }} className="metrics-section-table" />
diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss index 8340d0d4c0..81dcb6bf59 100644 --- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss +++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailContentTabs/IntegrationDetailContentTabs.styles.scss @@ -4,7 +4,7 @@ .integration-detail-overview-left-container { display: flex; flex-direction: column; - width: 25%; + width: 30%; gap: 26px; border-right: 1px solid var(--bg-slate-500); padding: 16px 0; @@ -185,13 +185,14 @@ .configure-menu { display: flex; flex-direction: column; - width: 25%; + width: 30%; padding: 16px 16px 0px 0px; border-right: 1px solid var(--bg-slate-500); gap: 8px; .configure-menu-item { padding: 4px 8px; + height: auto; text-align: start; color: var(--bg-vanilla-100); font-family: Inter; @@ -199,6 +200,10 @@ font-style: normal; font-weight: 400; line-height: 18px; /* 128.571% */ + + .configure-text { + text-wrap: pretty; + } } .configure-menu-item:hover { diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx index 6b2a7b7c34..cab49391f5 100644 --- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx +++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailHeader.tsx @@ -1,7 +1,7 @@ /* eslint-disable no-nested-ternary */ import './IntegrationDetailPage.styles.scss'; -import { Button, Modal, Typography } from 'antd'; +import { Button, Modal, Tooltip, Typography } from 'antd'; import installIntegration from 'api/Integrations/installIntegration'; import { SOMETHING_WENT_WRONG } from 'constants/api'; import dayjs from 
'dayjs'; @@ -22,6 +22,7 @@ interface IntegrationDetailHeaderProps { connectionState: ConnectionStates; connectionData: IntegrationConnectionStatus; } +// eslint-disable-next-line sonarjs/cognitive-complexity function IntegrationDetailHeader( props: IntegrationDetailHeaderProps, ): JSX.Element { @@ -154,19 +155,42 @@ function IntegrationDetailHeader( Last recieved from - - {latestData.last_received_from} - +
+ + + {latestData.last_received_from} + +
Last recieved at - - {latestData.last_received_ts_ms - ? dayjs(latestData.last_received_ts_ms).format('DD MMM YYYY HH:mm') - : ''} - +
+ + + {latestData.last_received_ts_ms + ? dayjs(latestData.last_received_ts_ms).format('DD MMM YYYY HH:mm') + : ''} + +
) : connectionState === ConnectionStates.TestingConnection ? ( diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss index d9982c3aab..b7630491ae 100644 --- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss +++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.styles.scss @@ -53,9 +53,17 @@ .loading-integration-details { display: flex; - height: 400px; - justify-content: center; - align-items: center; + flex-direction: column; + gap: 16px; + + .skeleton-1 { + height: 125px; + width: 100%; + } + .skeleton-2 { + height: 250px; + width: 100%; + } } .all-integrations-btn { @@ -254,6 +262,7 @@ border-radius: 4px; border: 1px solid rgba(218, 85, 101, 0.2); background: rgba(218, 85, 101, 0.06); + gap: 32px; .unintall-integration-bar-text { display: flex; @@ -429,6 +438,15 @@ .data-info { display: flex; justify-content: space-between; + align-items: center; + + .connection-line { + border: 1px dashed var(--bg-slate-200); + min-width: 20px; + height: 0px; + flex-grow: 1; + margin: 0px 8px; + } .last-data { color: var(--bg-vanilla-400); @@ -447,6 +465,7 @@ font-style: normal; font-weight: 400; line-height: 18px; /* 150% */ + max-width: 320px; } } .testingConnection { @@ -622,6 +641,9 @@ .connection-content { .data-info { + .connection-line { + border: 1px dashed var(--bg-vanilla-400); + } .last-data { color: var(--bg-slate-400); } diff --git a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx index e9a975001a..88be0dc3a3 100644 --- a/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx +++ b/frontend/src/pages/Integrations/IntegrationDetailPage/IntegrationDetailPage.tsx @@ -4,7 +4,7 @@ import './IntegrationDetailPage.styles.scss'; import 
{ Color } from '@signozhq/design-tokens'; -import { Button, Typography } from 'antd'; +import { Button, Skeleton, Typography } from 'antd'; import { useGetIntegration } from 'hooks/Integrations/useGetIntegration'; import { useGetIntegrationStatus } from 'hooks/Integrations/useGetIntegrationStatus'; import { defaultTo } from 'lodash-es'; @@ -71,7 +71,8 @@ function IntegrationDetailPage(props: IntegrationDetailPageProps): JSX.Element { {loading ? (
- Please wait.. While we load the integration details + +
) : isError ? (
diff --git a/frontend/src/pages/Integrations/Integrations.tsx b/frontend/src/pages/Integrations/Integrations.tsx index 6d25a20a6f..bda4184eab 100644 --- a/frontend/src/pages/Integrations/Integrations.tsx +++ b/frontend/src/pages/Integrations/Integrations.tsx @@ -1,17 +1,38 @@ import './Integrations.styles.scss'; -import { useState } from 'react'; +import useUrlQuery from 'hooks/useUrlQuery'; +import { useCallback, useMemo, useState } from 'react'; +import { useHistory, useLocation } from 'react-router-dom'; import Header from './Header'; import IntegrationDetailPage from './IntegrationDetailPage/IntegrationDetailPage'; import IntegrationsList from './IntegrationsList'; function Integrations(): JSX.Element { - const [selectedIntegration, setSelectedIntegration] = useState( - null, + const urlQuery = useUrlQuery(); + const history = useHistory(); + const location = useLocation(); + + const selectedIntegration = useMemo(() => urlQuery.get('integration'), [ + urlQuery, + ]); + + const setSelectedIntegration = useCallback( + (integration: string | null) => { + if (integration) { + urlQuery.set('integration', integration); + } else { + urlQuery.set('integration', ''); + } + const generatedUrl = `${location.pathname}?${urlQuery.toString()}`; + history.push(generatedUrl); + }, + [history, location.pathname, urlQuery], ); - const [activeDetailTab, setActiveDetailTab] = useState(null); + const [activeDetailTab, setActiveDetailTab] = useState( + 'overview', + ); const [searchTerm, setSearchTerm] = useState(''); return ( From f24135f5b00d2598b06a5d8d130830775fd657d9 Mon Sep 17 00:00:00 2001 From: Raj Kamal Singh <1133322+raj-k-singh@users.noreply.github.com> Date: Sat, 23 Mar 2024 11:39:28 +0530 Subject: [PATCH 21/33] Feat: QS: postgres integration: instructions for collecting and parsing logs (#4738) * chore: offer metrics config instructions for signoz cloud only * chore: some more cleanups * chore: get log collection instructions started * feat: flesh out log collection 
otel config for postgres * chore: some cleanup * chore: some more cleanup * chore: some more cleanup --- .../postgres/config/collect-logs.md | 109 +++++++++++++ .../postgres/config/collect-metrics.md | 101 ++++++++++++ .../config/configure-otel-collector.md | 146 ------------------ .../postgres/config/prerequisites.md | 50 ++++-- .../postgres/integration.json | 82 ++++++---- .../builtin_integrations/postgres/overview.md | 4 +- 6 files changed, 299 insertions(+), 193 deletions(-) create mode 100644 pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-logs.md create mode 100644 pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-metrics.md delete mode 100644 pkg/query-service/app/integrations/builtin_integrations/postgres/config/configure-otel-collector.md diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-logs.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-logs.md new file mode 100644 index 0000000000..f49e722856 --- /dev/null +++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-logs.md @@ -0,0 +1,109 @@ +### Collect Postgres Logs + +#### Create collector config file + +Save the following config for collecting postgres logs in a file named `postgres-logs-collection-config.yaml` + +```yaml +receivers: + filelog/postgresql: + include: ["${env:POSTGRESQL_LOG_FILE}"] + operators: + # Parse default postgresql text log format. + # `log_line_prefix` postgres setting defaults to '%m [%p] ' which logs the timestamp and the process ID + # See https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-LOG-LINE-PREFIX for more details + - type: regex_parser + if: body matches '^(?P\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.?[0-9]*? [A-Z]*) \\[(?P[0-9]+)\\] (?P[A-Z]*). (?P.*)$' + parse_from: body + regex: '^(?P\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.?[0-9]*? [A-Z]*) \[(?P[0-9]+)\] (?P[A-Z]*). 
(?P.*)$' + timestamp: + parse_from: attributes.ts + layout: '%Y-%m-%d %H:%M:%S %Z' + severity: + parse_from: attributes.log_level + mapping: + debug: + - DEBUG1 + - DEBUG2 + - DEBUG3 + - DEBUG4 + - DEBUG5 + info: + - INFO + - LOG + - NOTICE + - DETAIL + warning: WARNING + error: ERROR + fatal: + - FATAL + - PANIC + on_error: send + - type: move + if: attributes.message != nil + from: attributes.message + to: body + - type: remove + if: attributes.log_level != nil + field: attributes.log_level + - type: remove + if: attributes.ts != nil + field: attributes.ts + - type: add + field: attributes.source + value: postgres + +processors: + batch: + send_batch_size: 10000 + send_batch_max_size: 11000 + timeout: 10s + +exporters: + # export to SigNoz cloud + otlp/postgres-logs: + endpoint: "${env:OTLP_DESTINATION_ENDPOINT}" + tls: + insecure: false + headers: + "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}" + + # export to local collector + # otlp/local: + # endpoint: "localhost:4317" + # tls: + # insecure: true + +service: + pipelines: + logs/postgresql: + receivers: [filelog/postgresql] + processors: [batch] + exporters: [otlp/postgres-logs] +``` + +#### Set Environment Variables + +Set the following environment variables in your otel-collector environment: + +```bash + +# path of Postgres server log file. must be accessible by the otel collector +export POSTGRESQL_LOG_FILE=/usr/local/var/log/postgres.log + +# region specific SigNoz cloud ingestion endpoint +export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443" + +# your SigNoz ingestion key +export SIGNOZ_INGESTION_KEY="signoz-ingestion-key" + +``` + +#### Use collector config file + +Make the collector config file available to your otel collector and use it by adding the following flag to the command for running your collector ```bash +--config postgres-logs-collection-config.yaml +``` +Note: the collector can use multiple config files, specified by multiple occurrences of the --config flag. 
+ diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-metrics.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-metrics.md new file mode 100644 index 0000000000..ad1971fe35 --- /dev/null +++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/collect-metrics.md @@ -0,0 +1,101 @@ +### Collect Postgres Metrics + +You can configure Postgres metrics collection by providing the required collector config to your collector. + +#### Create collector config file + +Save the following config for collecting postgres metrics in a file named `postgres-metrics-collection-config.yaml` + +```yaml +receivers: + postgresql: + # The endpoint of the postgresql server. Whether using TCP or Unix sockets, this value should be host:port. If transport is set to unix, the endpoint will internally be translated from host:port to /host.s.PGSQL.port + endpoint: ${env:POSTGRESQL_ENDPOINT} + # The frequency at which to collect metrics from the Postgres instance. + collection_interval: 60s + # The username used to access the postgres instance + username: ${env:POSTGRESQL_USERNAME} + # The password used to access the postgres instance + password: ${env:POSTGRESQL_PASSWORD} + # The list of databases for which the receiver will attempt to collect statistics. If an empty list is provided, the receiver will attempt to collect statistics for all non-template databases + databases: [] + # # Defines the network to use for connecting to the server. 
Valid Values are `tcp` or `unix` + # transport: tcp + tls: + # set to false if SSL is enabled on the server + insecure: true + # ca_file: /etc/ssl/certs/ca-certificates.crt + # cert_file: /etc/ssl/certs/postgres.crt + # key_file: /etc/ssl/certs/postgres.key + metrics: + postgresql.database.locks: + enabled: true + postgresql.deadlocks: + enabled: true + postgresql.sequential_scans: + enabled: true + +processors: + # enriches the data with additional host information + # see https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor + resourcedetection/system: + # add additional detectors if needed + detectors: ["system"] + system: + hostname_sources: ["os"] + +exporters: + # export to SigNoz cloud + otlp/postgres: + endpoint: "${env:OTLP_DESTINATION_ENDPOINT}" + tls: + insecure: false + headers: + "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}" + + # export to local collector + # otlp/local: + # endpoint: "localhost:4317" + # tls: + # insecure: true + +service: + pipelines: + metrics/postgresql: + receivers: [postgresql] + # note: remove this processor if the collector host is not running on the same host as the postgres instance + processors: [resourcedetection/system] + exporters: [otlp/postgres] +``` + +#### Set Environment Variables + +Set the following environment variables in your otel-collector environment: + +```bash + +# username for Postgres monitoring user +export POSTGRESQL_USERNAME="monitoring" + +# password for Postgres monitoring user +export POSTGRESQL_PASSWORD="" + +# Postgres endpoint reachable from the otel collector +export POSTGRESQL_ENDPOINT="host:port" + + +# region specific SigNoz cloud ingestion endpoint +export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443" + +# your SigNoz ingestion key +export SIGNOZ_INGESTION_KEY="signoz-ingestion-key" + +``` + +#### Use collector config file + +Make the collector config file available to your otel 
collector and use it by adding the following flag to the command for running your collector +```bash +--config postgres-metrics-collection-config.yaml +``` +Note: the collector can use multiple config files, specified by multiple occurrences of the --config flag. diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/configure-otel-collector.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/configure-otel-collector.md deleted file mode 100644 index 24fc840a30..0000000000 --- a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/configure-otel-collector.md +++ /dev/null @@ -1,146 +0,0 @@ -### Configure otel collector - -#### Create collector config file - -Save the collector config for monitoring postgres in a file named `postgres-collector-config.yaml` - -Use the following configuration for SigNoz cloud. See further below for configuration for self hosted SigNoz - -```yaml -receivers: - postgresql: - # The endpoint of the postgresql server. Whether using TCP or Unix sockets, this value should be host:port. If transport is set to unix, the endpoint will internally be translated from host:port to /host.s.PGSQL.port - endpoint: ${env:POSTGRESQL_ENDPOINT} - # The frequency at which to collect metrics from the Postgres instance. - collection_interval: 60s - # The username used to access the postgres instance - username: monitoring - # The password used to access the postgres instance - password: ${env:POSTGRESQL_PASSWORD} - # The list of databases for which the receiver will attempt to collect statistics. If an empty list is provided, the receiver will attempt to collect statistics for all non-template databases - databases: [] - # # Defines the network to use for connecting to the server. 
Valid Values are `tcp` or `unix` - # transport: tcp - tls: - # set to false if SSL is enabled on the server - insecure: true - # ca_file: /etc/ssl/certs/ca-certificates.crt - # cert_file: /etc/ssl/certs/postgres.crt - # key_file: /etc/ssl/certs/postgres.key - metrics: - postgresql.database.locks: - enabled: true - postgresql.deadlocks: - enabled: true - postgresql.sequential_scans: - enabled: true - -processors: - # enriches the data with additional host information - # see https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor - resourcedetection/system: - # add additional detectors if needed - detectors: ["system"] - system: - hostname_sources: ["os"] - -exporters: - # export to SigNoz cloud - otlp/postgres: - endpoint: "${env:OTLP_DESTINATION_ENDPOINT}" - tls: - insecure: ${env:OTLP_DESTINATION_TLS_INSECURE} - headers: - "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}" - -service: - pipelines: - metrics/postgresql: - receivers: [postgresql] - # note: remove this processor if the collector host is not running on the same host as the postgres instance - processors: [resourcedetection/system] - exporters: [otlp/postgres] -``` - -Use the following config if using self-hosted SigNoz. See the config above if using SigNoz cloud -```yaml -receivers: - postgresql: - # The endpoint of the postgresql server. Whether using TCP or Unix sockets, this value should be host:port. If transport is set to unix, the endpoint will internally be translated from host:port to /host.s.PGSQL.port - endpoint: ${env:POSTGRESQL_ENDPOINT} - # The frequency at which to collect metrics from the Postgres instance. - collection_interval: 60s - # The username used to access the postgres instance - username: monitoring - # The password used to access the postgres instance - password: ${env:POSTGRESQL_PASSWORD} - # The list of databases for which the receiver will attempt to collect statistics. 
If an empty list is provided, the receiver will attempt to collect statistics for all non-template databases - databases: [] - # # Defines the network to use for connecting to the server. Valid Values are `tcp` or `unix` - # transport: tcp - tls: - # set to false if SSL is enabled on the server - insecure: true - # ca_file: /etc/ssl/certs/ca-certificates.crt - # cert_file: /etc/ssl/certs/postgres.crt - # key_file: /etc/ssl/certs/postgres.key - metrics: - postgresql.database.locks: - enabled: true - postgresql.deadlocks: - enabled: true - postgresql.sequential_scans: - enabled: true - -processors: - # enriches the data with additional host information - # see https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor - resourcedetection/system: - # add additional detectors if needed - detectors: ["system"] - system: - hostname_sources: ["os"] - -exporters: - otlp/postgres: - endpoint: "${env:OTLP_DESTINATION_ENDPOINT}" - tls: - insecure: ${env:OTLP_DESTINATION_TLS_INSECURE} - -service: - pipelines: - metrics/postgresql: - receivers: [postgresql] - # note: remove this processor if the collector host is not running on the same host as the postgres instance - processors: [resourcedetection/system] - exporters: [otlp/postgres] -``` - - -#### Set Environment Variables - -Set the following environment variables in your otel-collector environment: - -```bash - -# password for postgres monitoring user" -export POSTGRESQL_PASSWORD="password" - -# postgres endpoint reachable from the otel collector" -export POSTGRESQL_ENDPOINT="host:port" - -# A reachable OTLP destination for collected metrics. 
Eg: localhost:4317 or signoz cloud ingestion endpoint -export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443" - -# Set to true if using an endpoint without TLS -export OTLP_DESTINATION_TLS_INSECURE="false" - -# your signoz ingestion key if using SigNoz cloud -export SIGNOZ_INGESTION_KEY="key" - -``` - -#### Use collector config file - -Make the `postgres-collector-config.yaml` file available to your otel collector and add the flag `--config postgres-collector-config.yaml` to the command for running your otel collector. -Note: the collector can use multiple config files, specified by multiple occurrences of the --config flag. diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md index fbfc9e9052..e50282d2a8 100644 --- a/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md +++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/config/prerequisites.md @@ -1,22 +1,40 @@ -### Prepare postgres for monitoring +## Before You Begin -- Have a running postgresql instance -- Have the monitoring user created -- Have the monitoring user granted the necessary permissions +To configure metrics and logs collection for a Postgres server, you need the following. -This receiver supports PostgreSQL versions 9.6+ +### Ensure Postgres server is prepared for monitoring -For PostgreSQL versions 10+, run: +- **Ensure that the Postgres server is running a supported version** + Postgres versions 9.6+ are supported. 
+ You can use the following SQL statement to determine server version + ```SQL + SELECT version(); + ``` -```sql -create user monitoring with password ''; -grant pg_monitor to monitoring; -grant SELECT ON pg_stat_database to monitoring; -``` +- **If collecting metrics, ensure that there is a Postgres user with required permissions** + To create a monitoring user for Postgres versions 10+, run: + ```SQL + create user monitoring with password ''; + grant pg_monitor to monitoring; + grant SELECT ON pg_stat_database to monitoring; + ``` + + To create a monitoring user for Postgres versions >= 9.6 and <10, run: + ```SQL + create user monitoring with password ''; + grant SELECT ON pg_stat_database to monitoring; + ``` + -For PostgreSQL versions >= 9.6 and <10, run: +### Ensure OTEL Collector is running with access to the Postgres server -```sql -create user monitoring with password ''; -grant SELECT ON pg_stat_database to monitoring; -``` +- **Ensure that an OTEL collector is running in your deployment environment** + If needed, please [install an OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) + If already installed, ensure that the collector version is v0.88.0 or newer. + + Also ensure that you can provide config files to the collector and that you can set environment variables and command line flags used for running it. + +- **Ensure that the OTEL collector can access the Postgres server** + In order to collect metrics, the collector must be able to access the Postgres server as a client using the monitoring user. + + In order to collect logs, the collector must be able to read the Postgres server log file. 
diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json b/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json index 687ca31993..c796a886ee 100644 --- a/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json +++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/integration.json @@ -1,7 +1,7 @@ { "id": "postgres", "title": "PostgreSQL", - "description": "Monitor postgres using logs and metrics.", + "description": "Monitor Postgres with metrics and logs", "author": { "name": "SigNoz", "email": "integrations@signoz.io", @@ -18,8 +18,12 @@ "instructions": "file://config/prerequisites.md" }, { - "title": "Configure Otel Collector", - "instructions": "file://config/configure-otel-collector.md" + "title": "Collect Metrics", + "instructions": "file://config/collect-metrics.md" + }, + { + "title": "Collect Logs", + "instructions": "file://config/collect-logs.md" } ], "assets": { @@ -48,30 +52,48 @@ } }, "data_collected": { - "logs": [], + "logs": [ + { + "name": "Process ID", + "path": "attributes.pid", + "type": "string" + }, { + "name": "Timestamp", + "path": "timestamp", + "type": "timestamp" + }, { + "name": "Severity Text", + "path": "severity_text", + "type": "string" + }, { + "name": "Severity Number", + "path": "severity_number", + "type": "number" + } + ], "metrics": [ { "name": "postgresql.backends", "type": "sum", - "unit": "1", + "unit": "number", "description": "The number of backends." }, { "name": "postgresql.bgwriter.buffers.allocated", "type": "sum", - "unit": "{buffers}", + "unit": "number", "description": "Number of buffers allocated." }, { "name": "postgresql.bgwriter.buffers.writes", "type": "sum", - "unit": "{buffers}", + "unit": "number", "description": "Number of buffers written." 
}, { "name": "postgresql.bgwriter.checkpoint.count", "type": "sum", - "unit": "{checkpoints}", + "unit": "number", "description": "The number of checkpoints performed." }, { @@ -83,133 +105,133 @@ { "name": "postgresql.bgwriter.maxwritten", "type": "sum", - "unit": "1", + "unit": "number", "description": "Number of times the background writer stopped a cleaning scan because it had written too many buffers." }, { "name": "postgresql.blocks_read", "type": "sum", - "unit": "1", + "unit": "number", "description": "The number of blocks read." }, { "name": "postgresql.commits", "type": "sum", - "unit": "1", + "unit": "number", "description": "The number of commits." }, { "name": "postgresql.connection.max", "type": "gauge", - "unit": "{connections}", + "unit": "number", "description": "Configured maximum number of client connections allowed" }, { "name": "postgresql.database.count", "type": "sum", - "unit": "{databases}", + "unit": "number", "description": "Number of user databases." }, { "name": "postgresql.database.locks", "type": "gauge", - "unit": "{lock}", + "unit": "number", "description": "The number of database locks." }, { "name": "postgresql.db_size", "type": "sum", - "unit": "By", + "unit": "Bytes", "description": "The database disk usage." }, { "name": "postgresql.deadlocks", "type": "sum", - "unit": "{deadlock}", + "unit": "number", "description": "The number of deadlocks." }, { "name": "postgresql.index.scans", "type": "sum", - "unit": "{scans}", + "unit": "number", "description": "The number of index scans on a table." }, { "name": "postgresql.index.size", "type": "gauge", - "unit": "By", + "unit": "Bytes", "description": "The size of the index on disk." }, { "name": "postgresql.operations", "type": "sum", - "unit": "1", + "unit": "number", "description": "The number of db row operations." }, { "name": "postgresql.replication.data_delay", "type": "gauge", - "unit": "By", + "unit": "Bytes", "description": "The amount of data delayed in replication." 
}, { "name": "postgresql.rollbacks", "type": "sum", - "unit": "1", + "unit": "number", "description": "The number of rollbacks." }, { "name": "postgresql.rows", "type": "sum", - "unit": "1", + "unit": "number", "description": "The number of rows in the database." }, { "name": "postgresql.sequential_scans", "type": "sum", - "unit": "{sequential_scan}", + "unit": "number", "description": "The number of sequential scans." }, { "name": "postgresql.table.count", "type": "sum", - "unit": "{table}", + "unit": "number", "description": "Number of user tables in a database." }, { "name": "postgresql.table.size", "type": "sum", - "unit": "By", + "unit": "Bytes", "description": "Disk space used by a table." }, { "name": "postgresql.table.vacuum.count", "type": "sum", - "unit": "{vacuums}", + "unit": "number", "description": "Number of times a table has manually been vacuumed." }, { "name": "postgresql.temp_files", "type": "sum", - "unit": "{temp_file}", + "unit": "number", "description": "The number of temp files." }, { "name": "postgresql.wal.age", "type": "gauge", - "unit": "s", + "unit": "seconds", "description": "Age of the oldest WAL file." }, { "name": "postgresql.wal.delay", "type": "gauge", - "unit": "s", + "unit": "seconds", "description": "Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it." }, { "name": "postgresql.wal.lag", "type": "gauge", - "unit": "s", + "unit": "seconds", "description": "Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it." 
} ] diff --git a/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md b/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md index 4af57e6b20..ac6e061eca 100644 --- a/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md +++ b/pkg/query-service/app/integrations/builtin_integrations/postgres/overview.md @@ -1,3 +1,5 @@ ### Monitor Postgres with SigNoz -Parse your Postgres logs and collect key metrics. +Collect key Postgres metrics and view them with an out of the box dashboard. + +Collect and parse Postgres logs to populate timestamp, severity, and other log attributes for better querying and aggregation. From 994814864cf578a0852b2e7d9a6696929c676b8a Mon Sep 17 00:00:00 2001 From: Vibhu Pandey Date: Tue, 26 Mar 2024 06:20:35 +0530 Subject: [PATCH 22/33] fix: send 403 on wrong password entry during change password operation (#4733) --- pkg/query-service/app/http_handler.go | 7 +++---- pkg/query-service/auth/auth.go | 11 +++++------ pkg/query-service/model/response.go | 7 +++++++ 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 16b741e572..964850cbf8 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -2363,10 +2363,9 @@ func (aH *APIHandler) changePassword(w http.ResponseWriter, r *http.Request) { return } - if err := auth.ChangePassword(context.Background(), req); err != nil { - if aH.HandleError(w, err, http.StatusInternalServerError) { - return - } + if apiErr := auth.ChangePassword(context.Background(), req); apiErr != nil { + RespondError(w, apiErr, nil) + return } aH.WriteJSON(w, r, map[string]string{"data": "password changed successfully"}) diff --git a/pkg/query-service/auth/auth.go b/pkg/query-service/auth/auth.go index 6b96a6da85..e307f401ab 100644 --- a/pkg/query-service/auth/auth.go +++ b/pkg/query-service/auth/auth.go @@ -234,24 +234,23 @@ 
func ResetPassword(ctx context.Context, req *model.ResetPasswordRequest) error { return nil } -func ChangePassword(ctx context.Context, req *model.ChangePasswordRequest) error { - +func ChangePassword(ctx context.Context, req *model.ChangePasswordRequest) *model.ApiError { user, apiErr := dao.DB().GetUser(ctx, req.UserId) if apiErr != nil { - return errors.Wrap(apiErr.Err, "failed to query user from the DB") + return apiErr } if user == nil || !passwordMatch(user.Password, req.OldPassword) { - return ErrorInvalidCreds + return model.ForbiddenError(ErrorInvalidCreds) } hash, err := PasswordHash(req.NewPassword) if err != nil { - return errors.Wrap(err, "Failed to generate password hash") + return model.InternalError(errors.New("Failed to generate password hash")) } if apiErr := dao.DB().UpdateUserPassword(ctx, hash, user.Id); apiErr != nil { - return apiErr.Err + return apiErr } return nil diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go index a8e09b9d6e..1f3970e0d4 100644 --- a/pkg/query-service/model/response.go +++ b/pkg/query-service/model/response.go @@ -112,6 +112,13 @@ func UnavailableError(err error) *ApiError { } } +func ForbiddenError(err error) *ApiError { + return &ApiError{ + Typ: ErrorForbidden, + Err: err, + } +} + func WrapApiError(err *ApiError, msg string) *ApiError { return &ApiError{ Typ: err.Type(), From 83f68f13db3dbedf692f7d2b0eb25c4ea99410cb Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Tue, 26 Mar 2024 12:40:53 +0530 Subject: [PATCH 23/33] feat: add ability to customize alert frequency (#4697) --- frontend/public/locales/en-GB/alerts.json | 1 + frontend/public/locales/en/alerts.json | 1 + .../container/FormAlertRules/RuleOptions.tsx | 108 ++++++++++++------ .../src/container/FormAlertRules/styles.ts | 7 ++ frontend/src/pages/AlertList/index.tsx | 5 + frontend/src/types/api/alerts/def.ts | 1 + pkg/query-service/rules/manager.go | 4 +- pkg/query-service/rules/thresholdRule.go | 2 +- 8 files 
changed, 90 insertions(+), 39 deletions(-) diff --git a/frontend/public/locales/en-GB/alerts.json b/frontend/public/locales/en-GB/alerts.json index fb360e579b..4dffb641d3 100644 --- a/frontend/public/locales/en-GB/alerts.json +++ b/frontend/public/locales/en-GB/alerts.json @@ -112,6 +112,7 @@ "exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.", "field_unit": "Threshold unit", "text_alert_on_absent": "Send a notification if data is missing for", + "text_alert_frequency": "Run alert every", "text_for": "minutes", "selected_query_placeholder": "Select query" } diff --git a/frontend/public/locales/en/alerts.json b/frontend/public/locales/en/alerts.json index 0349568c70..33714d4429 100644 --- a/frontend/public/locales/en/alerts.json +++ b/frontend/public/locales/en/alerts.json @@ -112,6 +112,7 @@ "exceptions_based_alert_desc": "Send a notification when a condition occurs in the exceptions data.", "field_unit": "Threshold unit", "text_alert_on_absent": "Send a notification if data is missing for", + "text_alert_frequency": "Run alert every", "text_for": "minutes", "selected_query_placeholder": "Select query" } diff --git a/frontend/src/container/FormAlertRules/RuleOptions.tsx b/frontend/src/container/FormAlertRules/RuleOptions.tsx index 88e7c83979..d62b39f30f 100644 --- a/frontend/src/container/FormAlertRules/RuleOptions.tsx +++ b/frontend/src/container/FormAlertRules/RuleOptions.tsx @@ -1,5 +1,6 @@ import { Checkbox, + Collapse, Form, InputNumber, InputNumberProps, @@ -24,7 +25,12 @@ import { import { EQueryType } from 'types/common/dashboard'; import { popupContainer } from 'utils/selectPopupContainer'; -import { FormContainer, InlineSelect, StepHeading } from './styles'; +import { + FormContainer, + InlineSelect, + StepHeading, + VerticalLine, +} from './styles'; function RuleOptions({ alertDef, @@ -238,42 +244,72 @@ function RuleOptions({ /> - - - { - setAlertDef({ - ...alertDef, - condition: { - ...alertDef.condition, 
- alertOnAbsent: e.target.checked, - }, - }); - }} - /> - - {t('text_alert_on_absent')} + + + + + + {t('text_alert_frequency')} + + { + setAlertDef({ + ...alertDef, + frequency: Number(value) || 0, + }); + }} + type="number" + onWheel={(e): void => e.currentTarget.blur()} + /> + + {t('text_for')} + + - - { - setAlertDef({ - ...alertDef, - condition: { - ...alertDef.condition, - absentFor: Number(value) || 0, - }, - }); - }} - type="number" - onWheel={(e): void => e.currentTarget.blur()} - /> - - {t('text_for')} - + + + + { + setAlertDef({ + ...alertDef, + condition: { + ...alertDef.condition, + alertOnAbsent: e.target.checked, + }, + }); + }} + /> + + {t('text_alert_on_absent')} + + + { + setAlertDef({ + ...alertDef, + condition: { + ...alertDef.condition, + absentFor: Number(value) || 0, + }, + }); + }} + type="number" + onWheel={(e): void => e.currentTarget.blur()} + /> + + {t('text_for')} + + + + + diff --git a/frontend/src/container/FormAlertRules/styles.ts b/frontend/src/container/FormAlertRules/styles.ts index 9fcaf4c59c..11205c0ab4 100644 --- a/frontend/src/container/FormAlertRules/styles.ts +++ b/frontend/src/container/FormAlertRules/styles.ts @@ -67,6 +67,13 @@ export const SeveritySelect = styled(Select)` width: 25% !important; `; +export const VerticalLine = styled.div` + border-left: 2px solid #e8e8e8; /* Adjust color and thickness as desired */ + padding-left: 20px; /* Adjust spacing to content as needed */ + margin-left: 20px; /* Adjust margin as desired */ + height: 100%; /* Adjust based on your layout needs */ +`; + export const InputSmall = styled(Input)` width: 40% !important; `; diff --git a/frontend/src/pages/AlertList/index.tsx b/frontend/src/pages/AlertList/index.tsx index 336c399a2f..33f3ada0f9 100644 --- a/frontend/src/pages/AlertList/index.tsx +++ b/frontend/src/pages/AlertList/index.tsx @@ -12,6 +12,11 @@ function AllAlertList(): JSX.Element { children: , }, // { + // label: 'Planned Downtime', + // key: 'Planned Downtime', + // // 
children: , + // }, + // { // label: 'Map Alert Channels', // key = 'Map Alert Channels', // children: , diff --git a/frontend/src/types/api/alerts/def.ts b/frontend/src/types/api/alerts/def.ts index af3a4bc912..96fa86654f 100644 --- a/frontend/src/types/api/alerts/def.ts +++ b/frontend/src/types/api/alerts/def.ts @@ -14,6 +14,7 @@ export interface AlertDef { alertType?: string; alert?: string; ruleType?: string; + frequency?: number | undefined; condition: RuleCondition; labels?: Labels; annotations?: Labels; diff --git a/pkg/query-service/rules/manager.go b/pkg/query-service/rules/manager.go index 530bb30d14..95181eade6 100644 --- a/pkg/query-service/rules/manager.go +++ b/pkg/query-service/rules/manager.go @@ -525,7 +525,7 @@ func (m *Manager) prepareTask(acquireLock bool, r *PostableRule, taskName string rules = append(rules, tr) // create ch rule task for evalution - task = newTask(TaskTypeCh, taskName, taskNamesuffix, time.Duration(r.Frequency), rules, m.opts, m.prepareNotifyFunc()) + task = newTask(TaskTypeCh, taskName, taskNamesuffix, time.Duration(r.Frequency*Duration(time.Minute)), rules, m.opts, m.prepareNotifyFunc()) // add rule to memory m.rules[ruleId] = tr @@ -547,7 +547,7 @@ func (m *Manager) prepareTask(acquireLock bool, r *PostableRule, taskName string rules = append(rules, pr) // create promql rule task for evalution - task = newTask(TaskTypeProm, taskName, taskNamesuffix, time.Duration(r.Frequency), rules, m.opts, m.prepareNotifyFunc()) + task = newTask(TaskTypeProm, taskName, taskNamesuffix, time.Duration(r.Frequency*Duration(time.Minute)), rules, m.opts, m.prepareNotifyFunc()) // add rule to memory m.rules[ruleId] = pr diff --git a/pkg/query-service/rules/thresholdRule.go b/pkg/query-service/rules/thresholdRule.go index 0fdb3745ca..f358d80393 100644 --- a/pkg/query-service/rules/thresholdRule.go +++ b/pkg/query-service/rules/thresholdRule.go @@ -713,7 +713,7 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer 
zap.S().Debugf("ruleid:", r.ID(), "\t resultmap(potential alerts):", len(resultMap)) // if the data is missing for `For` duration then we should send alert - if r.ruleCondition.AlertOnAbsent && r.lastTimestampWithDatapoints.Add(r.Condition().AbsentFor).Before(time.Now()) { + if r.ruleCondition.AlertOnAbsent && r.lastTimestampWithDatapoints.Add(r.Condition().AbsentFor*time.Minute).Before(time.Now()) { zap.S().Debugf("ruleid:", r.ID(), "\t msg: no data found for rule condition") lbls := labels.NewBuilder(labels.Labels{}) if !r.lastTimestampWithDatapoints.IsZero() { From 4c91dbcff0b70dd06594596e01fffb9f8513c12f Mon Sep 17 00:00:00 2001 From: Rajat Dabade Date: Tue, 26 Mar 2024 17:09:13 +0530 Subject: [PATCH 24/33] Explorer Toolbar maximised and minimised (#4721) --- frontend/src/constants/localStorage.ts | 1 + .../ExplorerOptions/ExplorerOptionWrapper.tsx | 39 ++++ .../ExplorerOptions.styles.scss | 27 ++- .../ExplorerOptions/ExplorerOptions.tsx | 212 +++++++++++------- .../ExplorerOptionsHideArea.styles.scss | 55 +++++ .../ExplorerOptionsHideArea.tsx | 78 +++++++ .../src/container/ExplorerOptions/utils.ts | 52 +++++ .../src/container/LogsExplorerViews/index.tsx | 4 +- frontend/src/pages/TracesExplorer/index.tsx | 6 +- 9 files changed, 376 insertions(+), 98 deletions(-) create mode 100644 frontend/src/container/ExplorerOptions/ExplorerOptionWrapper.tsx create mode 100644 frontend/src/container/ExplorerOptions/ExplorerOptionsHideArea.styles.scss create mode 100644 frontend/src/container/ExplorerOptions/ExplorerOptionsHideArea.tsx diff --git a/frontend/src/constants/localStorage.ts b/frontend/src/constants/localStorage.ts index 296735b286..0ba6cac302 100644 --- a/frontend/src/constants/localStorage.ts +++ b/frontend/src/constants/localStorage.ts @@ -16,4 +16,5 @@ export enum LOCALSTORAGE { CHAT_SUPPORT = 'CHAT_SUPPORT', IS_IDENTIFIED_USER = 'IS_IDENTIFIED_USER', DASHBOARD_VARIABLES = 'DASHBOARD_VARIABLES', + SHOW_EXPLORER_TOOLBAR = 'SHOW_EXPLORER_TOOLBAR', } diff --git 
a/frontend/src/container/ExplorerOptions/ExplorerOptionWrapper.tsx b/frontend/src/container/ExplorerOptions/ExplorerOptionWrapper.tsx new file mode 100644 index 0000000000..a2e0eff9c8 --- /dev/null +++ b/frontend/src/container/ExplorerOptions/ExplorerOptionWrapper.tsx @@ -0,0 +1,39 @@ +import { useEffect, useState } from 'react'; + +import ExplorerOptions, { ExplorerOptionsProps } from './ExplorerOptions'; +import { getExplorerToolBarVisibility } from './utils'; + +type ExplorerOptionsWrapperProps = Omit< + ExplorerOptionsProps, + 'isExplorerOptionDrop' +>; + +function ExplorerOptionWrapper({ + disabled, + query, + isLoading, + onExport, + sourcepage, +}: ExplorerOptionsWrapperProps): JSX.Element { + const [isExplorerOptionHidden, setIsExplorerOptionHidden] = useState(false); + + useEffect(() => { + const toolbarVisibility = getExplorerToolBarVisibility(sourcepage); + setIsExplorerOptionHidden(!toolbarVisibility); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []); + + return ( + + ); +} + +export default ExplorerOptionWrapper; diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss b/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss index d76d18bb4f..cddeb356b8 100644 --- a/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss +++ b/frontend/src/container/ExplorerOptions/ExplorerOptions.styles.scss @@ -3,8 +3,8 @@ } .explorer-update { position: fixed; - bottom: 16px; - left: calc(50% - 225px); + bottom: 24px; + left: calc(50% - 250px); display: flex; align-items: center; gap: 12px; @@ -37,21 +37,24 @@ } } + .explorer-options { - display: flex; - gap: 16px; + position: fixed; + bottom: 24px; + left: calc(50% + 240px); padding: 10px 12px; - border-radius: 50px; + transform: translate(calc(-50% - 120px), 0); + transition: left 0.2s linear; border: 1px solid var(--bg-slate-400); + border-radius: 50px; background: rgba(22, 24, 29, 0.6); box-shadow: 4px 4px 16px 4px rgba(0, 0, 0, 0.25); 
backdrop-filter: blur(20px); - position: fixed; - bottom: 16px; - left: calc(50% + 240px); - transform: translate(calc(-50% - 120px), 0); - transition: left 0.2s linear; + cursor: default; + display: flex; + gap: 16px; + z-index: 1; .ant-select-selector { padding: 0 !important; } @@ -236,9 +239,9 @@ .lightMode { .explorer-options { + background: transparent; + box-shadow: none; border: 1px solid var(--bg-vanilla-300); - background: rgba(255, 255, 255, 0.8); - box-shadow: 4px 4px 16px 4px rgba(255, 255, 255, 0.55); backdrop-filter: blur(20px); hr { diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx b/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx index 8322c694d6..635d085e1e 100644 --- a/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx +++ b/frontend/src/container/ExplorerOptions/ExplorerOptions.tsx @@ -1,3 +1,4 @@ +/* eslint-disable react/jsx-props-no-spreading */ import './ExplorerOptions.styles.scss'; import { Color } from '@signozhq/design-tokens'; @@ -30,8 +31,24 @@ import useErrorNotification from 'hooks/useErrorNotification'; import { useHandleExplorerTabChange } from 'hooks/useHandleExplorerTabChange'; import { useNotifications } from 'hooks/useNotifications'; import { mapCompositeQueryFromQuery } from 'lib/newQueryBuilder/queryBuilderMappers/mapCompositeQueryFromQuery'; -import { Check, ConciergeBell, Disc3, Plus, X, XCircle } from 'lucide-react'; -import { CSSProperties, useCallback, useMemo, useRef, useState } from 'react'; +import { + Check, + ConciergeBell, + Disc3, + PanelBottomClose, + Plus, + X, + XCircle, +} from 'lucide-react'; +import { + CSSProperties, + Dispatch, + SetStateAction, + useCallback, + useMemo, + useRef, + useState, +} from 'react'; import { useSelector } from 'react-redux'; import { useHistory } from 'react-router-dom'; import { AppState } from 'store/reducers'; @@ -41,11 +58,13 @@ import { DataSource } from 'types/common/queryBuilder'; import AppReducer from 'types/reducer/app'; import { 
USER_ROLES } from 'types/roles'; +import ExplorerOptionsHideArea from './ExplorerOptionsHideArea'; import { DATASOURCE_VS_ROUTES, generateRGBAFromHex, getRandomColor, saveNewViewHandler, + setExplorerToolBarVisibility, } from './utils'; const allowedRoles = [USER_ROLES.ADMIN, USER_ROLES.AUTHOR, USER_ROLES.EDITOR]; @@ -57,6 +76,8 @@ function ExplorerOptions({ onExport, query, sourcepage, + isExplorerOptionHidden = false, + setIsExplorerOptionHidden, }: ExplorerOptionsProps): JSX.Element { const [isExport, setIsExport] = useState(false); const [isSaveModalOpen, setIsSaveModalOpen] = useState(false); @@ -257,11 +278,18 @@ function ExplorerOptions({ [isDarkMode], ); + const hideToolbar = (): void => { + setExplorerToolBarVisibility(false, sourcepage); + if (setIsExplorerOptionHidden) { + setIsExplorerOptionHidden(true); + } + }; + const isEditDeleteSupported = allowedRoles.includes(role as string); return ( <> - {isQueryUpdated && ( + {isQueryUpdated && !isExplorerOptionHidden && (
)} -
-
- - showSearch - placeholder="Select a view" - loading={viewsIsLoading || isRefetching} - value={viewName || undefined} - onSelect={handleSelect} - style={{ - minWidth: 170, - }} - dropdownStyle={dropdownStyle} - className="views-dropdown" - allowClear={{ - clearIcon: , - }} - onClear={handleClearSelect} - ref={ref} - > - {viewsData?.data?.data?.map((view) => { - const extraData = - view.extraData !== '' ? JSON.parse(view.extraData) : ''; - let bgColor = getRandomColor(); - if (extraData !== '') { - bgColor = extraData.color; - } - return ( - -
- {' '} - {view.name} -
-
- ); - })} - - - -
- -
- -
- - - + {viewsData?.data?.data?.map((view) => { + const extraData = + view.extraData !== '' ? JSON.parse(view.extraData) : ''; + let bgColor = getRandomColor(); + if (extraData !== '') { + bgColor = extraData.color; + } + return ( + +
+ {' '} + {view.name} +
+
+ ); + })} + - - - +
+ +
+ +
+ + + + + + + + + + + +
-
+ )} + + >; } -ExplorerOptions.defaultProps = { isLoading: false }; +ExplorerOptions.defaultProps = { + isLoading: false, + isExplorerOptionHidden: false, + setIsExplorerOptionHidden: undefined, +}; export default ExplorerOptions; diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptionsHideArea.styles.scss b/frontend/src/container/ExplorerOptions/ExplorerOptionsHideArea.styles.scss new file mode 100644 index 0000000000..e45b9e893c --- /dev/null +++ b/frontend/src/container/ExplorerOptions/ExplorerOptionsHideArea.styles.scss @@ -0,0 +1,55 @@ +.explorer-option-droppable-container { + position: fixed; + bottom: 0; + width: -webkit-fill-available; + height: 24px; + display: flex; + justify-content: center; + border-radius: 10px 10px 0px 0px; + // box-shadow: 0px 4px 16px 0px rgba(0, 0, 0, 0.25); + // backdrop-filter: blur(20px); + + .explorer-actions-btn { + display: flex; + gap: 8px; + margin-right: 8px; + + .action-btn { + display: flex; + justify-content: center; + align-items: center; + border-radius: 10px 10px 0px 0px; + box-shadow: 0px 4px 16px 0px rgba(0, 0, 0, 0.25); + backdrop-filter: blur(20px); + height: 24px !important; + border: none; + } + } + + .explorer-show-btn { + border-radius: 10px 10px 0px 0px; + border: 1px solid var(--bg-slate-400); + background: rgba(22, 24, 29, 0.40); + box-shadow: 0px 4px 16px 0px rgba(0, 0, 0, 0.25); + backdrop-filter: blur(20px); + align-self: center; + padding: 8px 12px; + height: 24px !important; + + .menu-bar { + border-radius: 50px; + background: var(--bg-slate-200); + height: 4px; + width: 50px; + } + } +} + +.lightMode { + .explorer-option-droppable-container { + + .explorer-show-btn { + background: var(--bg-vanilla-200); + } + } +} \ No newline at end of file diff --git a/frontend/src/container/ExplorerOptions/ExplorerOptionsHideArea.tsx b/frontend/src/container/ExplorerOptions/ExplorerOptionsHideArea.tsx new file mode 100644 index 0000000000..f5e7faf0dc --- /dev/null +++ 
b/frontend/src/container/ExplorerOptions/ExplorerOptionsHideArea.tsx @@ -0,0 +1,78 @@ +/* eslint-disable no-nested-ternary */ +import './ExplorerOptionsHideArea.styles.scss'; + +import { Color } from '@signozhq/design-tokens'; +import { Button, Tooltip } from 'antd'; +import { Disc3, X } from 'lucide-react'; +import { Dispatch, SetStateAction } from 'react'; +import { DataSource } from 'types/common/queryBuilder'; + +import { setExplorerToolBarVisibility } from './utils'; + +interface DroppableAreaProps { + isQueryUpdated: boolean; + isExplorerOptionHidden?: boolean; + sourcepage: DataSource; + setIsExplorerOptionHidden?: Dispatch>; + handleClearSelect: () => void; + onUpdateQueryHandler: () => void; +} + +function ExplorerOptionsHideArea({ + isQueryUpdated, + isExplorerOptionHidden, + sourcepage, + setIsExplorerOptionHidden, + handleClearSelect, + onUpdateQueryHandler, +}: DroppableAreaProps): JSX.Element { + const handleShowExplorerOption = (): void => { + if (setIsExplorerOptionHidden) { + setIsExplorerOptionHidden(false); + setExplorerToolBarVisibility(true, sourcepage); + } + }; + + return ( +
+ {isExplorerOptionHidden && ( + <> + {isQueryUpdated && ( +
+ +
+ )} + + + )} +
+ ); +} + +ExplorerOptionsHideArea.defaultProps = { + isExplorerOptionHidden: undefined, + setIsExplorerOptionHidden: undefined, +}; + +export default ExplorerOptionsHideArea; diff --git a/frontend/src/container/ExplorerOptions/utils.ts b/frontend/src/container/ExplorerOptions/utils.ts index e3ac710609..d94e64161e 100644 --- a/frontend/src/container/ExplorerOptions/utils.ts +++ b/frontend/src/container/ExplorerOptions/utils.ts @@ -1,5 +1,6 @@ import { Color } from '@signozhq/design-tokens'; import { showErrorNotification } from 'components/ExplorerCard/utils'; +import { LOCALSTORAGE } from 'constants/localStorage'; import { QueryParams } from 'constants/query'; import ROUTES from 'constants/routes'; import { mapQueryDataFromApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataFromApi'; @@ -67,3 +68,54 @@ export const generateRGBAFromHex = (hex: string, opacity: number): string => hex.slice(3, 5), 16, )}, ${parseInt(hex.slice(5, 7), 16)}, ${opacity})`; + +export const getExplorerToolBarVisibility = (dataSource: string): boolean => { + try { + const showExplorerToolbar = localStorage.getItem( + LOCALSTORAGE.SHOW_EXPLORER_TOOLBAR, + ); + if (showExplorerToolbar === null) { + const parsedShowExplorerToolbar: { + [DataSource.LOGS]: boolean; + [DataSource.TRACES]: boolean; + [DataSource.METRICS]: boolean; + } = { + [DataSource.METRICS]: true, + [DataSource.TRACES]: true, + [DataSource.LOGS]: true, + }; + localStorage.setItem( + LOCALSTORAGE.SHOW_EXPLORER_TOOLBAR, + JSON.stringify(parsedShowExplorerToolbar), + ); + return true; + } + const parsedShowExplorerToolbar = JSON.parse(showExplorerToolbar || '{}'); + return parsedShowExplorerToolbar[dataSource]; + } catch (error) { + console.error(error); + return false; + } +}; + +export const setExplorerToolBarVisibility = ( + value: boolean, + dataSource: string, +): void => { + try { + const showExplorerToolbar = localStorage.getItem( + LOCALSTORAGE.SHOW_EXPLORER_TOOLBAR, + ); + if (showExplorerToolbar) { + const 
parsedShowExplorerToolbar = JSON.parse(showExplorerToolbar); + parsedShowExplorerToolbar[dataSource] = value; + localStorage.setItem( + LOCALSTORAGE.SHOW_EXPLORER_TOOLBAR, + JSON.stringify(parsedShowExplorerToolbar), + ); + return; + } + } catch (error) { + console.error(error); + } +}; diff --git a/frontend/src/container/LogsExplorerViews/index.tsx b/frontend/src/container/LogsExplorerViews/index.tsx index e07450229a..c814ac8cb6 100644 --- a/frontend/src/container/LogsExplorerViews/index.tsx +++ b/frontend/src/container/LogsExplorerViews/index.tsx @@ -15,7 +15,7 @@ import { } from 'constants/queryBuilder'; import { DEFAULT_PER_PAGE_VALUE } from 'container/Controls/config'; import Download from 'container/DownloadV2/DownloadV2'; -import ExplorerOptions from 'container/ExplorerOptions/ExplorerOptions'; +import ExplorerOptionWrapper from 'container/ExplorerOptions/ExplorerOptionWrapper'; import GoToTop from 'container/GoToTop'; import LogsExplorerChart from 'container/LogsExplorerChart'; import LogsExplorerList from 'container/LogsExplorerList'; @@ -659,7 +659,7 @@ function LogsExplorerViews({ - - From 2b3d1c8ee5ad8eb83e0d3eae245a226738bc4fde Mon Sep 17 00:00:00 2001 From: Tan Wei Been <63707630+wbtan7@users.noreply.github.com> Date: Tue, 26 Mar 2024 19:39:59 +0800 Subject: [PATCH 25/33] [Fix]: Using exported dashboards as input to dashboard provisioning #2 (#4726) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(be,fe): upsert dashboard on provision, export with uuid from frontend * chore(fe): formatting in dashboard description * fix: miss out while merging --------- Co-authored-by: HÃ¥vard Co-authored-by: Srikanth Chekuri Co-authored-by: Haavasma <61970295+Haavasma@users.noreply.github.com> --- .../DashboardDescription/index.tsx | 7 ++++- frontend/src/types/api/dashboard/getAll.ts | 1 + pkg/query-service/app/dashboards/model.go | 20 +++++++------ pkg/query-service/app/dashboards/provision.go | 29 ++++++++++++++----- 4 
files changed, 40 insertions(+), 17 deletions(-) diff --git a/frontend/src/container/NewDashboard/DashboardDescription/index.tsx b/frontend/src/container/NewDashboard/DashboardDescription/index.tsx index 996c508da4..c916ec7501 100644 --- a/frontend/src/container/NewDashboard/DashboardDescription/index.tsx +++ b/frontend/src/container/NewDashboard/DashboardDescription/index.tsx @@ -23,7 +23,12 @@ function DashboardDescription(): JSX.Element { handleDashboardLockToggle, } = useDashboard(); - const selectedData = selectedDashboard?.data || ({} as DashboardData); + const selectedData = selectedDashboard + ? { + ...selectedDashboard.data, + uuid: selectedDashboard.uuid, + } + : ({} as DashboardData); const { title = '', tags, description } = selectedData || {}; diff --git a/frontend/src/types/api/dashboard/getAll.ts b/frontend/src/types/api/dashboard/getAll.ts index e616ee28ea..ba23e55186 100644 --- a/frontend/src/types/api/dashboard/getAll.ts +++ b/frontend/src/types/api/dashboard/getAll.ts @@ -55,6 +55,7 @@ export interface Dashboard { } export interface DashboardData { + uuid?: string; description?: string; tags?: string[]; name?: string; diff --git a/pkg/query-service/app/dashboards/model.go b/pkg/query-service/app/dashboards/model.go index 698b697279..6e777f49c9 100644 --- a/pkg/query-service/app/dashboards/model.go +++ b/pkg/query-service/app/dashboards/model.go @@ -25,12 +25,14 @@ import ( var db *sqlx.DB // User for mapping job,instance from grafana -var instanceEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.instance}}\\\"") -var nodeEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.node}}\\\"") -var jobEQRE = regexp.MustCompile("job(?s)=(?s)\\\"{{.job}}\\\"") -var instanceRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.instance}}\\\"") -var nodeRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.node}}\\\"") -var jobRERE = regexp.MustCompile("job(?s)=~(?s)\\\"{{.job}}\\\"") +var ( + instanceEQRE = 
regexp.MustCompile("instance(?s)=(?s)\\\"{{.instance}}\\\"") + nodeEQRE = regexp.MustCompile("instance(?s)=(?s)\\\"{{.node}}\\\"") + jobEQRE = regexp.MustCompile("job(?s)=(?s)\\\"{{.job}}\\\"") + instanceRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.instance}}\\\"") + nodeRERE = regexp.MustCompile("instance(?s)=~(?s)\\\"{{.node}}\\\"") + jobRERE = regexp.MustCompile("job(?s)=~(?s)\\\"{{.job}}\\\"") +) // InitDB sets up setting up the connection pool global variable. func InitDB(dataSourceName string) (*sqlx.DB, error) { @@ -188,6 +190,9 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf dash.UpdateBy = &userEmail dash.UpdateSlug() dash.Uuid = uuid.New().String() + if data["uuid"] != nil { + dash.Uuid = data["uuid"].(string) + } mapData, err := json.Marshal(dash.Data) if err != nil { @@ -211,7 +216,6 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } lastInsertId, err := result.LastInsertId() - if err != nil { return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } @@ -255,7 +259,6 @@ func DeleteDashboard(ctx context.Context, uuid string, fm interfaces.FeatureLook query := `DELETE FROM dashboards WHERE uuid=?` result, err := db.Exec(query, uuid) - if err != nil { return &model.ApiError{Typ: model.ErrorExec, Err: err} } @@ -419,7 +422,6 @@ func (d *Dashboard) UpdateSlug() { } func IsPostDataSane(data *map[string]interface{}) error { - val, ok := (*data)["title"] if !ok || val == nil { return fmt.Errorf("title not found in post data") diff --git a/pkg/query-service/app/dashboards/provision.go b/pkg/query-service/app/dashboards/provision.go index 6f60dc50fe..049ae42e72 100644 --- a/pkg/query-service/app/dashboards/provision.go +++ b/pkg/query-service/app/dashboards/provision.go @@ -10,6 +10,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/constants" "go.signoz.io/signoz/pkg/query-service/interfaces" + 
"go.signoz.io/signoz/pkg/query-service/model" ) func readCurrentDir(dir string, fm interfaces.FeatureLookup) error { @@ -43,22 +44,36 @@ func readCurrentDir(dir string, fm interfaces.FeatureLookup) error { continue } - _, apiErr := GetDashboard(context.Background(), data["uuid"].(string)) - if apiErr == nil { - zap.S().Infof("Creating Dashboards: Error in file: %s\t%s", filename, "Dashboard already present in database") + id := data["uuid"] + if id == nil { + _, apiErr := CreateDashboard(context.Background(), data, fm) + if apiErr != nil { + zap.S().Errorf("Creating Dashboards: Error in file: %s\t%s", filename, apiErr.Err) + } continue } - _, apiErr = CreateDashboard(context.Background(), data, fm) + apiErr := upsertDashboard(id.(string), data, filename, fm) if apiErr != nil { - zap.S().Errorf("Creating Dashboards: Error in file: %s\t%s", filename, apiErr.Err) - continue + zap.S().Errorf("Creating Dashboards: Error upserting dashboard: %s\t%s", filename, apiErr.Err) } - } return nil } +func upsertDashboard(uuid string, data map[string]interface{}, filename string, fm interfaces.FeatureLookup) *model.ApiError { + _, apiErr := GetDashboard(context.Background(), uuid) + if apiErr == nil { + zap.S().Infof("Creating Dashboards: Already exists: %s\t%s", filename, "Dashboard already present in database, Updating dashboard") + _, apiErr := UpdateDashboard(context.Background(), uuid, data, fm) + return apiErr + } + + zap.S().Infof("Creating Dashboards: UUID not found: %s\t%s", filename, "Dashboard not present in database, Creating dashboard") + _, apiErr = CreateDashboard(context.Background(), data, fm) + return apiErr +} + func LoadDashboardFiles(fm interfaces.FeatureLookup) error { dashboardsPath := constants.GetOrDefaultEnv("DASHBOARDS_PATH", "./config/dashboards") return readCurrentDir(dashboardsPath, fm) From 9e02147d4ca49c4a137dcdaa14d304cb215dcf9a Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Tue, 26 Mar 2024 23:54:31 +0530 Subject: [PATCH 26/33] fix: [SIG-574]: 
support __ in the groupBy clause (#4747) --- .../filters/GroupByFilter/GroupByFilter.tsx | 21 +++++-------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx b/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx index 386786f70c..e7b00756f5 100644 --- a/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx +++ b/frontend/src/container/QueryBuilder/filters/GroupByFilter/GroupByFilter.tsx @@ -1,11 +1,7 @@ import { Select, Spin } from 'antd'; import { getAggregateKeys } from 'api/queryBuilder/getAttributeKeys'; // ** Constants -import { - idDivider, - QueryBuilderKeys, - selectValueDivider, -} from 'constants/queryBuilder'; +import { idDivider, QueryBuilderKeys } from 'constants/queryBuilder'; import { DEBOUNCE_DELAY } from 'constants/queryBuilderFilterConfig'; import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys'; import useDebounce from 'hooks/useDebounce'; @@ -83,11 +79,7 @@ export const GroupByFilter = memo(function GroupByFilter({ dataType={item.dataType || ''} /> ), - value: `${transformStringWithPrefix({ - str: item.key, - prefix: item.type || '', - condition: !item.isColumn, - })}${selectValueDivider}${item.id}`, + value: `${item.id}`, })) || []; setOptionsData(options); @@ -135,7 +127,8 @@ export const GroupByFilter = memo(function GroupByFilter({ const keys = await getAttributeKeys(); const groupByValues: BaseAutocompleteData[] = values.map((item) => { - const [currentValue, id] = item.value.split(selectValueDivider); + const id = item.value; + const currentValue = item.value.split(idDivider)[0]; if (id && id.includes(idDivider)) { const attribute = keys.find((item) => item.id === id); @@ -174,11 +167,7 @@ export const GroupByFilter = memo(function GroupByFilter({ condition: !item.isColumn, }), )}`, - value: `${transformStringWithPrefix({ - str: item.key, - prefix: item.type || '', - condition: 
!item.isColumn, - })}${selectValueDivider}${item.id}`, + value: `${item.id}`, }), ); From ae594061e9e7d75c6b5f9663b2c8505c2c9e24f0 Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Wed, 27 Mar 2024 00:07:29 +0530 Subject: [PATCH 27/33] chore: fix query-service logging (#4696) --- ee/query-service/app/api/auth.go | 40 +- ee/query-service/app/api/license.go | 8 +- ee/query-service/app/api/metrics.go | 4 +- ee/query-service/app/api/pat.go | 12 +- ee/query-service/app/api/traces.go | 4 +- ee/query-service/app/db/metrics.go | 49 +-- ee/query-service/app/db/trace.go | 4 +- ee/query-service/app/server.go | 28 +- ee/query-service/auth/auth.go | 12 +- ee/query-service/dao/sqlite/auth.go | 24 +- ee/query-service/dao/sqlite/domain.go | 22 +- ee/query-service/dao/sqlite/pat.go | 12 +- .../integrations/signozio/signozio.go | 6 +- ee/query-service/license/db.go | 4 +- ee/query-service/license/manager.go | 24 +- ee/query-service/main.go | 39 +- ee/query-service/model/domain.go | 45 +- ee/query-service/sso/saml/request.go | 2 +- ee/query-service/usage/manager.go | 22 +- pkg/query-service/agentConf/db.go | 12 +- pkg/query-service/agentConf/manager.go | 22 +- .../app/clickhouseReader/options.go | 2 +- .../app/clickhouseReader/reader.go | 411 ++++++++---------- pkg/query-service/app/dashboards/model.go | 36 +- pkg/query-service/app/dashboards/provision.go | 16 +- pkg/query-service/app/http_handler.go | 86 ++-- .../logparsingpipeline/collector_config.go | 2 +- .../app/logparsingpipeline/controller.go | 6 +- .../app/logparsingpipeline/db.go | 8 +- .../app/opamp/configure_ingestionRules.go | 14 +- pkg/query-service/app/opamp/model/agent.go | 4 +- pkg/query-service/app/opamp/model/agents.go | 4 +- pkg/query-service/app/opamp/opamp_server.go | 15 +- .../app/opamp/pipeline_builder.go | 8 +- pkg/query-service/app/querier/helper.go | 24 +- pkg/query-service/app/querier/querier.go | 10 +- pkg/query-service/app/querier/v2/helper.go | 16 +- pkg/query-service/app/querier/v2/querier.go | 10 
+- .../app/queryBuilder/query_builder.go | 2 +- pkg/query-service/app/server.go | 32 +- pkg/query-service/auth/auth.go | 40 +- pkg/query-service/auth/jwt.go | 2 +- pkg/query-service/cache/redis/redis.go | 6 +- pkg/query-service/constants/constants.go | 4 +- pkg/query-service/dao/sqlite/connection.go | 2 +- pkg/query-service/featureManager/manager.go | 6 +- .../integrations/alertManager/manager.go | 24 +- .../integrations/alertManager/notifier.go | 12 +- pkg/query-service/main.go | 6 +- .../queryBuilderToExpr/queryBuilderToExpr.go | 4 +- pkg/query-service/rules/alerting.go | 10 +- pkg/query-service/rules/apiParams.go | 6 - pkg/query-service/rules/db.go | 18 +- pkg/query-service/rules/manager.go | 89 ++-- pkg/query-service/rules/promRule.go | 8 +- pkg/query-service/rules/promRuleTask.go | 6 +- pkg/query-service/rules/ruleTask.go | 14 +- pkg/query-service/rules/thresholdRule.go | 53 ++- pkg/query-service/telemetry/telemetry.go | 2 +- pkg/query-service/tests/docker.go | 3 +- pkg/query-service/utils/format.go | 4 +- pkg/query-service/utils/time.go | 2 +- pkg/query-service/version/version.go | 4 +- 63 files changed, 689 insertions(+), 737 deletions(-) diff --git a/ee/query-service/app/api/auth.go b/ee/query-service/app/api/auth.go index a469b99e33..9ec99a4cc1 100644 --- a/ee/query-service/app/api/auth.go +++ b/ee/query-service/app/api/auth.go @@ -74,7 +74,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() requestBody, err := io.ReadAll(r.Body) if err != nil { - zap.S().Errorf("received no input in api\n", err) + zap.L().Error("received no input in api", zap.Error(err)) RespondError(w, model.BadRequest(err), nil) return } @@ -82,7 +82,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) { err = json.Unmarshal(requestBody, &req) if err != nil { - zap.S().Errorf("received invalid user registration request", zap.Error(err)) + zap.L().Error("received invalid user registration request", 
zap.Error(err)) RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil) return } @@ -90,13 +90,13 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) { // get invite object invite, err := baseauth.ValidateInvite(ctx, req) if err != nil { - zap.S().Errorf("failed to validate invite token", err) + zap.L().Error("failed to validate invite token", zap.Error(err)) RespondError(w, model.BadRequest(err), nil) return } if invite == nil { - zap.S().Errorf("failed to validate invite token: it is either empty or invalid", err) + zap.L().Error("failed to validate invite token: it is either empty or invalid", zap.Error(err)) RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil) return } @@ -104,7 +104,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) { // get auth domain from email domain domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email) if apierr != nil { - zap.S().Errorf("failed to get domain from email", apierr) + zap.L().Error("failed to get domain from email", zap.Error(apierr)) RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil) } @@ -205,24 +205,24 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) ctx := context.Background() if !ah.CheckFeature(model.SSO) { - zap.S().Errorf("[receiveGoogleAuth] sso requested but feature unavailable %s in org domain %s", model.SSO) + zap.L().Error("[receiveGoogleAuth] sso requested but feature unavailable in org domain") http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently) return } q := r.URL.Query() if errType := q.Get("error"); errType != "" { - zap.S().Errorf("[receiveGoogleAuth] failed to login with google auth", q.Get("error_description")) + zap.L().Error("[receiveGoogleAuth] failed to login with google auth", zap.String("error", errType), 
zap.String("error_description", q.Get("error_description"))) http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "failed to login through SSO "), http.StatusMovedPermanently) return } relayState := q.Get("state") - zap.S().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState)) + zap.L().Debug("[receiveGoogleAuth] relay state received", zap.String("state", relayState)) parsedState, err := url.Parse(relayState) if err != nil || relayState == "" { - zap.S().Errorf("[receiveGoogleAuth] failed to process response - invalid response from IDP", err, r) + zap.L().Error("[receiveGoogleAuth] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r)) handleSsoError(w, r, redirectUri) return } @@ -244,14 +244,14 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request) identity, err := callbackHandler.HandleCallback(r) if err != nil { - zap.S().Errorf("[receiveGoogleAuth] failed to process HandleCallback ", domain.String(), zap.Error(err)) + zap.L().Error("[receiveGoogleAuth] failed to process HandleCallback ", zap.String("domain", domain.String()), zap.Error(err)) handleSsoError(w, r, redirectUri) return } nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email) if err != nil { - zap.S().Errorf("[receiveGoogleAuth] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err)) + zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err)) handleSsoError(w, r, redirectUri) return } @@ -266,14 +266,14 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) { ctx := context.Background() if !ah.CheckFeature(model.SSO) { - zap.S().Errorf("[receiveSAML] sso requested but feature unavailable %s in org domain %s", model.SSO) + zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain") http.Redirect(w, 
r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently) return } err := r.ParseForm() if err != nil { - zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r) + zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r)) handleSsoError(w, r, redirectUri) return } @@ -281,11 +281,11 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) { // the relay state is sent when a login request is submitted to // Idp. relayState := r.FormValue("RelayState") - zap.S().Debug("[receiveML] relay state", zap.String("relayState", relayState)) + zap.L().Debug("[receiveML] relay state", zap.String("relayState", relayState)) parsedState, err := url.Parse(relayState) if err != nil || relayState == "" { - zap.S().Errorf("[receiveSAML] failed to process response - invalid response from IDP", err, r) + zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r)) handleSsoError(w, r, redirectUri) return } @@ -302,34 +302,34 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) { sp, err := domain.PrepareSamlRequest(parsedState) if err != nil { - zap.S().Errorf("[receiveSAML] failed to prepare saml request for domain (%s): %v", domain.String(), err) + zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err)) handleSsoError(w, r, redirectUri) return } assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse")) if err != nil { - zap.S().Errorf("[receiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domain.String(), err) + zap.L().Error("[receiveSAML] failed to retrieve assertion info from saml response", zap.String("domain", domain.String()), zap.Error(err)) 
handleSsoError(w, r, redirectUri) return } if assertionInfo.WarningInfo.InvalidTime { - zap.S().Errorf("[receiveSAML] expired saml response for organization (%s): %v", domain.String(), err) + zap.L().Error("[receiveSAML] expired saml response", zap.String("domain", domain.String()), zap.Error(err)) handleSsoError(w, r, redirectUri) return } email := assertionInfo.NameID if email == "" { - zap.S().Errorf("[receiveSAML] invalid email in the SSO response (%s)", domain.String()) + zap.L().Error("[receiveSAML] invalid email in the SSO response", zap.String("domain", domain.String())) handleSsoError(w, r, redirectUri) return } nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email) if err != nil { - zap.S().Errorf("[receiveSAML] failed to generate redirect URI after successful login ", domain.String(), zap.Error(err)) + zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err)) handleSsoError(w, r, redirectUri) return } diff --git a/ee/query-service/app/api/license.go b/ee/query-service/app/api/license.go index 5c397020b1..51cfddefb1 100644 --- a/ee/query-service/app/api/license.go +++ b/ee/query-service/app/api/license.go @@ -191,7 +191,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) { url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey) req, err := http.NewRequest("GET", url, nil) if err != nil { - zap.S().Error("Error while creating request for trial details", err) + zap.L().Error("Error while creating request for trial details", zap.Error(err)) // If there is an error in fetching trial details, we will still return the license details // to avoid blocking the UI ah.Respond(w, resp) @@ -200,7 +200,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) { req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey) trialResp, err := hClient.Do(req) if err != nil { - 
zap.S().Error("Error while fetching trial details", err) + zap.L().Error("Error while fetching trial details", zap.Error(err)) // If there is an error in fetching trial details, we will still return the license details // to avoid incorrectly blocking the UI ah.Respond(w, resp) @@ -211,7 +211,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) { trialRespBody, err := io.ReadAll(trialResp.Body) if err != nil || trialResp.StatusCode != http.StatusOK { - zap.S().Error("Error while fetching trial details", err) + zap.L().Error("Error while fetching trial details", zap.Error(err)) // If there is an error in fetching trial details, we will still return the license details // to avoid incorrectly blocking the UI ah.Respond(w, resp) @@ -222,7 +222,7 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) { var trialRespData model.SubscriptionServerResp if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil { - zap.S().Error("Error while decoding trial details", err) + zap.L().Error("Error while decoding trial details", zap.Error(err)) // If there is an error in fetching trial details, we will still return the license details // to avoid incorrectly blocking the UI ah.Respond(w, resp) diff --git a/ee/query-service/app/api/metrics.go b/ee/query-service/app/api/metrics.go index 81af7035b7..7c0e320f45 100644 --- a/ee/query-service/app/api/metrics.go +++ b/ee/query-service/app/api/metrics.go @@ -18,14 +18,14 @@ import ( func (ah *APIHandler) queryRangeMetricsV2(w http.ResponseWriter, r *http.Request) { if !ah.CheckFeature(basemodel.CustomMetricsFunction) { - zap.S().Info("CustomMetricsFunction feature is not enabled in this plan") + zap.L().Info("CustomMetricsFunction feature is not enabled in this plan") ah.APIHandler.QueryRangeMetricsV2(w, r) return } metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r) if apiErrorObj != nil { - zap.S().Errorf(apiErrorObj.Err.Error()) + 
zap.L().Error("Error in parsing metric query params", zap.Error(apiErrorObj.Err)) RespondError(w, apiErrorObj, nil) return } diff --git a/ee/query-service/app/api/pat.go b/ee/query-service/app/api/pat.go index ea43f47fb0..3ff8be74a2 100644 --- a/ee/query-service/app/api/pat.go +++ b/ee/query-service/app/api/pat.go @@ -43,8 +43,8 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) { return } pat := model.PAT{ - Name: req.Name, - Role: req.Role, + Name: req.Name, + Role: req.Role, ExpiresAt: req.ExpiresInDays, } err = validatePATRequest(pat) @@ -65,7 +65,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) { pat.ExpiresAt = time.Now().Unix() + (pat.ExpiresAt * 24 * 60 * 60) } - zap.S().Debugf("Got Create PAT request: %+v", pat) + zap.L().Info("Got Create PAT request", zap.Any("pat", pat)) var apierr basemodel.BaseApiError if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil { RespondError(w, apierr, nil) @@ -115,7 +115,7 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) { req.UpdatedByUserID = user.Id id := mux.Vars(r)["id"] req.UpdatedAt = time.Now().Unix() - zap.S().Debugf("Got Update PAT request: %+v", req) + zap.L().Info("Got Update PAT request", zap.Any("pat", req)) var apierr basemodel.BaseApiError if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil { RespondError(w, apierr, nil) @@ -135,7 +135,7 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) { }, nil) return } - zap.S().Infof("Get PATs for user: %+v", user.Id) + zap.L().Info("Get PATs for user", zap.String("user_id", user.Id)) pats, apierr := ah.AppDao().ListPATs(ctx) if apierr != nil { RespondError(w, apierr, nil) @@ -156,7 +156,7 @@ func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) { return } - zap.S().Debugf("Revoke PAT with id: %+v", id) + zap.L().Info("Revoke PAT with id", zap.String("id", id)) if apierr := ah.AppDao().RevokePAT(ctx, id, user.Id); apierr != nil { 
RespondError(w, apierr, nil) return diff --git a/ee/query-service/app/api/traces.go b/ee/query-service/app/api/traces.go index 22d66f7a82..ee18b2f50b 100644 --- a/ee/query-service/app/api/traces.go +++ b/ee/query-service/app/api/traces.go @@ -15,7 +15,7 @@ import ( func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) { if !ah.CheckFeature(basemodel.SmartTraceDetail) { - zap.S().Info("SmartTraceDetail feature is not enabled in this plan") + zap.L().Info("SmartTraceDetail feature is not enabled in this plan") ah.APIHandler.SearchTraces(w, r) return } @@ -26,7 +26,7 @@ func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) { } spanLimit, err := strconv.Atoi(constants.SpanLimitStr) if err != nil { - zap.S().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable: ", err) + zap.L().Error("Error during strconv.Atoi() on SPAN_LIMIT env variable", zap.Error(err)) return } result, err := ah.opts.DataConnector.SearchTraces(r.Context(), traceId, spanId, levelUpInt, levelDownInt, spanLimit, db.SmartTraceAlgorithm) diff --git a/ee/query-service/app/db/metrics.go b/ee/query-service/app/db/metrics.go index 3bafc6a638..c7b41b17f5 100644 --- a/ee/query-service/app/db/metrics.go +++ b/ee/query-service/app/db/metrics.go @@ -22,7 +22,7 @@ import ( func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) { defer utils.Elapsed("GetMetricResult")() - zap.S().Infof("Executing metric result query: %s", query) + zap.L().Info("Executing metric result query: ", zap.String("query", query)) var hash string // If getSubTreeSpans function is used in the clickhouse query @@ -38,9 +38,8 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) } rows, err := r.conn.Query(ctx, query) - zap.S().Debug(query) if err != nil { - zap.S().Debug("Error in processing query: ", err) + zap.L().Error("Error in processing query", zap.Error(err)) return nil, "", fmt.Errorf("error in 
processing query") } @@ -117,7 +116,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()) } default: - zap.S().Errorf("invalid var found in metric builder query result", v, colName) + zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName)) } } sort.Strings(groupBy) @@ -140,7 +139,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) } // err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash) // if err != nil { - // zap.S().Error("Error in dropping temporary table: ", err) + // zap.L().Error("Error in dropping temporary table: ", err) // return nil, err // } if hash == "" { @@ -152,7 +151,7 @@ func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) { - zap.S().Debugf("Executing getSubTreeSpans function") + zap.L().Debug("Executing getSubTreeSpans function") // str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;` @@ -162,28 +161,28 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash) if err != nil { - zap.S().Error("Error in dropping temporary table: ", err) + zap.L().Error("Error in dropping temporary table", zap.Error(err)) return query, hash, err } // Create temporary table to store the 
getSubTreeSpans() results - zap.S().Debugf("Creating temporary table getSubTreeSpans%s", hash) + zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash)) err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)") if err != nil { - zap.S().Error("Error in creating temporary table: ", err) + zap.L().Error("Error in creating temporary table", zap.Error(err)) return query, hash, err } var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse getSpansSubQuery := subtreeInput // Execute the subTree query - zap.S().Debugf("Executing subTree query: %s", getSpansSubQuery) + zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery)) err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery) - // zap.S().Info(getSpansSubQuery) + // zap.L().Info(getSpansSubQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return query, hash, fmt.Errorf("Error in processing sql query") } @@ -196,16 +195,16 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu if len(getSpansSubQueryDBResponses) == 0 { return query, hash, fmt.Errorf("No spans found for the given query") } - zap.S().Debugf("Executing query to fetch all the spans from the same TraceID: %s", modelQuery) + zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", 
zap.String("modelQuery", modelQuery)) err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return query, hash, fmt.Errorf("Error in processing sql query") } // Process model to fetch the spans - zap.S().Debugf("Processing model to fetch the spans") + zap.L().Debug("Processing model to fetch the spans") searchSpanResponses := []basemodel.SearchSpanResponseItem{} for _, item := range searchScanResponses { var jsonItem basemodel.SearchSpanResponseItem @@ -218,17 +217,17 @@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu } // Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash // Use map to store pointer to the spans to avoid duplicates and save memory - zap.S().Debugf("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans%s", hash) + zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash)) treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses) if err != nil { - zap.S().Error("Error in getSubTreeAlgorithm function: ", err) + zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err)) return query, hash, err } - zap.S().Debugf("Preparing batch to store subtree spans in temporary table getSubTreeSpans%s", hash) + zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash)) statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash)) if err != nil { - zap.S().Error("Error in preparing batch statement: ", err) + zap.L().Error("Error in preparing batch statement", zap.Error(err)) return query, hash, err } for _, span := range treeSearchResponse { @@ -251,14 +250,14 
@@ func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, qu span.Events, ) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return query, hash, err } } - zap.S().Debugf("Inserting the subtree spans in temporary table getSubTreeSpans%s", hash) + zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash)) err = statement.Send() if err != nil { - zap.S().Error("Error in sending statement: ", err) + zap.L().Error("Error in sending statement", zap.Error(err)) return query, hash, err } return query, hash, nil @@ -323,7 +322,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub spans = append(spans, span) } - zap.S().Debug("Building Tree") + zap.L().Debug("Building Tree") roots, err := buildSpanTrees(&spans) if err != nil { return nil, err @@ -333,7 +332,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub // For each root, get the subtree spans for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses { targetSpan := &model.SpanForTraceDetails{} - // zap.S().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses))) + // zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses))) // Search target span object in the tree for _, root := range roots { targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID) @@ -341,7 +340,7 @@ func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSub break } if err != nil { - zap.S().Error("Error during BreadthFirstSearch(): ", err) + zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err)) return nil, err } } diff --git a/ee/query-service/app/db/trace.go 
b/ee/query-service/app/db/trace.go index 529a9a93fd..c6fe9045cf 100644 --- a/ee/query-service/app/db/trace.go +++ b/ee/query-service/app/db/trace.go @@ -49,7 +49,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI break } if err != nil { - zap.S().Error("Error during BreadthFirstSearch(): ", err) + zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err)) return nil, err } } @@ -186,7 +186,7 @@ func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTra // If the parent span is not found, add current span to list of roots if parent == nil { - // zap.S().Debug("Parent Span not found parent_id: ", span.ParentID) + // zap.L().Debug("Parent Span not found parent_id: ", span.ParentID) roots = append(roots, span) span.ParentID = "" continue diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go index 11ef8dffe0..c742eef01b 100644 --- a/ee/query-service/app/server.go +++ b/ee/query-service/app/server.go @@ -134,7 +134,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { var reader interfaces.DataConnector storage := os.Getenv("STORAGE") if storage == "clickhouse" { - zap.S().Info("Using ClickHouse as datastore ...") + zap.L().Info("Using ClickHouse as datastore ...") qb := db.NewDataConnector( localDB, serverOptions.PromConfigPath, @@ -525,7 +525,7 @@ func (s *Server) initListeners() error { return err } - zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort)) + zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort)) // listen on private port to support internal services privateHostPort := s.serverOptions.PrivateHostPort @@ -538,7 +538,7 @@ func (s *Server) initListeners() error { if err != nil { return err } - zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort)) + zap.L().Info(fmt.Sprintf("Query server started listening on 
private port %s...", s.serverOptions.PrivateHostPort)) return nil } @@ -550,7 +550,7 @@ func (s *Server) Start() error { if !s.serverOptions.DisableRules { s.ruleManager.Start() } else { - zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE") + zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE") } err := s.initListeners() @@ -564,23 +564,23 @@ func (s *Server) Start() error { } go func() { - zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort)) + zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort)) switch err := s.httpServer.Serve(s.httpConn); err { case nil, http.ErrServerClosed, cmux.ErrListenerClosed: // normal exit, nothing to do default: - zap.S().Error("Could not start HTTP server", zap.Error(err)) + zap.L().Error("Could not start HTTP server", zap.Error(err)) } s.unavailableChannel <- healthcheck.Unavailable }() go func() { - zap.S().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort)) + zap.L().Info("Starting pprof server", zap.String("addr", baseconst.DebugHttpPort)) err = http.ListenAndServe(baseconst.DebugHttpPort, nil) if err != nil { - zap.S().Error("Could not start pprof server", zap.Error(err)) + zap.L().Error("Could not start pprof server", zap.Error(err)) } }() @@ -590,14 +590,14 @@ func (s *Server) Start() error { } go func() { - zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort)) + zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort)) switch err := s.privateHTTP.Serve(s.privateConn); err { case nil, http.ErrServerClosed, cmux.ErrListenerClosed: // normal exit, nothing to do - zap.S().Info("private http server closed") + zap.L().Info("private http server closed") default: - zap.S().Error("Could not start private HTTP server", 
zap.Error(err)) + zap.L().Error("Could not start private HTTP server", zap.Error(err)) } s.unavailableChannel <- healthcheck.Unavailable @@ -605,10 +605,10 @@ func (s *Server) Start() error { }() go func() { - zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint)) + zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint)) err := s.opampServer.Start(baseconst.OpAmpWsEndpoint) if err != nil { - zap.S().Info("opamp ws server failed to start", err) + zap.L().Error("opamp ws server failed to start", zap.Error(err)) s.unavailableChannel <- healthcheck.Unavailable } }() @@ -684,7 +684,7 @@ func makeRulesManager( return nil, fmt.Errorf("rule manager error: %v", err) } - zap.S().Info("rules manager is ready") + zap.L().Info("rules manager is ready") return manager, nil } diff --git a/ee/query-service/auth/auth.go b/ee/query-service/auth/auth.go index 8c06384549..d45d050cca 100644 --- a/ee/query-service/auth/auth.go +++ b/ee/query-service/auth/auth.go @@ -17,25 +17,25 @@ import ( func GetUserFromRequest(r *http.Request, apiHandler *api.APIHandler) (*basemodel.UserPayload, error) { patToken := r.Header.Get("SIGNOZ-API-KEY") if len(patToken) > 0 { - zap.S().Debugf("Received a non-zero length PAT token") + zap.L().Debug("Received a non-zero length PAT token") ctx := context.Background() dao := apiHandler.AppDao() pat, err := dao.GetPAT(ctx, patToken) if err == nil && pat != nil { - zap.S().Debugf("Found valid PAT: %+v", pat) + zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat)) if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 { - zap.S().Debugf("PAT has expired: %+v", pat) + zap.L().Info("PAT has expired: ", zap.Any("pat", pat)) return nil, fmt.Errorf("PAT has expired") } group, apiErr := dao.GetGroupByName(ctx, pat.Role) if apiErr != nil { - zap.S().Debugf("Error while getting group for PAT: %+v", apiErr) + zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr)) 
return nil, apiErr } user, err := dao.GetUser(ctx, pat.UserID) if err != nil { - zap.S().Debugf("Error while getting user for PAT: %+v", err) + zap.L().Error("Error while getting user for PAT: ", zap.Error(err)) return nil, err } telemetry.GetInstance().SetPatTokenUser() @@ -48,7 +48,7 @@ func GetUserFromRequest(r *http.Request, apiHandler *api.APIHandler) (*basemodel }, nil } if err != nil { - zap.S().Debugf("Error while getting user for PAT: %+v", err) + zap.L().Error("Error while getting user for PAT: ", zap.Error(err)) return nil, err } } diff --git a/ee/query-service/dao/sqlite/auth.go b/ee/query-service/dao/sqlite/auth.go index 664323eaaf..4418b04cbf 100644 --- a/ee/query-service/dao/sqlite/auth.go +++ b/ee/query-service/dao/sqlite/auth.go @@ -22,19 +22,19 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) ( domain, apierr := m.GetDomainByEmail(ctx, email) if apierr != nil { - zap.S().Errorf("failed to get domain from email", apierr) + zap.L().Error("failed to get domain from email", zap.Error(apierr)) return nil, model.InternalErrorStr("failed to get domain from email") } hash, err := baseauth.PasswordHash(utils.GeneratePassowrd()) if err != nil { - zap.S().Errorf("failed to generate password hash when registering a user via SSO redirect", zap.Error(err)) + zap.L().Error("failed to generate password hash when registering a user via SSO redirect", zap.Error(err)) return nil, model.InternalErrorStr("failed to generate password hash") } group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup) if apiErr != nil { - zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err) + zap.L().Error("GetGroupByName failed", zap.Error(apiErr)) return nil, apiErr } @@ -51,7 +51,7 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) ( user, apiErr = m.CreateUser(ctx, user, false) if apiErr != nil { - zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err) + zap.L().Error("CreateUser failed", 
zap.Error(apiErr)) return nil, apiErr } @@ -65,7 +65,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st userPayload, apierr := m.GetUserByEmail(ctx, email) if !apierr.IsNil() { - zap.S().Errorf(" failed to get user with email received from auth provider", apierr.Error()) + zap.L().Error("failed to get user with email received from auth provider", zap.String("error", apierr.Error())) return "", model.BadRequestStr("invalid user email received from the auth provider") } @@ -75,7 +75,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st newUser, apiErr := m.createUserForSAMLRequest(ctx, email) user = newUser if apiErr != nil { - zap.S().Errorf("failed to create user with email received from auth provider: %v", apierr.Error()) + zap.L().Error("failed to create user with email received from auth provider", zap.Error(apiErr)) return "", apiErr } } else { @@ -84,7 +84,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st tokenStore, err := baseauth.GenerateJWTForUser(user) if err != nil { - zap.S().Errorf("failed to generate token for SSO login user", err) + zap.L().Error("failed to generate token for SSO login user", zap.Error(err)) return "", model.InternalErrorStr("failed to generate token for the user") } @@ -143,8 +143,8 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) ( // do nothing, just skip sso ssoAvailable = false default: - zap.S().Errorf("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err)) - return resp, model.BadRequest(err) + zap.L().Error("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err)) + return resp, model.BadRequestStr(err.Error()) } } @@ -160,7 +160,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) ( if len(emailComponents) > 0 { emailDomain = emailComponents[1] } - zap.S().Errorf("failed to get org domain from email", 
zap.String("emailDomain", emailDomain), apierr.ToError()) + zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError())) return resp, apierr } @@ -176,7 +176,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) ( escapedUrl, _ := url.QueryUnescape(sourceUrl) siteUrl, err := url.Parse(escapedUrl) if err != nil { - zap.S().Errorf("failed to parse referer", err) + zap.L().Error("failed to parse referer", zap.Error(err)) return resp, model.InternalError(fmt.Errorf("failed to generate login request")) } @@ -185,7 +185,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) ( resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl) if err != nil { - zap.S().Errorf("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), err) + zap.L().Error("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), zap.Error(err)) return resp, model.InternalError(err) } diff --git a/ee/query-service/dao/sqlite/domain.go b/ee/query-service/dao/sqlite/domain.go index b515af49c9..fbaa4fe332 100644 --- a/ee/query-service/dao/sqlite/domain.go +++ b/ee/query-service/dao/sqlite/domain.go @@ -48,13 +48,13 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url if domainIdStr != "" { domainId, err := uuid.Parse(domainIdStr) if err != nil { - zap.S().Errorf("failed to parse domainId from relay state", err) + zap.L().Error("failed to parse domainId from relay state", zap.Error(err)) return nil, fmt.Errorf("failed to parse domainId from IdP response") } domain, err = m.GetDomain(ctx, domainId) if (err != nil) || domain == nil { - zap.S().Errorf("failed to find domain from domainId received in IdP response", err.Error()) + zap.L().Error("failed to find domain from domainId received in IdP response", zap.Error(err)) return nil, fmt.Errorf("invalid credentials") } } @@ -64,7 +64,7 @@ func (m *modelDao) 
GetDomainFromSsoResponse(ctx context.Context, relayState *url domainFromDB, err := m.GetDomainByName(ctx, domainNameStr) domain = domainFromDB if (err != nil) || domain == nil { - zap.S().Errorf("failed to find domain from domainName received in IdP response", err.Error()) + zap.L().Error("failed to find domain from domainName received in IdP response", zap.Error(err)) return nil, fmt.Errorf("invalid credentials") } } @@ -132,7 +132,7 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDo for _, s := range stored { domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId} if err := domain.LoadConfig(s.Data); err != nil { - zap.S().Errorf("ListDomains() failed", zap.Error(err)) + zap.L().Error("ListDomains() failed", zap.Error(err)) } domains = append(domains, domain) } @@ -153,7 +153,7 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba configJson, err := json.Marshal(domain) if err != nil { - zap.S().Errorf("failed to unmarshal domain config", zap.Error(err)) + zap.L().Error("failed to unmarshal domain config", zap.Error(err)) return model.InternalError(fmt.Errorf("domain creation failed")) } @@ -167,7 +167,7 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba time.Now().Unix()) if err != nil { - zap.S().Errorf("failed to insert domain in db", zap.Error(err)) + zap.L().Error("failed to insert domain in db", zap.Error(err)) return model.InternalError(fmt.Errorf("domain creation failed")) } @@ -178,13 +178,13 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) ba func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError { if domain.Id == uuid.Nil { - zap.S().Errorf("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null"))) + zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null"))) return model.InternalError(fmt.Errorf("domain update failed")) } configJson, 
err := json.Marshal(domain) if err != nil { - zap.S().Errorf("domain update failed", zap.Error(err)) + zap.L().Error("domain update failed", zap.Error(err)) return model.InternalError(fmt.Errorf("domain update failed")) } @@ -195,7 +195,7 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba domain.Id) if err != nil { - zap.S().Errorf("domain update failed", zap.Error(err)) + zap.L().Error("domain update failed", zap.Error(err)) return model.InternalError(fmt.Errorf("domain update failed")) } @@ -206,7 +206,7 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) ba func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError { if id == uuid.Nil { - zap.S().Errorf("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null"))) + zap.L().Error("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null"))) return model.InternalError(fmt.Errorf("domain delete failed")) } @@ -215,7 +215,7 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas id) if err != nil { - zap.S().Errorf("domain delete failed", zap.Error(err)) + zap.L().Error("domain delete failed", zap.Error(err)) return model.InternalError(fmt.Errorf("domain delete failed")) } diff --git a/ee/query-service/dao/sqlite/pat.go b/ee/query-service/dao/sqlite/pat.go index b2af1640c3..75169db685 100644 --- a/ee/query-service/dao/sqlite/pat.go +++ b/ee/query-service/dao/sqlite/pat.go @@ -26,12 +26,12 @@ func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem p.Revoked, ) if err != nil { - zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err)) + zap.L().Error("Failed to insert PAT in db, err: %v", zap.Error(err)) return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed")) } id, err := result.LastInsertId() if err != nil { - zap.S().Errorf("Failed to get last inserted id, err: %v", zap.Error(err)) + zap.L().Error("Failed to get last 
inserted id, err: %v", zap.Error(err)) return model.PAT{}, model.InternalError(fmt.Errorf("PAT insertion failed")) } p.Id = strconv.Itoa(int(id)) @@ -62,7 +62,7 @@ func (m *modelDao) UpdatePAT(ctx context.Context, p model.PAT, id string) basemo p.UpdatedByUserID, id) if err != nil { - zap.S().Errorf("Failed to update PAT in db, err: %v", zap.Error(err)) + zap.L().Error("Failed to update PAT in db, err: %v", zap.Error(err)) return model.InternalError(fmt.Errorf("PAT update failed")) } return nil @@ -74,7 +74,7 @@ func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed lastUsed, token) if err != nil { - zap.S().Errorf("Failed to update PAT last used in db, err: %v", zap.Error(err)) + zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err)) return model.InternalError(fmt.Errorf("PAT last used update failed")) } return nil @@ -84,7 +84,7 @@ func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApi pats := []model.PAT{} if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil { - zap.S().Errorf("Failed to fetch PATs err: %v", zap.Error(err)) + zap.L().Error("Failed to fetch PATs err: %v", zap.Error(err)) return nil, model.InternalError(fmt.Errorf("failed to fetch PATs")) } for i := range pats { @@ -129,7 +129,7 @@ func (m *modelDao) RevokePAT(ctx context.Context, id string, userID string) base "UPDATE personal_access_tokens SET revoked=true, updated_by_user_id = $1, updated_at=$2 WHERE id=$3", userID, updatedAt, id) if err != nil { - zap.S().Errorf("Failed to revoke PAT in db, err: %v", zap.Error(err)) + zap.L().Error("Failed to revoke PAT in db, err: %v", zap.Error(err)) return model.InternalError(fmt.Errorf("PAT revoke failed")) } return nil diff --git a/ee/query-service/integrations/signozio/signozio.go b/ee/query-service/integrations/signozio/signozio.go index c1ad5e57e4..c18cfb6572 100644 --- 
a/ee/query-service/integrations/signozio/signozio.go +++ b/ee/query-service/integrations/signozio/signozio.go @@ -47,13 +47,13 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString)) if err != nil { - zap.S().Errorf("failed to connect to license.signoz.io", err) + zap.L().Error("failed to connect to license.signoz.io", zap.Error(err)) return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection")) } httpBody, err := io.ReadAll(httpResponse.Body) if err != nil { - zap.S().Errorf("failed to read activation response from license.signoz.io", err) + zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err)) return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io")) } @@ -63,7 +63,7 @@ func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) result := ActivationResult{} err = json.Unmarshal(httpBody, &result) if err != nil { - zap.S().Errorf("failed to marshal activation response from license.signoz.io", err) + zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err)) return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response")) } diff --git a/ee/query-service/license/db.go b/ee/query-service/license/db.go index 8d2f7065ff..bf71e9376d 100644 --- a/ee/query-service/license/db.go +++ b/ee/query-service/license/db.go @@ -97,7 +97,7 @@ func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error { l.ValidationMessage) if err != nil { - zap.S().Errorf("error in inserting license data: ", zap.Error(err)) + zap.L().Error("error in inserting license data: ", zap.Error(err)) return fmt.Errorf("failed to insert license in db: %v", err) } @@ -121,7 +121,7 @@ func (r *Repo) UpdatePlanDetails(ctx context.Context, 
_, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key) if err != nil { - zap.S().Errorf("error in updating license: ", zap.Error(err)) + zap.L().Error("error in updating license: ", zap.Error(err)) return fmt.Errorf("failed to update license in db: %v", err) } diff --git a/ee/query-service/license/manager.go b/ee/query-service/license/manager.go index dcfa8235b1..56cb685fec 100644 --- a/ee/query-service/license/manager.go +++ b/ee/query-service/license/manager.go @@ -100,7 +100,7 @@ func (lm *Manager) SetActive(l *model.License) { err := lm.InitFeatures(lm.activeFeatures) if err != nil { - zap.S().Panicf("Couldn't activate features: %v", err) + zap.L().Panic("Couldn't activate features", zap.Error(err)) } if !lm.validatorRunning { // we want to make sure only one validator runs, @@ -125,13 +125,13 @@ func (lm *Manager) LoadActiveLicense() error { if active != nil { lm.SetActive(active) } else { - zap.S().Info("No active license found, defaulting to basic plan") + zap.L().Info("No active license found, defaulting to basic plan") // if no active license is found, we default to basic(free) plan with all default features lm.activeFeatures = model.BasicPlan setDefaultFeatures(lm) err := lm.InitFeatures(lm.activeFeatures) if err != nil { - zap.S().Error("Couldn't initialize features: ", err) + zap.L().Error("Couldn't initialize features", zap.Error(err)) return err } } @@ -191,7 +191,7 @@ func (lm *Manager) Validator(ctx context.Context) { // Validate validates the current active license func (lm *Manager) Validate(ctx context.Context) (reterr error) { - zap.S().Info("License validation started") + zap.L().Info("License validation started") if lm.activeLicense == nil { return nil } @@ -201,12 +201,12 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) { lm.lastValidated = time.Now().Unix() if reterr != nil { - zap.S().Errorf("License validation completed with error", reterr) + zap.L().Error("License validation completed with error", 
zap.Error(reterr)) atomic.AddUint64(&lm.failedAttempts, 1) telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED, map[string]interface{}{"err": reterr.Error()}, "") } else { - zap.S().Info("License validation completed with no errors") + zap.L().Info("License validation completed with no errors") } lm.mutex.Unlock() @@ -214,7 +214,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) { response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId) if apiError != nil { - zap.S().Errorf("failed to validate license", apiError) + zap.L().Error("failed to validate license", zap.Error(apiError.Err)) return apiError.Err } @@ -235,7 +235,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) { } if err := l.ParsePlan(); err != nil { - zap.S().Errorf("failed to parse updated license", zap.Error(err)) + zap.L().Error("failed to parse updated license", zap.Error(err)) return err } @@ -245,7 +245,7 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) { if err != nil { // unexpected db write issue but we can let the user continue // and wait for update to work in next cycle. 
- zap.S().Errorf("failed to validate license", zap.Error(err)) + zap.L().Error("failed to validate license", zap.Error(err)) } } @@ -270,7 +270,7 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m response, apiError := validate.ActivateLicense(key, "") if apiError != nil { - zap.S().Errorf("failed to activate license", zap.Error(apiError.Err)) + zap.L().Error("failed to activate license", zap.Error(apiError.Err)) return nil, apiError } @@ -284,14 +284,14 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m err := l.ParsePlan() if err != nil { - zap.S().Errorf("failed to activate license", zap.Error(err)) + zap.L().Error("failed to activate license", zap.Error(err)) return nil, model.InternalError(err) } // store the license before activating it err = lm.repo.InsertLicense(ctx, l) if err != nil { - zap.S().Errorf("failed to activate license", zap.Error(err)) + zap.L().Error("failed to activate license", zap.Error(err)) return nil, model.InternalError(err) } diff --git a/ee/query-service/main.go b/ee/query-service/main.go index 427f78059b..3323e5bdbd 100644 --- a/ee/query-service/main.go +++ b/ee/query-service/main.go @@ -14,10 +14,10 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.4.0" "go.signoz.io/signoz/ee/query-service/app" "go.signoz.io/signoz/pkg/query-service/auth" - "go.signoz.io/signoz/pkg/query-service/constants" baseconst "go.signoz.io/signoz/pkg/query-service/constants" "go.signoz.io/signoz/pkg/query-service/version" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" zapotlpencoder "github.com/SigNoz/zap_otlp/zap_otlp_encoder" zapotlpsync "github.com/SigNoz/zap_otlp/zap_otlp_sync" @@ -27,18 +27,19 @@ import ( ) func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger { - config := zap.NewDevelopmentConfig() + config := zap.NewProductionConfig() ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) defer stop() - 
config.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder - otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig) - consoleEncoder := zapcore.NewConsoleEncoder(config.EncoderConfig) - defaultLogLevel := zapcore.DebugLevel - config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder + config.EncoderConfig.EncodeDuration = zapcore.MillisDurationEncoder + config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder config.EncoderConfig.TimeKey = "timestamp" config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + otlpEncoder := zapotlpencoder.NewOTLPEncoder(config.EncoderConfig) + consoleEncoder := zapcore.NewJSONEncoder(config.EncoderConfig) + defaultLogLevel := zapcore.InfoLevel + res := resource.NewWithAttributes( semconv.SchemaURL, semconv.ServiceNameKey.String("query-service"), @@ -48,14 +49,15 @@ func initZapLog(enableQueryServiceLogOTLPExport bool) *zap.Logger { zapcore.NewCore(consoleEncoder, os.Stdout, defaultLogLevel), ) - if enableQueryServiceLogOTLPExport == true { - conn, err := grpc.DialContext(ctx, constants.OTLPTarget, grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(time.Second*30)) + if enableQueryServiceLogOTLPExport { + ctx, _ := context.WithTimeout(ctx, time.Second*30) + conn, err := grpc.DialContext(ctx, baseconst.OTLPTarget, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { - log.Println("failed to connect to otlp collector to export query service logs with error:", err) + log.Fatalf("failed to establish connection: %v", err) } else { logExportBatchSizeInt, err := strconv.Atoi(baseconst.LogExportBatchSize) if err != nil { - logExportBatchSizeInt = 1000 + logExportBatchSizeInt = 512 } ws := zapcore.AddSync(zapotlpsync.NewOtlpSyncer(conn, zapotlpsync.Options{ BatchSize: logExportBatchSizeInt, @@ -113,7 +115,6 @@ func main() { zap.ReplaceGlobals(loggerMgr) defer loggerMgr.Sync() // flushes buffer, if any - logger := loggerMgr.Sugar() version.PrintVersion() 
serverOptions := &app.ServerOptions{ @@ -137,22 +138,22 @@ func main() { auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET") if len(auth.JwtSecret) == 0 { - zap.S().Warn("No JWT secret key is specified.") + zap.L().Warn("No JWT secret key is specified.") } else { - zap.S().Info("No JWT secret key set successfully.") + zap.L().Info("JWT secret key set successfully.") } server, err := app.NewServer(serverOptions) if err != nil { - logger.Fatal("Failed to create server", zap.Error(err)) + zap.L().Fatal("Failed to create server", zap.Error(err)) } if err := server.Start(); err != nil { - logger.Fatal("Could not start servers", zap.Error(err)) + zap.L().Fatal("Could not start server", zap.Error(err)) } if err := auth.InitAuthCache(context.Background()); err != nil { - logger.Fatal("Failed to initialize auth cache", zap.Error(err)) + zap.L().Fatal("Failed to initialize auth cache", zap.Error(err)) } signalsChannel := make(chan os.Signal, 1) @@ -161,9 +162,9 @@ func main() { for { select { case status := <-server.HealthCheckStatus(): - logger.Info("Received HealthCheck status: ", zap.Int("status", int(status))) + zap.L().Info("Received HealthCheck status: ", zap.Int("status", int(status))) case <-signalsChannel: - logger.Fatal("Received OS Interrupt Signal ... ") + zap.L().Fatal("Received OS Interrupt Signal ... 
") server.Stop() } } diff --git a/ee/query-service/model/domain.go b/ee/query-service/model/domain.go index beadd66a51..4d5ff66df2 100644 --- a/ee/query-service/model/domain.go +++ b/ee/query-service/model/domain.go @@ -9,8 +9,8 @@ import ( "github.com/google/uuid" "github.com/pkg/errors" saml2 "github.com/russellhaering/gosaml2" - "go.signoz.io/signoz/ee/query-service/sso/saml" "go.signoz.io/signoz/ee/query-service/sso" + "go.signoz.io/signoz/ee/query-service/sso/saml" basemodel "go.signoz.io/signoz/pkg/query-service/model" "go.uber.org/zap" ) @@ -24,16 +24,16 @@ const ( // OrgDomain identify org owned web domains for auth and other purposes type OrgDomain struct { - Id uuid.UUID `json:"id"` - Name string `json:"name"` - OrgId string `json:"orgId"` - SsoEnabled bool `json:"ssoEnabled"` - SsoType SSOType `json:"ssoType"` + Id uuid.UUID `json:"id"` + Name string `json:"name"` + OrgId string `json:"orgId"` + SsoEnabled bool `json:"ssoEnabled"` + SsoType SSOType `json:"ssoType"` - SamlConfig *SamlConfig `json:"samlConfig"` + SamlConfig *SamlConfig `json:"samlConfig"` GoogleAuthConfig *GoogleOAuthConfig `json:"googleAuthConfig"` - Org *basemodel.Organization + Org *basemodel.Organization } func (od *OrgDomain) String() string { @@ -100,8 +100,8 @@ func (od *OrgDomain) GetSAMLCert() string { return "" } -// PrepareGoogleOAuthProvider creates GoogleProvider that is used in -// requesting OAuth and also used in processing response from google +// PrepareGoogleOAuthProvider creates GoogleProvider that is used in +// requesting OAuth and also used in processing response from google func (od *OrgDomain) PrepareGoogleOAuthProvider(siteUrl *url.URL) (sso.OAuthCallbackProvider, error) { if od.GoogleAuthConfig == nil { return nil, fmt.Errorf("Google auth is not setup correctly for this domain") @@ -137,38 +137,36 @@ func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServicePro } func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) { - 
fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1) - + // build redirect url from window.location sent by frontend redirectURL := fmt.Sprintf("%s://%s%s", siteUrl.Scheme, siteUrl.Host, siteUrl.Path) // prepare state that gets relayed back when the auth provider // calls back our url. here we pass the app url (where signoz runs) // and the domain Id. The domain Id helps in identifying sso config - // when the call back occurs and the app url is useful in redirecting user - // back to the right path. + // when the call back occurs and the app url is useful in redirecting user + // back to the right path. // why do we need to pass app url? the callback typically is handled by backend // and sometimes backend might right at a different port or is unaware of frontend // endpoint (unless SITE_URL param is set). hence, we receive this build sso request - // along with frontend window.location and use it to relay the information through - // auth provider to the backend (HandleCallback or HandleSSO method). + // along with frontend window.location and use it to relay the information through + // auth provider to the backend (HandleCallback or HandleSSO method). 
relayState := fmt.Sprintf("%s?domainId=%s", redirectURL, fmtDomainId) - - switch (od.SsoType) { + switch od.SsoType { case SAML: sp, err := od.PrepareSamlRequest(siteUrl) if err != nil { return "", err } - + return sp.BuildAuthURL(relayState) - + case GoogleAuth: - + googleProvider, err := od.PrepareGoogleOAuthProvider(siteUrl) if err != nil { return "", err @@ -176,9 +174,8 @@ func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) { return googleProvider.BuildAuthURL(relayState) default: - zap.S().Errorf("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name)) - return "", fmt.Errorf("unsupported SSO config for the domain") + zap.L().Error("found unsupported SSO config for the org domain", zap.String("orgDomain", od.Name)) + return "", fmt.Errorf("unsupported SSO config for the domain") } - } diff --git a/ee/query-service/sso/saml/request.go b/ee/query-service/sso/saml/request.go index 01af7afe28..c9788d0ff3 100644 --- a/ee/query-service/sso/saml/request.go +++ b/ee/query-service/sso/saml/request.go @@ -102,6 +102,6 @@ func PrepareRequest(issuer, acsUrl, audience, entity, idp, certString string) (* IDPCertificateStore: certStore, SPKeyStore: randomKeyStore, } - zap.S().Debugf("SAML request:", sp) + zap.L().Debug("SAML request", zap.Any("sp", sp)) return sp, nil } diff --git a/ee/query-service/usage/manager.go b/ee/query-service/usage/manager.go index 99158b4345..72535c9ae5 100644 --- a/ee/query-service/usage/manager.go +++ b/ee/query-service/usage/manager.go @@ -91,12 +91,12 @@ func (lm *Manager) UploadUsage() { // check if license is present or not license, err := lm.licenseRepo.GetActiveLicense(ctx) if err != nil { - zap.S().Errorf("failed to get active license: %v", zap.Error(err)) + zap.L().Error("failed to get active license", zap.Error(err)) return } if license == nil { // we will not start the usage reporting if license is not present. 
- zap.S().Info("no license present, skipping usage reporting") + zap.L().Info("no license present, skipping usage reporting") return } @@ -123,7 +123,7 @@ func (lm *Manager) UploadUsage() { dbusages := []model.UsageDB{} err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour))) if err != nil && !strings.Contains(err.Error(), "doesn't exist") { - zap.S().Errorf("failed to get usage from clickhouse: %v", zap.Error(err)) + zap.L().Error("failed to get usage from clickhouse: %v", zap.Error(err)) return } for _, u := range dbusages { @@ -133,16 +133,16 @@ func (lm *Manager) UploadUsage() { } if len(usages) <= 0 { - zap.S().Info("no snapshots to upload, skipping.") + zap.L().Info("no snapshots to upload, skipping.") return } - zap.S().Info("uploading usage data") + zap.L().Info("uploading usage data") orgName := "" orgNames, orgError := lm.modelDao.GetOrgs(ctx) if orgError != nil { - zap.S().Errorf("failed to get org data: %v", zap.Error(orgError)) + zap.L().Error("failed to get org data: %v", zap.Error(orgError)) } if len(orgNames) == 1 { orgName = orgNames[0].Name @@ -152,14 +152,14 @@ func (lm *Manager) UploadUsage() { for _, usage := range usages { usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data)) if err != nil { - zap.S().Errorf("error while decrypting usage data: %v", zap.Error(err)) + zap.L().Error("error while decrypting usage data: %v", zap.Error(err)) return } usageData := model.Usage{} err = json.Unmarshal(usageDataBytes, &usageData) if err != nil { - zap.S().Errorf("error while unmarshalling usage data: %v", zap.Error(err)) + zap.L().Error("error while unmarshalling usage data: %v", zap.Error(err)) return } @@ -184,13 +184,13 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload for i := 1; i <= MaxRetries; i++ { apiErr := licenseserver.SendUsage(ctx, payload) if apiErr != nil && i == MaxRetries { - zap.S().Errorf("retries stopped : 
%v", zap.Error(apiErr)) + zap.L().Error("retries stopped : %v", zap.Error(apiErr)) // not returning error here since it is captured in the failed count return } else if apiErr != nil { // sleeping for exponential backoff sleepDuration := RetryInterval * time.Duration(i) - zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), zap.Error(apiErr.Err)) + zap.L().Error("failed to upload snapshot retrying after %v secs : %v", zap.Duration("sleepDuration", sleepDuration), zap.Error(apiErr.Err)) time.Sleep(sleepDuration) } else { break @@ -201,7 +201,7 @@ func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload func (lm *Manager) Stop() { lm.scheduler.Stop() - zap.S().Debug("sending usage data before shutting down") + zap.L().Info("sending usage data before shutting down") // send usage before shutting down lm.UploadUsage() diff --git a/pkg/query-service/agentConf/db.go b/pkg/query-service/agentConf/db.go index ffbc2f53a8..04ab780db6 100644 --- a/pkg/query-service/agentConf/db.go +++ b/pkg/query-service/agentConf/db.go @@ -151,7 +151,7 @@ func (r *Repo) insertConfig( // allowing empty elements for logs - use case is deleting all pipelines if len(elements) == 0 && c.ElementType != ElementTypeLogPipelines { - zap.S().Error("insert config called with no elements ", c.ElementType) + zap.L().Error("insert config called with no elements ", zap.String("ElementType", string(c.ElementType))) return model.BadRequest(fmt.Errorf("config must have atleast one element")) } @@ -159,7 +159,7 @@ func (r *Repo) insertConfig( // the version can not be set by the user, we want to auto-assign the versions // in a monotonically increasing order starting with 1. hence, we reject insert // requests with version anything other than 0. 
here, 0 indicates un-assigned - zap.S().Error("invalid version assignment while inserting agent config", c.Version, c.ElementType) + zap.L().Error("invalid version assignment while inserting agent config", zap.Int("version", c.Version), zap.String("ElementType", string(c.ElementType))) return model.BadRequest(fmt.Errorf( "user defined versions are not supported in the agent config", )) @@ -167,7 +167,7 @@ func (r *Repo) insertConfig( configVersion, err := r.GetLatestVersion(ctx, c.ElementType) if err != nil && err.Type() != model.ErrorNotFound { - zap.S().Error("failed to fetch latest config version", err) + zap.L().Error("failed to fetch latest config version", zap.Error(err)) return model.InternalError(fmt.Errorf("failed to fetch latest config version")) } @@ -212,7 +212,7 @@ func (r *Repo) insertConfig( c.DeployResult) if dbErr != nil { - zap.S().Error("error in inserting config version: ", zap.Error(dbErr)) + zap.L().Error("error in inserting config version: ", zap.Error(dbErr)) return model.InternalError(errors.Wrap(dbErr, "failed to insert ingestion rule")) } @@ -258,7 +258,7 @@ func (r *Repo) updateDeployStatus(ctx context.Context, _, err := r.db.ExecContext(ctx, updateQuery, status, result, lastHash, lastconf, version, string(elementType)) if err != nil { - zap.S().Error("failed to update deploy status", err) + zap.L().Error("failed to update deploy status", zap.Error(err)) return model.BadRequest(fmt.Errorf("failed to update deploy status")) } @@ -276,7 +276,7 @@ func (r *Repo) updateDeployStatusByHash( _, err := r.db.ExecContext(ctx, updateQuery, status, result, confighash) if err != nil { - zap.S().Error("failed to update deploy status", err) + zap.L().Error("failed to update deploy status", zap.Error(err)) return model.InternalError(errors.Wrap(err, "failed to update deploy status")) } diff --git a/pkg/query-service/agentConf/manager.go b/pkg/query-service/agentConf/manager.go index 0fdab4e990..c9a7335e0b 100644 --- 
a/pkg/query-service/agentConf/manager.go +++ b/pkg/query-service/agentConf/manager.go @@ -224,19 +224,19 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr configVersion, err := GetConfigVersion(ctx, typ, version) if err != nil { - zap.S().Debug("failed to fetch config version during redeploy", err) + zap.L().Error("failed to fetch config version during redeploy", zap.Error(err)) return model.WrapApiError(err, "failed to fetch details of the config version") } if configVersion == nil || (configVersion != nil && configVersion.LastConf == "") { - zap.S().Debug("config version has no conf yaml", configVersion) + zap.L().Debug("config version has no conf yaml", zap.Any("configVersion", configVersion)) return model.BadRequest(fmt.Errorf("the config version can not be redeployed")) } switch typ { case ElementTypeSamplingRules: var config *tsp.Config if err := yaml.Unmarshal([]byte(configVersion.LastConf), &config); err != nil { - zap.S().Error("failed to read last conf correctly", err) + zap.L().Debug("failed to read last conf correctly", zap.Error(err)) return model.BadRequest(fmt.Errorf("failed to read the stored config correctly")) } @@ -248,7 +248,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr opamp.AddToTracePipelineSpec("signoz_tail_sampling") configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate) if err != nil { - zap.S().Error("failed to call agent config update for trace processor:", err) + zap.L().Error("failed to call agent config update for trace processor", zap.Error(err)) return model.InternalError(fmt.Errorf("failed to deploy the config")) } @@ -256,7 +256,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr case ElementTypeDropRules: var filterConfig *filterprocessor.Config if err := yaml.Unmarshal([]byte(configVersion.LastConf), &filterConfig); err != nil { - zap.S().Error("failed to read last conf correctly", 
err) + zap.L().Error("failed to read last conf correctly", zap.Error(err)) return model.InternalError(fmt.Errorf("failed to read the stored config correctly")) } processorConf := map[string]interface{}{ @@ -266,7 +266,7 @@ func Redeploy(ctx context.Context, typ ElementTypeDef, version int) *model.ApiEr opamp.AddToMetricsPipelineSpec("filter") configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate) if err != nil { - zap.S().Error("failed to call agent config update for trace processor:", err) + zap.L().Error("failed to call agent config update for trace processor", zap.Error(err)) return err } @@ -292,13 +292,13 @@ func UpsertFilterProcessor(ctx context.Context, version int, config *filterproce opamp.AddToMetricsPipelineSpec("filter") configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate) if err != nil { - zap.S().Error("failed to call agent config update for trace processor:", err) + zap.L().Error("failed to call agent config update for trace processor", zap.Error(err)) return err } processorConfYaml, yamlErr := yaml.Marshal(config) if yamlErr != nil { - zap.S().Warnf("unexpected error while transforming processor config to yaml", yamlErr) + zap.L().Warn("unexpected error while transforming processor config to yaml", zap.Error(yamlErr)) } m.updateDeployStatus(ctx, ElementTypeDropRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml)) @@ -317,7 +317,7 @@ func (m *Manager) OnConfigUpdate(agentId string, hash string, err error) { message := "Deployment was successful" defer func() { - zap.S().Info(status, zap.String("agentId", agentId), zap.String("agentResponse", message)) + zap.L().Info(status, zap.String("agentId", agentId), zap.String("agentResponse", message)) }() if err != nil { @@ -343,13 +343,13 @@ func UpsertSamplingProcessor(ctx context.Context, version int, config *tsp.Confi opamp.AddToTracePipelineSpec("signoz_tail_sampling") 
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate) if err != nil { - zap.S().Error("failed to call agent config update for trace processor:", err) + zap.L().Error("failed to call agent config update for trace processor", zap.Error(err)) return err } processorConfYaml, yamlErr := yaml.Marshal(config) if yamlErr != nil { - zap.S().Warnf("unexpected error while transforming processor config to yaml", yamlErr) + zap.L().Warn("unexpected error while transforming processor config to yaml", zap.Error(yamlErr)) } m.updateDeployStatus(ctx, ElementTypeSamplingRules, version, string(DeployInitiated), "Deployment started", configHash, string(processorConfYaml)) diff --git a/pkg/query-service/app/clickhouseReader/options.go b/pkg/query-service/app/clickhouseReader/options.go index 0defced7ed..d92b5ee38f 100644 --- a/pkg/query-service/app/clickhouseReader/options.go +++ b/pkg/query-service/app/clickhouseReader/options.go @@ -106,7 +106,7 @@ func defaultConnector(cfg *namespaceConfig) (clickhouse.Conn, error) { options.DialTimeout = cfg.DialTimeout } - zap.S().Infof("Connecting to Clickhouse at %s, Secure: %t, MaxIdleConns: %d, MaxOpenConns: %d, DialTimeout: %s", options.Addr, options.TLS != nil, options.MaxIdleConns, options.MaxOpenConns, options.DialTimeout) + zap.L().Info("Connecting to Clickhouse", zap.String("at", options.Addr[0]), zap.Int("MaxIdleConns", options.MaxIdleConns), zap.Int("MaxOpenConns", options.MaxOpenConns), zap.Duration("DialTimeout", options.DialTimeout)) db, err := clickhouse.Open(options) if err != nil { return nil, err diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 00f3ca1ba6..a1b12d9415 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -141,8 +141,7 @@ func NewReader( db, err := initialize(options) if err != nil { - zap.S().Error("failed to initialize ClickHouse: ", 
err) - os.Exit(1) + zap.L().Fatal("failed to initialize ClickHouse", zap.Error(err)) } return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster) @@ -158,8 +157,8 @@ func NewReaderFromClickhouseConnection( ) *ClickHouseReader { alertManager, err := am.New("") if err != nil { - zap.S().Errorf("msg: failed to initialize alert manager: ", "/t error:", err) - zap.S().Errorf("msg: check if the alert manager URL is correctly set and valid") + zap.L().Error("failed to initialize alert manager", zap.Error(err)) + zap.L().Error("check if the alert manager URL is correctly set and valid") os.Exit(1) } @@ -347,20 +346,6 @@ func (r *ClickHouseReader) Start(readerReady chan bool) { reloadReady.Close() - // ! commented the alert manager can now - // call query service to do this - // channels, apiErrorObj := r.GetChannels() - - // if apiErrorObj != nil { - // zap.S().Errorf("Not able to read channels from DB") - // } - // for _, channel := range *channels { - // apiErrorObj = r.LoadChannel(&channel) - // if apiErrorObj != nil { - // zap.S().Errorf("Not able to load channel with id=%d loaded from DB", channel.Id, channel.Data) - // } - // } - <-cancel return nil @@ -444,14 +429,14 @@ func (r *ClickHouseReader) LoadChannel(channel *model.ChannelItem) *model.ApiErr response, err := http.Post(constants.GetAlertManagerApiPrefix()+"v1/receivers", "application/json", bytes.NewBuffer([]byte(channel.Data))) if err != nil { - zap.S().Errorf("Error in getting response of API call to alertmanager/v1/receivers\n", err) + zap.L().Error("Error in getting response of API call to alertmanager/v1/receivers", zap.Error(err)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } if response.StatusCode > 299 { responseData, _ := io.ReadAll(response.Body) - err := fmt.Errorf("Error in getting 2xx response in API call to alertmanager/v1/receivers\n Status: %s \n Data: %s", response.Status, string(responseData)) - zap.S().Error(err) + err := 
fmt.Errorf("Error in getting 2xx response in API call to alertmanager/v1/receivers") + zap.L().Error("Error in getting 2xx response in API call to alertmanager/v1/receivers", zap.String("Status", response.Status), zap.String("Data", string(responseData))) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -468,17 +453,15 @@ func (r *ClickHouseReader) GetChannel(id string) (*model.ChannelItem, *model.Api stmt, err := r.localDB.Preparex(query) - zap.S().Info(query, idInt) - if err != nil { - zap.S().Debug("Error in preparing sql query for GetChannel : ", err) + zap.L().Error("Error in preparing sql query for GetChannel", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } err = stmt.Get(&channel, idInt) if err != nil { - zap.S().Debug(fmt.Sprintf("Error in getting channel with id=%d : ", idInt), err) + zap.L().Error("Error in getting channel with id", zap.Int("id", idInt), zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -504,14 +487,14 @@ func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError { { stmt, err := tx.Prepare(`DELETE FROM notification_channels WHERE id=$1;`) if err != nil { - zap.S().Errorf("Error in preparing statement for INSERT to notification_channels\n", err) + zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err)) tx.Rollback() return &model.ApiError{Typ: model.ErrorInternal, Err: err} } defer stmt.Close() if _, err := stmt.Exec(idInt); err != nil { - zap.S().Errorf("Error in Executing prepared statement for INSERT to notification_channels\n", err) + zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err)) tx.Rollback() // return an error too, we may want to wrap them return &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -525,7 +508,7 @@ func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError { err = tx.Commit() if err != nil { - 
zap.S().Errorf("Error in committing transaction for DELETE command to notification_channels\n", err) + zap.L().Error("Error in committing transaction for DELETE command to notification_channels", zap.Error(err)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -541,10 +524,10 @@ func (r *ClickHouseReader) GetChannels() (*[]model.ChannelItem, *model.ApiError) err := r.localDB.Select(&channels, query) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -609,7 +592,7 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re // check if channel type is supported in the current user plan if err := r.featureFlags.CheckFeature(fmt.Sprintf("ALERT_CHANNEL_%s", strings.ToUpper(channel_type))); err != nil { - zap.S().Warn("an unsupported feature was blocked", err) + zap.L().Warn("an unsupported feature was blocked", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("unsupported feature. 
please upgrade your plan to access this feature")} } @@ -619,14 +602,14 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re stmt, err := tx.Prepare(`UPDATE notification_channels SET updated_at=$1, type=$2, data=$3 WHERE id=$4;`) if err != nil { - zap.S().Errorf("Error in preparing statement for UPDATE to notification_channels\n", err) + zap.L().Error("Error in preparing statement for UPDATE to notification_channels", zap.Error(err)) tx.Rollback() return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } defer stmt.Close() if _, err := stmt.Exec(time.Now(), channel_type, string(receiverString), idInt); err != nil { - zap.S().Errorf("Error in Executing prepared statement for UPDATE to notification_channels\n", err) + zap.L().Error("Error in Executing prepared statement for UPDATE to notification_channels", zap.Error(err)) tx.Rollback() // return an error too, we may want to wrap them return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -640,7 +623,7 @@ func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Re err = tx.Commit() if err != nil { - zap.S().Errorf("Error in committing transaction for INSERT to notification_channels\n", err) + zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -654,7 +637,7 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, * // check if channel type is supported in the current user plan if err := r.featureFlags.CheckFeature(fmt.Sprintf("ALERT_CHANNEL_%s", strings.ToUpper(channel_type))); err != nil { - zap.S().Warn("an unsupported feature was blocked", err) + zap.L().Warn("an unsupported feature was blocked", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("unsupported feature. 
please upgrade your plan to access this feature")} } @@ -668,14 +651,14 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, * { stmt, err := tx.Prepare(`INSERT INTO notification_channels (created_at, updated_at, name, type, data) VALUES($1,$2,$3,$4,$5);`) if err != nil { - zap.S().Errorf("Error in preparing statement for INSERT to notification_channels\n", err) + zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err)) tx.Rollback() return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } defer stmt.Close() if _, err := stmt.Exec(time.Now(), time.Now(), receiver.Name, channel_type, string(receiverString)); err != nil { - zap.S().Errorf("Error in Executing prepared statement for INSERT to notification_channels\n", err) + zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err)) tx.Rollback() // return an error too, we may want to wrap them return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -689,7 +672,7 @@ func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, * err = tx.Commit() if err != nil { - zap.S().Errorf("Error in committing transaction for INSERT to notification_channels\n", err) + zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -742,10 +725,10 @@ func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, erro rows, err := r.db.Query(ctx, query) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, fmt.Errorf("Error in processing sql query") } @@ -773,7 +756,7 @@ func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context, skipConfig rows, err := r.db.Query(ctx, query) if err != nil { - zap.S().Error("Error 
in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")} } @@ -874,7 +857,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G query += subQuery args = append(args, argsSubQuery...) if errStatus != nil { - zap.S().Error("Error in processing sql query: ", errStatus) + zap.L().Error("Error in processing sql query", zap.Error(errStatus)) return } err := r.db.QueryRow( @@ -888,19 +871,19 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G } if err != nil { - zap.S().Error("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return } subQuery, argsSubQuery, errStatus = buildQueryWithTagParams(ctx, tags) if errStatus != nil { - zap.S().Error("Error building query with tag params: ", err) + zap.L().Error("Error building query with tag params", zap.Error(errStatus)) return } query += subQuery args = append(args, argsSubQuery...) err = r.db.QueryRow(ctx, errorQuery, args...).Scan(&numErrors) if err != nil { - zap.S().Error("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return } @@ -966,11 +949,11 @@ func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams * query += " GROUP BY time ORDER BY time DESC" err := r.db.Select(ctx, &serviceOverviewItems, query, args...) 
- zap.S().Debug(query) + zap.L().Debug("running query", zap.String("query", query)) if err != nil { - zap.S().Error("Error in processing sql query: ", err) - return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} + zap.L().Error("Error in processing sql query", zap.Error(err)) + return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")} } serviceErrorItems := []model.ServiceErrorItem{} @@ -994,10 +977,8 @@ func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams * query += " GROUP BY time ORDER BY time DESC" err = r.db.Select(ctx, &serviceErrorItems, query, args...) - zap.S().Debug(query) - if err != nil { - zap.S().Error("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } @@ -1133,10 +1114,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " GROUP BY serviceName" var dBResponse []model.DBResponseServiceName err := r.db.Select(ctx, &dBResponse, finalQuery, args...) - zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } for _, service := range dBResponse { @@ -1150,10 +1131,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " GROUP BY httpCode" var dBResponse []model.DBResponseHttpCode err := r.db.Select(ctx, &dBResponse, finalQuery, args...) 
- zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } for _, service := range dBResponse { @@ -1167,10 +1148,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " GROUP BY httpRoute" var dBResponse []model.DBResponseHttpRoute err := r.db.Select(ctx, &dBResponse, finalQuery, args...) - zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } for _, service := range dBResponse { @@ -1184,10 +1165,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " GROUP BY httpUrl" var dBResponse []model.DBResponseHttpUrl err := r.db.Select(ctx, &dBResponse, finalQuery, args...) - zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } for _, service := range dBResponse { @@ -1201,10 +1182,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " GROUP BY httpMethod" var dBResponse []model.DBResponseHttpMethod err := r.db.Select(ctx, &dBResponse, finalQuery, args...) 
- zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } for _, service := range dBResponse { @@ -1218,10 +1199,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " GROUP BY httpHost" var dBResponse []model.DBResponseHttpHost err := r.db.Select(ctx, &dBResponse, finalQuery, args...) - zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } for _, service := range dBResponse { @@ -1235,10 +1216,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " GROUP BY name" var dBResponse []model.DBResponseOperation err := r.db.Select(ctx, &dBResponse, finalQuery, args...) - zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } for _, service := range dBResponse { @@ -1252,10 +1233,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " GROUP BY component" var dBResponse []model.DBResponseComponent err := r.db.Select(ctx, &dBResponse, finalQuery, args...) 
- zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } for _, service := range dBResponse { @@ -1268,10 +1249,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += query var dBResponse []model.DBResponseTotal err := r.db.Select(ctx, &dBResponse, finalQuery, args...) - zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } @@ -1279,10 +1260,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery2 += query var dBResponse2 []model.DBResponseTotal err = r.db.Select(ctx, &dBResponse2, finalQuery2, args...) - zap.S().Info(finalQuery2) + zap.L().Info(finalQuery2) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } if len(dBResponse) > 0 && len(dBResponse2) > 0 { @@ -1304,9 +1285,9 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += query var dBResponse []model.DBResponseMinMax err = r.db.Select(ctx, &dBResponse, finalQuery, args...) 
- zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } if len(dBResponse) > 0 { @@ -1319,10 +1300,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " ORDER BY durationNano LIMIT 1" var dBResponse []model.DBResponseTotal err = r.db.Select(ctx, &dBResponse, finalQuery, args...) - zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } @@ -1331,10 +1312,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " ORDER BY durationNano DESC LIMIT 1" var dBResponse2 []model.DBResponseTotal err = r.db.Select(ctx, &dBResponse2, finalQuery, args...) - zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)} } if len(dBResponse) > 0 { @@ -1350,10 +1331,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " GROUP BY rpcMethod" var dBResponse []model.DBResponseRPCMethod err := r.db.Select(ctx, &dBResponse, finalQuery, args...) 
- zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)} } for _, service := range dBResponse { @@ -1368,10 +1349,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += " GROUP BY responseStatusCode" var dBResponse []model.DBResponseStatusCodeMethod err := r.db.Select(ctx, &dBResponse, finalQuery, args...) - zap.S().Info(finalQuery) + zap.L().Info(finalQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)} } for _, service := range dBResponse { @@ -1496,10 +1477,10 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo projectionOptQuery := "SET allow_experimental_projection_optimization = 1" err := r.db.Exec(ctx, projectionOptQuery) - zap.S().Info(projectionOptQuery) + zap.L().Info(projectionOptQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } if queryParams.Order == constants.Descending { @@ -1534,10 +1515,10 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo } } - zap.S().Info(baseQuery) + zap.L().Info(baseQuery) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } @@ -1774,10 +1755,10 @@ func (r *ClickHouseReader) GetTagFilters(ctx context.Context, 
queryParams *model finalQuery += query err := r.db.Select(ctx, &tagFilters, finalQuery, args...) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } tagFiltersResult := model.TagFilters{ @@ -1896,10 +1877,10 @@ func (r *ClickHouseReader) GetTagValues(ctx context.Context, queryParams *model. args = append(args, clickhouse.Named("limit", queryParams.Limit)) err := r.db.Select(ctx, &tagValues, finalQuery, args...) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } @@ -1958,10 +1939,8 @@ func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *mo } err := r.db.Select(ctx, &topOperationsItems, query, args...) - zap.S().Debug(query) - if err != nil { - zap.S().Error("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")} } @@ -1990,10 +1969,10 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetU err := r.db.Select(ctx, &usageItems, query, namedArgs...) 
- zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, fmt.Errorf("Error in processing sql query") } @@ -2018,14 +1997,14 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spa err := r.db.Select(ctx, &searchScanResponses, query, traceId) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) - return nil, fmt.Errorf("Error in processing sql query") + zap.L().Error("Error in processing sql query", zap.Error(err)) + return nil, fmt.Errorf("error in processing sql query") } end := time.Now() - zap.S().Debug("getTraceSQLQuery took: ", end.Sub(start)) + zap.L().Debug("getTraceSQLQuery took: ", zap.Duration("duration", end.Sub(start))) searchSpansResult := []model.SearchSpansResult{{ Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"}, Events: make([][]interface{}, len(searchScanResponses)), @@ -2041,7 +2020,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spa searchSpanResponses = append(searchSpanResponses, jsonItem) } end = time.Now() - zap.S().Debug("getTraceSQLQuery unmarshal took: ", end.Sub(start)) + zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start))) err = r.featureFlags.CheckFeature(model.SmartTraceDetail) smartAlgoEnabled := err == nil @@ -2052,7 +2031,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spa return nil, err } end = time.Now() - zap.S().Debug("smartTraceAlgo took: ", end.Sub(start)) + zap.L().Debug("smartTraceAlgo took: ", zap.Duration("duration", end.Sub(start))) } else { for i, item := range searchSpanResponses { spanEvents := item.GetValues() @@ -2099,12 +2078,12 @@ func (r *ClickHouseReader) 
GetDependencyGraph(ctx context.Context, queryParams * query += filterQuery + " GROUP BY src, dest;" args = append(args, filterArgs...) - zap.S().Debug(query, args) + zap.L().Debug("GetDependencyGraph query", zap.String("query", query), zap.Any("args", args)) err := r.db.Select(ctx, &response, query, args...) if err != nil { - zap.S().Error("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, fmt.Errorf("error in processing sql query %w", err) } @@ -2252,10 +2231,10 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query err := r.db.Select(ctx, &SpanAggregatesDBResponseItems, query, args...) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } @@ -2338,7 +2317,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, go func(tableName string) { _, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration) if dbErr != nil { - zap.S().Error(fmt.Errorf("Error in inserting to ttl_status table: %s", dbErr.Error())) + zap.L().Error("Error in inserting to ttl_status table", zap.Error(dbErr)) return } req := fmt.Sprintf( @@ -2350,32 +2329,32 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, } err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume) if err != nil { - zap.S().Error(fmt.Errorf("Error in setting cold storage: %s", err.Err.Error())) + zap.L().Error("Error in setting cold storage", zap.Error(err)) statusItem, err := r.checkTTLStatusItem(ctx, tableName) if err == nil { _, dbErr := r.localDB.Exec("UPDATE 
ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id) if dbErr != nil { - zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr) + zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) return } } return } req += fmt.Sprint(" SETTINGS distributed_ddl_task_timeout = -1;") - zap.S().Debugf("Executing TTL request: %s\n", req) + zap.L().Error("Executing TTL request: ", zap.String("request", req)) statusItem, _ := r.checkTTLStatusItem(ctx, tableName) if err := r.db.Exec(context.Background(), req); err != nil { - zap.S().Error(fmt.Errorf("Error in executing set TTL query: %s", err.Error())) + zap.L().Error("Error in executing set TTL query", zap.Error(err)) _, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id) if dbErr != nil { - zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr) + zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) return } return } _, dbErr = r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? 
WHERE id = ?", time.Now(), constants.StatusSuccess, statusItem.Id) if dbErr != nil { - zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr) + zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) return } }(tableName) @@ -2393,7 +2372,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, go func(tableName string) { _, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration) if dbErr != nil { - zap.S().Error(fmt.Errorf("Error in inserting to ttl_status table: %s", dbErr.Error())) + zap.L().Error("Error in inserting to ttl_status table", zap.Error(dbErr)) return } req := fmt.Sprintf( @@ -2406,32 +2385,32 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, } err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume) if err != nil { - zap.S().Error(fmt.Errorf("Error in setting cold storage: %s", err.Err.Error())) + zap.L().Error("Error in setting cold storage", zap.Error(err)) statusItem, err := r.checkTTLStatusItem(ctx, tableName) if err == nil { _, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id) if dbErr != nil { - zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr) + zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) return } } return } req += fmt.Sprint(" SETTINGS distributed_ddl_task_timeout = -1") - zap.S().Debugf("Executing TTL request: %s\n", req) + zap.L().Info("Executing TTL request: ", zap.String("request", req)) statusItem, _ := r.checkTTLStatusItem(ctx, tableName) if err := r.db.Exec(ctx, req); err != nil { - zap.S().Error(fmt.Errorf("error while setting ttl. 
Err=%v", err)) + zap.L().Error("error while setting ttl.", zap.Error(err)) _, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id) if dbErr != nil { - zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr) + zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) return } return } _, dbErr = r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusSuccess, statusItem.Id) if dbErr != nil { - zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr) + zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) return } }(tableName) @@ -2447,7 +2426,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, go func(tableName string) { _, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration) if dbErr != nil { - zap.S().Error(fmt.Errorf("error in inserting to ttl_status table: %s", dbErr.Error())) + zap.L().Error("error in inserting to ttl_status table", zap.Error(dbErr)) return } req := fmt.Sprintf( @@ -2460,32 +2439,32 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, } err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume) if err != nil { - zap.S().Error(fmt.Errorf("error in setting cold storage: %s", err.Err.Error())) + zap.L().Error("error in setting cold storage", zap.Error(err)) statusItem, err := r.checkTTLStatusItem(ctx, tableName) if err == nil { _, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? 
WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id) if dbErr != nil { - zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr) + zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) return } } return } req += fmt.Sprint(" SETTINGS distributed_ddl_task_timeout = -1") - zap.S().Debugf("Executing TTL request: %s\n", req) + zap.L().Info("Executing TTL request: ", zap.String("request", req)) statusItem, _ := r.checkTTLStatusItem(ctx, tableName) if err := r.db.Exec(ctx, req); err != nil { - zap.S().Error(fmt.Errorf("error while setting ttl. Err=%v", err)) + zap.L().Error("error while setting ttl", zap.Error(err)) _, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id) if dbErr != nil { - zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr) + zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) return } return } _, dbErr = r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? 
WHERE id = ?", time.Now(), constants.StatusSuccess, statusItem.Id) if dbErr != nil { - zap.S().Debug("Error in processing ttl_status update sql query: ", dbErr) + zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) return } }(tableName) @@ -2501,7 +2480,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, numberOfTransactionsStore int) { _, err := r.localDB.Exec("DELETE FROM ttl_status WHERE transaction_id NOT IN (SELECT distinct transaction_id FROM ttl_status ORDER BY created_at DESC LIMIT ?)", numberOfTransactionsStore) if err != nil { - zap.S().Debug("Error in processing ttl_status delete sql query: ", err) + zap.L().Error("Error in processing ttl_status delete sql query", zap.Error(err)) } } @@ -2511,12 +2490,12 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, tableName str query := `SELECT id, status, ttl, cold_storage_ttl FROM ttl_status WHERE table_name = ? 
ORDER BY created_at DESC` - zap.S().Info(query, tableName) + zap.L().Info("checkTTLStatusItem query", zap.String("query", query), zap.String("tableName", tableName)) stmt, err := r.localDB.Preparex(query) if err != nil { - zap.S().Debug("Error preparing query for checkTTLStatusItem: ", err) + zap.L().Error("Error preparing query for checkTTLStatusItem", zap.Error(err)) return model.TTLStatusItem{}, &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -2526,7 +2505,7 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, tableName str return model.TTLStatusItem{}, nil } if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return model.TTLStatusItem{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing ttl_status check sql query")} } return statusItem[0], nil @@ -2567,9 +2546,9 @@ func (r *ClickHouseReader) setColdStorage(ctx context.Context, tableName string, if len(coldStorageVolume) > 0 { policyReq := fmt.Sprintf("ALTER TABLE %s ON CLUSTER %s MODIFY SETTING storage_policy='tiered'", tableName, r.cluster) - zap.S().Debugf("Executing Storage policy request: %s\n", policyReq) + zap.L().Info("Executing Storage policy request: ", zap.String("request", policyReq)) if err := r.db.Exec(ctx, policyReq); err != nil { - zap.S().Error(fmt.Errorf("error while setting storage policy. Err=%v", err)) + zap.L().Error("error while setting storage policy", zap.Error(err)) return &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while setting storage policy. 
Err=%v", err)} } } @@ -2582,12 +2561,10 @@ func (r *ClickHouseReader) GetDisks(ctx context.Context) (*[]model.DiskItem, *mo query := "SELECT name,type FROM system.disks" if err := r.db.Select(ctx, &diskItems, query); err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting disks. Err=%v", err)} } - zap.S().Infof("Got response: %+v\n", diskItems) - return &diskItems, nil } @@ -2605,7 +2582,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa parseTTL := func(queryResp string) (int, int) { - zap.S().Debugf("Parsing TTL from: %s", queryResp) + zap.L().Info("Parsing TTL from: ", zap.String("queryResp", queryResp)) deleteTTLExp := regexp.MustCompile(`toIntervalSecond\(([0-9]*)\)`) moveTTLExp := regexp.MustCompile(`toIntervalSecond\(([0-9]*)\) TO VOLUME`) @@ -2640,7 +2617,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa err := r.db.Select(ctx, &dbResp, query) if err != nil { - zap.S().Error(fmt.Errorf("error while getting ttl. Err=%v", err)) + zap.L().Error("error while getting ttl", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)} } if len(dbResp) == 0 { @@ -2658,7 +2635,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa err := r.db.Select(ctx, &dbResp, query) if err != nil { - zap.S().Error(fmt.Errorf("error while getting ttl. Err=%v", err)) + zap.L().Error("error while getting ttl", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. 
Err=%v", err)} } if len(dbResp) == 0 { @@ -2676,7 +2653,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa err := r.db.Select(ctx, &dbResp, query) if err != nil { - zap.S().Error(fmt.Errorf("error while getting ttl. Err=%v", err)) + zap.L().Error("error while getting ttl", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)} } if len(dbResp) == 0 { @@ -2798,7 +2775,7 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li args = append(args, argsSubQuery...) if errStatus != nil { - zap.S().Error("Error in processing tags: ", errStatus) + zap.L().Error("Error in processing tags", zap.Error(errStatus)) return nil, errStatus } query = query + " GROUP BY groupID" @@ -2826,10 +2803,10 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li } err := r.db.Select(ctx, &getErrorResponses, query, args...) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } @@ -2858,15 +2835,15 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C args = append(args, argsSubQuery...) 
if errStatus != nil { - zap.S().Error("Error in processing tags: ", errStatus) + zap.L().Error("Error in processing tags", zap.Error(errStatus)) return 0, errStatus } err := r.db.QueryRow(ctx, query, args...).Scan(&errorCount) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return 0, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } @@ -2876,7 +2853,7 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) { if queryParams.ErrorID == "" { - zap.S().Debug("errorId missing from params") + zap.L().Error("errorId missing from params") return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("ErrorID missing from params")} } var getErrorWithSpanReponse []model.ErrorWithSpan @@ -2885,10 +2862,10 @@ func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams args := []interface{}{clickhouse.Named("errorID", queryParams.ErrorID), clickhouse.Named("groupID", queryParams.GroupID), clickhouse.Named("timestamp", strconv.FormatInt(queryParams.Timestamp.UnixNano(), 10))} err := r.db.Select(ctx, &getErrorWithSpanReponse, query, args...) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } @@ -2909,10 +2886,10 @@ func (r *ClickHouseReader) GetErrorFromGroupID(ctx context.Context, queryParams err := r.db.Select(ctx, &getErrorWithSpanReponse, query, args...) 
- zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } @@ -2927,7 +2904,7 @@ func (r *ClickHouseReader) GetErrorFromGroupID(ctx context.Context, queryParams func (r *ClickHouseReader) GetNextPrevErrorIDs(ctx context.Context, queryParams *model.GetErrorParams) (*model.NextPrevErrorIDs, *model.ApiError) { if queryParams.ErrorID == "" { - zap.S().Debug("errorId missing from params") + zap.L().Error("errorId missing from params") return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("ErrorID missing from params")} } var err *model.ApiError @@ -2936,12 +2913,12 @@ func (r *ClickHouseReader) GetNextPrevErrorIDs(ctx context.Context, queryParams } getNextPrevErrorIDsResponse.NextErrorID, getNextPrevErrorIDsResponse.NextTimestamp, err = r.getNextErrorID(ctx, queryParams) if err != nil { - zap.S().Debug("Unable to get next error ID due to err: ", err) + zap.L().Error("Unable to get next error ID due to err: ", zap.Error(err)) return nil, err } getNextPrevErrorIDsResponse.PrevErrorID, getNextPrevErrorIDsResponse.PrevTimestamp, err = r.getPrevErrorID(ctx, queryParams) if err != nil { - zap.S().Debug("Unable to get prev error ID due to err: ", err) + zap.L().Error("Unable to get prev error ID due to err: ", zap.Error(err)) return nil, err } return &getNextPrevErrorIDsResponse, nil @@ -2957,17 +2934,17 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode err := r.db.Select(ctx, &getNextErrorIDReponse, query, args...) 
- zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } if len(getNextErrorIDReponse) == 0 { - zap.S().Info("NextErrorID not found") + zap.L().Info("NextErrorID not found") return "", time.Time{}, nil } else if len(getNextErrorIDReponse) == 1 { - zap.S().Info("NextErrorID found") + zap.L().Info("NextErrorID found") return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil } else { if getNextErrorIDReponse[0].Timestamp.UnixNano() == getNextErrorIDReponse[1].Timestamp.UnixNano() { @@ -2978,10 +2955,10 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode err := r.db.Select(ctx, &getNextErrorIDReponse, query, args...) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } if len(getNextErrorIDReponse) == 0 { @@ -2992,26 +2969,26 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode err := r.db.Select(ctx, &getNextErrorIDReponse, query, args...) 
- zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } if len(getNextErrorIDReponse) == 0 { - zap.S().Info("NextErrorID not found") + zap.L().Info("NextErrorID not found") return "", time.Time{}, nil } else { - zap.S().Info("NextErrorID found") + zap.L().Info("NextErrorID found") return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil } } else { - zap.S().Info("NextErrorID found") + zap.L().Info("NextErrorID found") return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil } } else { - zap.S().Info("NextErrorID found") + zap.L().Info("NextErrorID found") return getNextErrorIDReponse[0].NextErrorID, getNextErrorIDReponse[0].NextTimestamp, nil } } @@ -3026,17 +3003,17 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode err := r.db.Select(ctx, &getPrevErrorIDReponse, query, args...) 
- zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } if len(getPrevErrorIDReponse) == 0 { - zap.S().Info("PrevErrorID not found") + zap.L().Info("PrevErrorID not found") return "", time.Time{}, nil } else if len(getPrevErrorIDReponse) == 1 { - zap.S().Info("PrevErrorID found") + zap.L().Info("PrevErrorID found") return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil } else { if getPrevErrorIDReponse[0].Timestamp.UnixNano() == getPrevErrorIDReponse[1].Timestamp.UnixNano() { @@ -3047,10 +3024,10 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode err := r.db.Select(ctx, &getPrevErrorIDReponse, query, args...) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } if len(getPrevErrorIDReponse) == 0 { @@ -3061,26 +3038,26 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode err := r.db.Select(ctx, &getPrevErrorIDReponse, query, args...) 
- zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")} } if len(getPrevErrorIDReponse) == 0 { - zap.S().Info("PrevErrorID not found") + zap.L().Info("PrevErrorID not found") return "", time.Time{}, nil } else { - zap.S().Info("PrevErrorID found") + zap.L().Info("PrevErrorID found") return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil } } else { - zap.S().Info("PrevErrorID found") + zap.L().Info("PrevErrorID found") return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil } } else { - zap.S().Info("PrevErrorID found") + zap.L().Info("PrevErrorID found") return getPrevErrorIDReponse[0].PrevErrorID, getPrevErrorIDReponse[0].PrevTimestamp, nil } } @@ -3111,7 +3088,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteTagKey(ctx context.Context, para } if err != nil { - zap.S().Error(err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } @@ -3150,7 +3127,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteTagValue(ctx context.Context, pa } if err != nil { - zap.S().Error(err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } @@ -3180,7 +3157,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteMetricNames(ctx context.Context, rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", matchText)) if err != nil { - zap.S().Error(err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } @@ -3198,7 +3175,7 @@ func (r *ClickHouseReader) GetMetricAutocompleteMetricNames(ctx context.Context, } func (r *ClickHouseReader) GetMetricResultEE(ctx 
context.Context, query string) ([]*model.Series, string, error) { - zap.S().Error("GetMetricResultEE is not implemented for opensource version") + zap.L().Error("GetMetricResultEE is not implemented for opensource version") return nil, "", fmt.Errorf("GetMetricResultEE is not implemented for opensource version") } @@ -3207,12 +3184,12 @@ func (r *ClickHouseReader) GetMetricResult(ctx context.Context, query string) ([ defer utils.Elapsed("GetMetricResult")() - zap.S().Infof("Executing metric result query: %s", query) + zap.L().Info("Executing metric result query: ", zap.String("query", query)) rows, err := r.db.Query(ctx, query) if err != nil { - zap.S().Debug("Error in processing query: ", err) + zap.L().Error("Error in processing query", zap.Error(err)) return nil, err } @@ -3289,7 +3266,7 @@ func (r *ClickHouseReader) GetMetricResult(ctx context.Context, query string) ([ groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()) } default: - zap.S().Errorf("invalid var found in metric builder query result", v, colName) + zap.L().Error("invalid var found in metric builder query result", zap.Any("v", v), zap.String("colName", colName)) } } sort.Strings(groupBy) @@ -3457,8 +3434,7 @@ func (r *ClickHouseReader) GetTagsInfoInLastHeartBeatInterval(ctx context.Contex err := r.db.Select(ctx, &tagTelemetryDataList, queryStr) if err != nil { - zap.S().Info(queryStr) - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query: ", zap.Error(err)) return nil, err } @@ -3515,7 +3491,7 @@ func (r *ClickHouseReader) GetDashboardsInfo(ctx context.Context) (*model.Dashbo var dashboardsData []dashboards.Dashboard err := r.localDB.Select(&dashboardsData, query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return &dashboardsInfo, err } totalDashboardsWithPanelAndName := 0 @@ -3601,14 +3577,14 @@ func (r *ClickHouseReader) 
GetAlertsInfo(ctx context.Context) (*model.AlertsInfo var alertsData []string err := r.localDB.Select(&alertsData, query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return &alertsInfo, err } for _, alert := range alertsData { var rule rules.GettableRule err = json.Unmarshal([]byte(alert), &rule) if err != nil { - zap.S().Errorf("msg:", "invalid rule data", "\t err:", err) + zap.L().Error("invalid rule data", zap.Error(err)) continue } if rule.AlertType == "LOGS_BASED_ALERT" { @@ -3837,7 +3813,6 @@ func (r *ClickHouseReader) GetLogs(ctx context.Context, params *model.LogsFilter } query = fmt.Sprintf("%s order by %s %s limit %d", query, params.OrderBy, params.Order, params.Limit) - zap.S().Debug(query) err = r.db.Select(ctx, &response, query) if err != nil { return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal} @@ -3897,7 +3872,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC case <-ctx.Done(): done := true client.Done <- &done - zap.S().Debug("closing go routine : " + client.Name) + zap.L().Debug("closing go routine : " + client.Name) return case <-ticker.C: // get the new 100 logs as anything more older won't make sense @@ -3909,11 +3884,10 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC tmpQuery = fmt.Sprintf("%s and id > '%s'", tmpQuery, idStart) } tmpQuery = fmt.Sprintf("%s order by timestamp desc, id desc limit 100", tmpQuery) - zap.S().Debug(tmpQuery) response := []model.SignozLog{} err := r.db.Select(ctx, &response, tmpQuery) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while getting logs", zap.Error(err)) client.Error <- err return } @@ -3922,7 +3896,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC case <-ctx.Done(): done := true client.Done <- &done - zap.S().Debug("closing go routine while sending logs : " + client.Name) + 
zap.L().Debug("closing go routine while sending logs : " + client.Name) return default: client.Logs <- &response[i] @@ -3987,7 +3961,6 @@ func (r *ClickHouseReader) AggregateLogs(ctx context.Context, params *model.Logs query = fmt.Sprintf("%s GROUP BY ts_start_interval ORDER BY ts_start_interval", query) } - zap.S().Debug(query) err = r.db.Select(ctx, &logAggregatesDBResponseItems, query) if err != nil { return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal} @@ -4026,10 +3999,10 @@ func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string) var result model.DashboardVar rows, err := r.db.Query(ctx, query) - zap.S().Info(query) + zap.L().Info(query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, err } @@ -4072,7 +4045,7 @@ func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, req rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText)) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -4121,7 +4094,7 @@ func (r *ClickHouseReader) GetMetricAttributeKeys(ctx context.Context, req *v3.F } rows, err = r.db.Query(ctx, query, req.AggregateAttribute, common.PastDayRoundOff(), fmt.Sprintf("%%%s%%", req.SearchText)) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -4157,7 +4130,7 @@ func (r *ClickHouseReader) GetMetricAttributeValues(ctx context.Context, req *v3 rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, req.AggregateAttribute, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), common.PastDayRoundOff()) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", 
zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -4179,7 +4152,7 @@ func (r *ClickHouseReader) GetLatencyMetricMetadata(ctx context.Context, metricN query := fmt.Sprintf("SELECT DISTINCT(temporality) from %s.%s WHERE metric_name='%s' AND JSONExtractString(labels, 'service_name') = '%s'", signozMetricDBName, signozTSTableName, metricName, serviceName) rows, err := r.db.Query(ctx, query, metricName) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -4198,7 +4171,7 @@ func (r *ClickHouseReader) GetLatencyMetricMetadata(ctx context.Context, metricN query = fmt.Sprintf("SELECT DISTINCT(JSONExtractString(labels, 'le')) as le from %s.%s WHERE metric_name='%s' AND JSONExtractString(labels, 'service_name') = '%s' ORDER BY le", signozMetricDBName, signozTSTableName, metricName, serviceName) rows, err = r.db.Query(ctx, query, metricName) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -4214,7 +4187,7 @@ func (r *ClickHouseReader) GetLatencyMetricMetadata(ctx context.Context, metricN // ideally this should not happen but we have seen ClickHouse // returning empty string for some values if err != nil { - zap.S().Error("error while parsing le value: ", err) + zap.L().Error("error while parsing le value", zap.Error(err)) continue } if math.IsInf(le, 0) { @@ -4236,7 +4209,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se query := fmt.Sprintf("SELECT DISTINCT temporality, description, type, unit, is_monotonic from %s.%s WHERE metric_name=$1", signozMetricDBName, signozTSTableNameV41Day) rows, err := r.db.Query(ctx, query, metricName) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while 
fetching metric metadata", zap.Error(err)) return nil, fmt.Errorf("error while fetching metric metadata: %s", err.Error()) } defer rows.Close() @@ -4255,7 +4228,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se query = fmt.Sprintf("SELECT DISTINCT(JSONExtractString(labels, 'le')) as le from %s.%s WHERE metric_name=$1 AND type = 'Histogram' AND JSONExtractString(labels, 'service_name') = $2 ORDER BY le", signozMetricDBName, signozTSTableNameV41Day) rows, err = r.db.Query(ctx, query, metricName, serviceName) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -4271,7 +4244,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, se // ideally this should not happen but we have seen ClickHouse // returning empty string for some values if err != nil { - zap.S().Error("error while parsing le value: ", err) + zap.L().Error("error while parsing le value", zap.Error(err)) continue } if math.IsInf(le, 0) { @@ -4405,7 +4378,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, tagDataType from %s.%s WHERE %s limit $2", r.logsDB, r.logsTagAttributeTable, where) rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -4459,7 +4432,7 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt } if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -4571,7 +4544,7 @@ func (r *ClickHouseReader) 
GetLogAttributeValues(ctx context.Context, req *v3.Fi } if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -4632,7 +4605,7 @@ func readRow(vars []interface{}, columnNames []string) ([]string, map[string]str var metric map[string]string err := json.Unmarshal([]byte(*v), &metric) if err != nil { - zap.S().Errorf("unexpected error encountered %v", err) + zap.L().Error("unexpected error encountered", zap.Error(err)) } for key, val := range metric { groupBy = append(groupBy, val) @@ -4688,7 +4661,7 @@ func readRow(vars []interface{}, columnNames []string) ([]string, map[string]str groupAttributes[colName] = fmt.Sprintf("%v", *v) default: - zap.S().Errorf("unsupported var type %v found in query builder query result for column %s", v, colName) + zap.L().Error("unsupported var type found in query builder query result", zap.Any("v", v), zap.String("colName", colName)) } } return groupBy, groupAttributes, groupAttributesArray, point @@ -4786,7 +4759,7 @@ func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query stri rows, err := r.db.Query(ctx, query) if err != nil { - zap.S().Errorf("error while reading time series result %v", err) + zap.L().Error("error while reading time series result", zap.Error(err)) return nil, err } defer rows.Close() @@ -4811,7 +4784,7 @@ func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([ rows, err := r.db.Query(ctx, query) if err != nil { - zap.S().Errorf("error while reading time series result %v", err) + zap.L().Error("error while reading time series result", zap.Error(err)) return nil, err } defer rows.Close() @@ -4954,7 +4927,7 @@ func (r *ClickHouseReader) GetTraceAggregateAttributes(ctx context.Context, req rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText)) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing 
query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -4995,7 +4968,7 @@ func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.Fi rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText)) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -5049,7 +5022,7 @@ func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3. query = fmt.Sprintf("SELECT DISTINCT stringTagValue from %s.%s WHERE tagKey = $1 AND stringTagValue ILIKE $2 AND tagType=$3 limit $4", r.TraceDB, r.spanAttributeTable) rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), req.TagType, req.Limit) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -5065,7 +5038,7 @@ func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3. 
query = fmt.Sprintf("SELECT DISTINCT float64TagValue from %s.%s where tagKey = $1 AND toString(float64TagValue) ILIKE $2 AND tagType=$3 limit $4", r.TraceDB, r.spanAttributeTable) rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), req.TagType, req.Limit) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -5099,7 +5072,7 @@ func (r *ClickHouseReader) GetSpanAttributeKeys(ctx context.Context) (map[string rows, err = r.db.Query(ctx, query) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while executing query", zap.Error(err)) return nil, fmt.Errorf("error while executing query: %s", err.Error()) } defer rows.Close() @@ -5137,7 +5110,7 @@ func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, tim case <-ctx.Done(): done := true client.Done <- &done - zap.S().Debug("closing go routine : " + client.Name) + zap.L().Debug("closing go routine : " + client.Name) return case <-ticker.C: // get the new 100 logs as anything more older won't make sense @@ -5152,7 +5125,7 @@ func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, tim response := []model.SignozLog{} err := r.db.Select(ctx, &response, tmpQuery) if err != nil { - zap.S().Error(err) + zap.L().Error("Error while getting logs", zap.Error(err)) client.Error <- err return } diff --git a/pkg/query-service/app/dashboards/model.go b/pkg/query-service/app/dashboards/model.go index 6e777f49c9..c69f30a6bd 100644 --- a/pkg/query-service/app/dashboards/model.go +++ b/pkg/query-service/app/dashboards/model.go @@ -196,7 +196,7 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf mapData, err := json.Marshal(dash.Data) if err != nil { - zap.S().Errorf("Error in marshalling data field in dashboard: ", dash, err) + zap.L().Error("Error in marshalling data field 
in dashboard: ", zap.Any("dashboard", dash), zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } @@ -212,7 +212,7 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf dash.Uuid, dash.CreatedAt, userEmail, dash.UpdatedAt, userEmail, mapData) if err != nil { - zap.S().Errorf("Error in inserting dashboard data: ", dash, err) + zap.L().Error("Error in inserting dashboard data: ", zap.Any("dashboard", dash), zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } lastInsertId, err := result.LastInsertId() @@ -246,7 +246,7 @@ func DeleteDashboard(ctx context.Context, uuid string, fm interfaces.FeatureLook dashboard, dErr := GetDashboard(ctx, uuid) if dErr != nil { - zap.S().Errorf("Error in getting dashboard: ", uuid, dErr) + zap.L().Error("Error in getting dashboard: ", zap.String("uuid", uuid), zap.Any("error", dErr)) return dErr } @@ -296,7 +296,7 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface mapData, err := json.Marshal(data) if err != nil { - zap.S().Errorf("Error in marshalling data field in dashboard: ", data, err) + zap.L().Error("Error in marshalling data field in dashboard: ", zap.Any("data", data), zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err} } @@ -337,7 +337,7 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface dashboard.UpdatedAt, userEmail, mapData, dashboard.Uuid) if err != nil { - zap.S().Errorf("Error in inserting dashboard data: ", data, err) + zap.L().Error("Error in inserting dashboard data", zap.Any("data", data), zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } if existingCount != newCount { @@ -358,7 +358,7 @@ func LockUnlockDashboard(ctx context.Context, uuid string, lock bool) *model.Api _, err := db.Exec(query, uuid) if err != nil { - zap.S().Errorf("Error in updating dashboard: ", uuid, err) + zap.L().Error("Error in updating 
dashboard", zap.String("uuid", uuid), zap.Error(err)) return &model.ApiError{Typ: model.ErrorExec, Err: err} } @@ -370,10 +370,10 @@ func updateFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiErro if err != nil { switch err.(type) { case model.ErrFeatureUnavailable: - zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) + zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) return model.BadRequest(err) default: - zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) + zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) return model.BadRequest(err) } } @@ -397,10 +397,10 @@ func checkFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiError if err != nil { switch err.(type) { case model.ErrFeatureUnavailable: - zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) + zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) return model.BadRequest(err) default: - zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) + zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) return model.BadRequest(err) } } @@ -535,13 +535,13 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard if template.Type == "query" { if template.Datasource == nil { - zap.S().Warnf("Skipping panel %d as it has no datasource", templateIdx) + zap.L().Warn("Skipping panel as it has no datasource", zap.Int("templateIdx", templateIdx)) continue } // Skip if the source is not prometheus source, stringOk := template.Datasource.(string) if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") { - zap.S().Warnf("Skipping 
template %d as it is not prometheus", templateIdx) + zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx)) continue } var result model.Datasource @@ -553,12 +553,12 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard } } if result.Type != "prometheus" && result.Type != "" { - zap.S().Warnf("Skipping template %d as it is not prometheus", templateIdx) + zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx)) continue } if !stringOk && !structOk { - zap.S().Warnf("Didn't recognize source, skipping") + zap.L().Warn("Didn't recognize source, skipping") continue } typ = "QUERY" @@ -629,13 +629,13 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard continue } if panel.Datasource == nil { - zap.S().Warnf("Skipping panel %d as it has no datasource", idx) + zap.L().Warn("Skipping panel as it has no datasource", zap.Int("idx", idx)) continue } // Skip if the datasource is not prometheus source, stringOk := panel.Datasource.(string) if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") { - zap.S().Warnf("Skipping panel %d as it is not prometheus", idx) + zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx)) continue } var result model.Datasource @@ -647,12 +647,12 @@ func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.Dashboard } } if result.Type != "prometheus" && result.Type != "" { - zap.S().Warnf("Skipping panel %d as it is not prometheus", idx) + zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx)) continue } if !stringOk && !structOk { - zap.S().Warnf("Didn't recognize source, skipping") + zap.L().Warn("Didn't recognize source, skipping") continue } diff --git a/pkg/query-service/app/dashboards/provision.go b/pkg/query-service/app/dashboards/provision.go index 049ae42e72..fb97a960c1 100644 --- a/pkg/query-service/app/dashboards/provision.go +++ 
b/pkg/query-service/app/dashboards/provision.go @@ -16,31 +16,31 @@ import ( func readCurrentDir(dir string, fm interfaces.FeatureLookup) error { file, err := os.Open(dir) if err != nil { - zap.S().Errorf("failed opening directory: %s", err) - return err + zap.L().Warn("failed opening directory", zap.Error(err)) + return nil } defer file.Close() list, _ := file.Readdirnames(0) // 0 to read all files and folders for _, filename := range list { - zap.S().Info("Provisioning dashboard: ", filename) + zap.L().Info("Provisioning dashboard: ", zap.String("filename", filename)) // using filepath.Join for platform specific path creation // which is equivalent to "dir+/+filename" (on unix based systems) but cleaner plan, err := os.ReadFile(filepath.Join(dir, filename)) if err != nil { - zap.S().Errorf("Creating Dashboards: Error in reading json fron file: %s\t%s", filename, err) + zap.L().Error("Creating Dashboards: Error in reading json fron file", zap.String("filename", filename), zap.Error(err)) continue } var data map[string]interface{} err = json.Unmarshal(plan, &data) if err != nil { - zap.S().Errorf("Creating Dashboards: Error in unmarshalling json from file: %s\t%s", filename, err) + zap.L().Error("Creating Dashboards: Error in unmarshalling json from file", zap.String("filename", filename), zap.Error(err)) continue } err = IsPostDataSane(&data) if err != nil { - zap.S().Infof("Creating Dashboards: Error in file: %s\t%s", filename, err) + zap.L().Info("Creating Dashboards: Error in file", zap.String("filename", filename), zap.Error(err)) continue } @@ -48,14 +48,14 @@ func readCurrentDir(dir string, fm interfaces.FeatureLookup) error { if id == nil { _, apiErr := CreateDashboard(context.Background(), data, fm) if apiErr != nil { - zap.S().Errorf("Creating Dashboards: Error in file: %s\t%s", filename, apiErr.Err) + zap.L().Error("Creating Dashboards: Error in file", zap.String("filename", filename), zap.Error(apiErr.Err)) } continue } apiErr := 
upsertDashboard(id.(string), data, filename, fm) if apiErr != nil { - zap.S().Errorf("Creating Dashboards: Error upserting dashboard: %s\t%s", filename, apiErr.Err) + zap.L().Error("Creating Dashboards: Error upserting dashboard", zap.String("filename", filename), zap.Error(apiErr.Err)) } } return nil diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 964850cbf8..c025345cef 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -207,7 +207,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { hasUsers, err := aH.appDao.GetUsersWithOpts(context.Background(), 1) if err.Error() != "" { // raise warning but no panic as this is a recoverable condition - zap.S().Warnf("unexpected error while fetch user count while initializing base api handler", err.Error()) + zap.L().Warn("unexpected error while fetch user count while initializing base api handler", zap.Error(err)) } if len(hasUsers) != 0 { // first user is already created, we can mark the app ready for general use. 
@@ -273,7 +273,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa Data: data, }) if err != nil { - zap.S().Error("msg", "error marshalling json response", "err", err) + zap.L().Error("error marshalling json response", zap.Error(err)) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -303,7 +303,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) if n, err := w.Write(b); err != nil { - zap.S().Error("msg", "error writing response", "bytesWritten", n, "err", err) + zap.L().Error("error writing response", zap.Int("bytesWritten", n), zap.Error(err)) } } @@ -314,7 +314,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) { Data: data, }) if err != nil { - zap.S().Error("msg", "error marshalling json response", "err", err) + zap.L().Error("error marshalling json response", zap.Error(err)) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -322,7 +322,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) if n, err := w.Write(b); err != nil { - zap.S().Error("msg", "error writing response", "bytesWritten", n, "err", err) + zap.L().Error("error writing response", zap.Int("bytesWritten", n), zap.Error(err)) } } @@ -567,7 +567,7 @@ func (aH *APIHandler) addTemporality(ctx context.Context, qp *v3.QueryRangeParam var err error if aH.preferDelta { - zap.S().Debug("fetching metric temporality") + zap.L().Debug("fetching metric temporality") metricNameToTemporality, err = aH.reader.FetchTemporality(ctx, metricNames) if err != nil { return err @@ -595,7 +595,7 @@ func (aH *APIHandler) QueryRangeMetricsV2(w http.ResponseWriter, r *http.Request metricsQueryRangeParams, apiErrorObj := parser.ParseMetricQueryRangeParams(r) if apiErrorObj != nil { - zap.S().Errorf(apiErrorObj.Err.Error()) + 
zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err)) RespondError(w, apiErrorObj, nil) return } @@ -1130,7 +1130,7 @@ func (aH *APIHandler) testRule(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() body, err := io.ReadAll(r.Body) if err != nil { - zap.S().Errorf("Error in getting req body in test rule API\n", err) + zap.L().Error("Error in getting req body in test rule API", zap.Error(err)) RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } @@ -1173,7 +1173,7 @@ func (aH *APIHandler) patchRule(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() body, err := io.ReadAll(r.Body) if err != nil { - zap.S().Errorf("msg: error in getting req body of patch rule API\n", "\t error:", err) + zap.L().Error("error in getting req body of patch rule API\n", zap.Error(err)) RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } @@ -1194,7 +1194,7 @@ func (aH *APIHandler) editRule(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() body, err := io.ReadAll(r.Body) if err != nil { - zap.S().Errorf("msg: error in getting req body of edit rule API\n", "\t error:", err) + zap.L().Error("error in getting req body of edit rule API", zap.Error(err)) RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } @@ -1245,14 +1245,14 @@ func (aH *APIHandler) testChannel(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() body, err := io.ReadAll(r.Body) if err != nil { - zap.S().Errorf("Error in getting req body of testChannel API\n", err) + zap.L().Error("Error in getting req body of testChannel API", zap.Error(err)) RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } receiver := &am.Receiver{} if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer - zap.S().Errorf("Error in parsing req body of testChannel API\n", err) + zap.L().Error("Error in parsing req body of 
testChannel API\n", zap.Error(err)) RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } @@ -1272,14 +1272,14 @@ func (aH *APIHandler) editChannel(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() body, err := io.ReadAll(r.Body) if err != nil { - zap.S().Errorf("Error in getting req body of editChannel API\n", err) + zap.L().Error("Error in getting req body of editChannel API", zap.Error(err)) RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } receiver := &am.Receiver{} if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer - zap.S().Errorf("Error in parsing req body of editChannel API\n", err) + zap.L().Error("Error in parsing req body of editChannel API", zap.Error(err)) RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } @@ -1300,14 +1300,14 @@ func (aH *APIHandler) createChannel(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() body, err := io.ReadAll(r.Body) if err != nil { - zap.S().Errorf("Error in getting req body of createChannel API\n", err) + zap.L().Error("Error in getting req body of createChannel API", zap.Error(err)) RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } receiver := &am.Receiver{} if err := json.Unmarshal(body, receiver); err != nil { // Parse []byte to go struct pointer - zap.S().Errorf("Error in parsing req body of createChannel API\n", err) + zap.L().Error("Error in parsing req body of createChannel API", zap.Error(err)) RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) return } @@ -1347,7 +1347,7 @@ func (aH *APIHandler) createRule(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() body, err := io.ReadAll(r.Body) if err != nil { - zap.S().Errorf("Error in getting req body for create rule API\n", err) + zap.L().Error("Error in getting req body for create rule API", zap.Error(err)) RespondError(w, &model.ApiError{Typ: 
model.ErrorBadData, Err: err}, nil) return } @@ -1374,7 +1374,7 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request) return } - // zap.S().Info(query, apiError) + // zap.L().Info(query, apiError) ctx := r.Context() if to := r.FormValue("timeout"); to != "" { @@ -1396,7 +1396,7 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request) } if res.Err != nil { - zap.S().Error(res.Err) + zap.L().Error("error in query range metrics", zap.Error(res.Err)) } if res.Err != nil { @@ -1429,7 +1429,7 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) { return } - // zap.S().Info(query, apiError) + // zap.L().Info(query, apiError) ctx := r.Context() if to := r.FormValue("timeout"); to != "" { @@ -1451,7 +1451,7 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) { } if res.Err != nil { - zap.S().Error(res.Err) + zap.L().Error("error in query range metrics", zap.Error(res.Err)) } if res.Err != nil { @@ -2045,7 +2045,7 @@ func (aH *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) { func (aH *APIHandler) listUsers(w http.ResponseWriter, r *http.Request) { users, err := dao.DB().GetUsers(context.Background()) if err != nil { - zap.S().Debugf("[listUsers] Failed to query list of users, err: %v", err) + zap.L().Error("[listUsers] Failed to query list of users", zap.Error(err)) RespondError(w, err, nil) return } @@ -2062,7 +2062,7 @@ func (aH *APIHandler) getUser(w http.ResponseWriter, r *http.Request) { ctx := context.Background() user, err := dao.DB().GetUser(ctx, id) if err != nil { - zap.S().Debugf("[getUser] Failed to query user, err: %v", err) + zap.L().Error("[getUser] Failed to query user", zap.Error(err)) RespondError(w, err, "Failed to get user") return } @@ -2092,7 +2092,7 @@ func (aH *APIHandler) editUser(w http.ResponseWriter, r *http.Request) { ctx := context.Background() old, apiErr := dao.DB().GetUser(ctx, id) if apiErr != nil { - 
zap.S().Debugf("[editUser] Failed to query user, err: %v", err) + zap.L().Error("[editUser] Failed to query user", zap.Error(err)) RespondError(w, apiErr, nil) return } @@ -2176,7 +2176,7 @@ func (aH *APIHandler) patchUserFlag(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() b, err := io.ReadAll(r.Body) if err != nil { - zap.S().Errorf("failed read user flags from http request for userId ", userId, "with error: ", err) + zap.L().Error("failed read user flags from http request for userId ", zap.String("userId", userId), zap.Error(err)) RespondError(w, model.BadRequestStr("received user flags in invalid format"), nil) return } @@ -2184,7 +2184,7 @@ func (aH *APIHandler) patchUserFlag(w http.ResponseWriter, r *http.Request) { err = json.Unmarshal(b, &flags) if err != nil { - zap.S().Errorf("failed parsing user flags for userId ", userId, "with error: ", err) + zap.L().Error("failed parsing user flags for userId ", zap.String("userId", userId), zap.Error(err)) RespondError(w, model.BadRequestStr("received user flags in invalid format"), nil) return } @@ -2348,7 +2348,7 @@ func (aH *APIHandler) resetPassword(w http.ResponseWriter, r *http.Request) { } if err := auth.ResetPassword(context.Background(), req); err != nil { - zap.S().Debugf("resetPassword failed, err: %v\n", err) + zap.L().Error("resetPassword failed", zap.Error(err)) if aH.HandleError(w, err, http.StatusInternalServerError) { return } @@ -2391,7 +2391,7 @@ func (aH *APIHandler) HandleError(w http.ResponseWriter, err error, statusCode i return false } if statusCode == http.StatusInternalServerError { - zap.S().Error("HTTP handler, Internal Server Error", zap.Error(err)) + zap.L().Error("HTTP handler, Internal Server Error", zap.Error(err)) } structuredResp := structuredResponse{ Errors: []structuredError{ @@ -2809,10 +2809,10 @@ func (aH *APIHandler) tailLogs(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "data: %v\n\n", buf.String()) flusher.Flush() case <-client.Done: - 
zap.S().Debug("done!") + zap.L().Debug("done!") return case err := <-client.Error: - zap.S().Error("error occured!", err) + zap.L().Error("error occured", zap.Error(err)) return } } @@ -2963,7 +2963,7 @@ func (ah *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request) postable []logparsingpipeline.PostablePipeline, ) (*logparsingpipeline.PipelinesResponse, *model.ApiError) { if len(postable) == 0 { - zap.S().Warnf("found no pipelines in the http request, this will delete all the pipelines") + zap.L().Warn("found no pipelines in the http request, this will delete all the pipelines") } for _, p := range postable { @@ -3403,7 +3403,7 @@ func (aH *APIHandler) QueryRangeV3Format(w http.ResponseWriter, r *http.Request) queryRangeParams, apiErrorObj := ParseQueryRangeParams(r) if apiErrorObj != nil { - zap.S().Errorf(apiErrorObj.Err.Error()) + zap.L().Error(apiErrorObj.Err.Error()) RespondError(w, apiErrorObj, nil) return } @@ -3478,11 +3478,11 @@ func sendQueryResultEvents(r *http.Request, result []*v3.Result, queryRangeParam dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer) if err != nil { - zap.S().Errorf("error while matching the dashboard: %v", err) + zap.L().Error("error while matching the referrer", zap.Error(err)) } alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer) if err != nil { - zap.S().Errorf("error while matching the alert: %v", err) + zap.L().Error("error while matching the alert: ", zap.Error(err)) } if alertMatched || dashboardMatched { @@ -3559,7 +3559,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) { queryRangeParams, apiErrorObj := ParseQueryRangeParams(r) if apiErrorObj != nil { - zap.S().Errorf(apiErrorObj.Err.Error()) + zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err)) RespondError(w, apiErrorObj, nil) return } @@ -3568,7 +3568,7 @@ func (aH *APIHandler) QueryRangeV3(w 
http.ResponseWriter, r *http.Request) { temporalityErr := aH.addTemporality(r.Context(), queryRangeParams) if temporalityErr != nil { - zap.S().Errorf("Error while adding temporality for metrics: %v", temporalityErr) + zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr)) RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil) return } @@ -3584,7 +3584,7 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) { queryRangeParams, apiErrorObj := ParseQueryRangeParams(r) if apiErrorObj != nil { - zap.S().Errorf(apiErrorObj.Err.Error()) + zap.L().Error(apiErrorObj.Err.Error()) RespondError(w, apiErrorObj, nil) return } @@ -3645,10 +3645,10 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "data: %v\n\n", buf.String()) flusher.Flush() case <-client.Done: - zap.S().Debug("done!") + zap.L().Debug("done!") return case err := <-client.Error: - zap.S().Error("error occurred!", err) + zap.L().Error("error occurred", zap.Error(err)) fmt.Fprintf(w, "event: error\ndata: %v\n\n", err.Error()) flusher.Flush() return @@ -3725,7 +3725,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) { queryRangeParams, apiErrorObj := ParseQueryRangeParams(r) if apiErrorObj != nil { - zap.S().Errorf(apiErrorObj.Err.Error()) + zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err)) RespondError(w, apiErrorObj, nil) return } @@ -3733,7 +3733,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) { // add temporality for each metric temporalityErr := aH.populateTemporality(r.Context(), queryRangeParams) if temporalityErr != nil { - zap.S().Errorf("Error while adding temporality for metrics: %v", temporalityErr) + zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr)) RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil) return } @@ 
-3777,12 +3777,12 @@ func postProcessResult(result []*v3.Result, queryRangeParams *v3.QueryRangeParam expression, err := govaluate.NewEvaluableExpressionWithFunctions(query.Expression, evalFuncs()) // This shouldn't happen here, because it should have been caught earlier in validation if err != nil { - zap.S().Errorf("error in expression: %s", err.Error()) + zap.L().Error("error in expression", zap.Error(err)) return nil, err } formulaResult, err := processResults(result, expression) if err != nil { - zap.S().Errorf("error in expression: %s", err.Error()) + zap.L().Error("error in expression", zap.Error(err)) return nil, err } formulaResult.QueryName = query.QueryName diff --git a/pkg/query-service/app/logparsingpipeline/collector_config.go b/pkg/query-service/app/logparsingpipeline/collector_config.go index c370441210..17b8d96c1e 100644 --- a/pkg/query-service/app/logparsingpipeline/collector_config.go +++ b/pkg/query-service/app/logparsingpipeline/collector_config.go @@ -138,7 +138,7 @@ func buildLogsProcessors(current []string, logsParserPipeline []string) ([]strin func checkDuplicateString(pipeline []string) bool { exists := make(map[string]bool, len(pipeline)) - zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline) + zap.L().Debug("checking duplicate processors in the pipeline:", zap.Any("pipeline", pipeline)) for _, processor := range pipeline { name := processor if _, ok := exists[name]; ok { diff --git a/pkg/query-service/app/logparsingpipeline/controller.go b/pkg/query-service/app/logparsingpipeline/controller.go index 9527fe9e8d..2e6b0ba4d3 100644 --- a/pkg/query-service/app/logparsingpipeline/controller.go +++ b/pkg/query-service/app/logparsingpipeline/controller.go @@ -104,7 +104,7 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion( if version >= 0 { savedPipelines, errors := ic.getPipelinesByVersion(ctx, version) if errors != nil { - zap.S().Errorf("failed to get pipelines for version %d, %w", version, 
errors) + zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Errors("errors", errors)) return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version")) } result = savedPipelines @@ -158,7 +158,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion( ) (*PipelinesResponse, *model.ApiError) { pipelines, errors := ic.getEffectivePipelinesByVersion(ctx, version) if errors != nil { - zap.S().Errorf("failed to get pipelines for version %d, %w", version, errors) + zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Error(errors)) return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version")) } @@ -166,7 +166,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion( if version >= 0 { cv, err := agentConf.GetConfigVersion(ctx, agentConf.ElementTypeLogPipelines, version) if err != nil { - zap.S().Errorf("failed to get config for version %d, %s", version, err.Error()) + zap.L().Error("failed to get config for version", zap.Int("version", version), zap.Error(err)) return nil, model.WrapApiError(err, "failed to get config for given version") } configVersion = cv diff --git a/pkg/query-service/app/logparsingpipeline/db.go b/pkg/query-service/app/logparsingpipeline/db.go index df187f0de3..618060d105 100644 --- a/pkg/query-service/app/logparsingpipeline/db.go +++ b/pkg/query-service/app/logparsingpipeline/db.go @@ -99,7 +99,7 @@ func (r *Repo) insertPipeline( insertRow.RawConfig) if err != nil { - zap.S().Errorf("error in inserting pipeline data: ", zap.Error(err)) + zap.L().Error("error in inserting pipeline data", zap.Error(err)) return nil, model.InternalError(errors.Wrap(err, "failed to insert pipeline")) } @@ -171,19 +171,19 @@ func (r *Repo) GetPipeline( err := r.db.SelectContext(ctx, &pipelines, pipelineQuery, id) if err != nil { - zap.S().Errorf("failed to get ingestion pipeline from db", err) + zap.L().Error("failed to get ingestion 
pipeline from db", zap.Error(err)) return nil, model.InternalError(errors.Wrap(err, "failed to get ingestion pipeline from db")) } if len(pipelines) == 0 { - zap.S().Warnf("No row found for ingestion pipeline id", id) + zap.L().Warn("No row found for ingestion pipeline id", zap.String("id", id)) return nil, model.NotFoundError(fmt.Errorf("No row found for ingestion pipeline id %v", id)) } if len(pipelines) == 1 { err := pipelines[0].ParseRawConfig() if err != nil { - zap.S().Errorf("invalid pipeline config found", id, err) + zap.L().Error("invalid pipeline config found", zap.String("id", id), zap.Error(err)) return nil, model.InternalError( errors.Wrap(err, "found an invalid pipeline config"), ) diff --git a/pkg/query-service/app/opamp/configure_ingestionRules.go b/pkg/query-service/app/opamp/configure_ingestionRules.go index bd71aa38b0..ec9c9e5b88 100644 --- a/pkg/query-service/app/opamp/configure_ingestionRules.go +++ b/pkg/query-service/app/opamp/configure_ingestionRules.go @@ -27,10 +27,10 @@ func UpsertControlProcessors( // AddToTracePipeline() or RemoveFromTracesPipeline() prior to calling // this method - zap.S().Debug("initiating ingestion rules deployment config", signal, processors) + zap.L().Debug("initiating ingestion rules deployment config", zap.String("signal", signal), zap.Any("processors", processors)) if signal != string(Metrics) && signal != string(Traces) { - zap.S().Error("received invalid signal int UpsertControlProcessors", signal) + zap.L().Error("received invalid signal int UpsertControlProcessors", zap.String("signal", signal)) fnerr = coreModel.BadRequest(fmt.Errorf( "signal not supported in ingestion rules: %s", signal, )) @@ -51,7 +51,7 @@ func UpsertControlProcessors( } if len(agents) > 1 && signal == string(Traces) { - zap.S().Debug("found multiple agents. this feature is not supported for traces pipeline (sampling rules)") + zap.L().Debug("found multiple agents. 
this feature is not supported for traces pipeline (sampling rules)") fnerr = coreModel.BadRequest(fmt.Errorf("multiple agents not supported in sampling rules")) return } @@ -60,7 +60,7 @@ func UpsertControlProcessors( agenthash, err := addIngestionControlToAgent(agent, signal, processors, false) if err != nil { - zap.S().Error("failed to push ingestion rules config to agent", agent.ID, err) + zap.L().Error("failed to push ingestion rules config to agent", zap.String("agentID", agent.ID), zap.Error(err)) continue } @@ -89,7 +89,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma // add ingestion control spec err = makeIngestionControlSpec(agentConf, Signal(signal), processors) if err != nil { - zap.S().Error("failed to prepare ingestion control processors for agent ", agent.ID, err) + zap.L().Error("failed to prepare ingestion control processors for agent", zap.String("agentID", agent.ID), zap.Error(err)) return confHash, err } @@ -99,7 +99,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma return confHash, err } - zap.S().Debugf("sending new config", string(configR)) + zap.L().Debug("sending new config", zap.String("config", string(configR))) hash := sha256.New() _, err = hash.Write(configR) if err != nil { @@ -140,7 +140,7 @@ func makeIngestionControlSpec(agentConf *confmap.Conf, signal Signal, processors // merge tracesPipelinePlan with current pipeline mergedPipeline, err := buildPipeline(signal, currentPipeline) if err != nil { - zap.S().Error("failed to build pipeline", signal, err) + zap.L().Error("failed to build pipeline", zap.String("signal", string(signal)), zap.Error(err)) return err } diff --git a/pkg/query-service/app/opamp/model/agent.go b/pkg/query-service/app/opamp/model/agent.go index 1eef7bb4cf..5751bd255b 100644 --- a/pkg/query-service/app/opamp/model/agent.go +++ b/pkg/query-service/app/opamp/model/agent.go @@ -276,7 +276,7 @@ func (agent *Agent) processStatusUpdate( func 
(agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool { recommendedConfig, confId, err := configProvider.RecommendAgentConfig([]byte(agent.EffectiveConfig)) if err != nil { - zap.S().Error("could not generate config recommendation for agent:", agent.ID, err) + zap.L().Error("could not generate config recommendation for agent", zap.String("agentID", agent.ID), zap.Error(err)) return false } @@ -293,7 +293,7 @@ func (agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool if len(confId) < 1 { // Should never happen. Handle gracefully if it does by some chance. - zap.S().Errorf("config provider recommended a config with empty confId. Using content hash for configId") + zap.L().Error("config provider recommended a config with empty confId. Using content hash for configId") hash := sha256.New() for k, v := range cfg.Config.ConfigMap { diff --git a/pkg/query-service/app/opamp/model/agents.go b/pkg/query-service/app/opamp/model/agents.go index 2e2118e216..e984cafce2 100644 --- a/pkg/query-service/app/opamp/model/agents.go +++ b/pkg/query-service/app/opamp/model/agents.go @@ -131,8 +131,8 @@ func (agents *Agents) RecommendLatestConfigToAll( // Recommendation is same as current config if string(newConfig) == agent.EffectiveConfig { - zap.S().Infof( - "Recommended config same as current effective config for agent %s", agent.ID, + zap.L().Info( + "Recommended config same as current effective config for agent", zap.String("agentID", agent.ID), ) return nil } diff --git a/pkg/query-service/app/opamp/opamp_server.go b/pkg/query-service/app/opamp/opamp_server.go index 2a7ba4c6fa..75d8d877be 100644 --- a/pkg/query-service/app/opamp/opamp_server.go +++ b/pkg/query-service/app/opamp/opamp_server.go @@ -40,7 +40,7 @@ func InitializeServer( agents: agents, agentConfigProvider: agentConfigProvider, } - opAmpServer.server = server.New(zap.S()) + opAmpServer.server = server.New(zap.L().Sugar()) return opAmpServer } @@ -58,8 +58,8 @@ func (srv 
*Server) Start(listener string) error { unsubscribe := srv.agentConfigProvider.SubscribeToConfigUpdates(func() { err := srv.agents.RecommendLatestConfigToAll(srv.agentConfigProvider) if err != nil { - zap.S().Errorf( - "could not roll out latest config recommendation to connected agents: %w", err, + zap.L().Error( + "could not roll out latest config recommendation to connected agents", zap.Error(err), ) } }) @@ -85,15 +85,14 @@ func (srv *Server) OnMessage(conn types.Connection, msg *protobufs.AgentToServer agent, created, err := srv.agents.FindOrCreateAgent(agentID, conn) if err != nil { - zap.S().Error("Failed to find or create agent %q: %v", agentID, err) + zap.L().Error("Failed to find or create agent", zap.String("agentID", agentID), zap.Error(err)) // TODO: handle error } if created { agent.CanLB = model.ExtractLbFlag(msg.AgentDescription) - zap.S().Debugf( - "New agent added:", - zap.Bool("canLb", agent.CanLB), + zap.L().Debug( + "New agent added", zap.Bool("canLb", agent.CanLB), zap.String("ID", agent.ID), zap.Any("status", agent.CurrentStatus), ) @@ -117,7 +116,7 @@ func Ready() bool { return false } if opAmpServer.agents.Count() == 0 { - zap.S().Warnf("no agents available, all agent config requests will be rejected") + zap.L().Warn("no agents available, all agent config requests will be rejected") return false } return true diff --git a/pkg/query-service/app/opamp/pipeline_builder.go b/pkg/query-service/app/opamp/pipeline_builder.go index 841a9ce5c6..7654fe8c4f 100644 --- a/pkg/query-service/app/opamp/pipeline_builder.go +++ b/pkg/query-service/app/opamp/pipeline_builder.go @@ -89,7 +89,7 @@ func RemoveFromMetricsPipelineSpec(name string) { func checkDuplicates(pipeline []interface{}) bool { exists := make(map[string]bool, len(pipeline)) - zap.S().Debugf("checking duplicate processors in the pipeline:", pipeline) + zap.L().Debug("checking duplicate processors in the pipeline", zap.Any("pipeline", pipeline)) for _, processor := range pipeline { name := 
processor.(string) if _, ok := exists[name]; ok { @@ -149,7 +149,7 @@ func buildPipeline(signal Signal, current []interface{}) ([]interface{}, error) currentPos := loc + inserts // if disabled then remove from the pipeline if !m.Enabled { - zap.S().Debugf("build_pipeline: found a disabled item, removing from pipeline at position", currentPos-1, " ", m.Name) + zap.L().Debug("build_pipeline: found a disabled item, removing from pipeline at position", zap.Int("position", currentPos-1), zap.String("processor", m.Name)) if currentPos-1 <= 0 { pipeline = pipeline[currentPos+1:] } else { @@ -170,10 +170,10 @@ func buildPipeline(signal Signal, current []interface{}) ([]interface{}, error) // right after last matched processsor (e.g. insert filters after tail_sampling for existing list of [batch, tail_sampling]) if lastMatched <= 0 { - zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position 0:", m.Name) + zap.L().Debug("build_pipeline: found a new item to be inserted, inserting at position 0", zap.String("processor", m.Name)) pipeline = append([]interface{}{m.Name}, pipeline[lastMatched+1:]...) 
} else { - zap.S().Debugf("build_pipeline: found a new item to be inserted, inserting at position :", lastMatched, " ", m.Name) + zap.L().Debug("build_pipeline: found a new item to be inserted, inserting at position", zap.Int("position", lastMatched), zap.String("processor", m.Name)) prior := make([]interface{}, len(pipeline[:lastMatched])) next := make([]interface{}, len(pipeline[lastMatched:])) copy(prior, pipeline[:lastMatched]) diff --git a/pkg/query-service/app/querier/helper.go b/pkg/query-service/app/querier/helper.go index 47f65fe007..71ee5da72d 100644 --- a/pkg/query-service/app/querier/helper.go +++ b/pkg/query-service/app/querier/helper.go @@ -116,7 +116,7 @@ func (q *querier) runBuilderQuery( if !params.NoCache && q.cache != nil { var retrieveStatus status.RetrieveStatus data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.S().Infof("cache retrieve status: %s", retrieveStatus.String()) + zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) if err == nil { cachedData = data } @@ -143,7 +143,7 @@ func (q *querier) runBuilderQuery( missedSeries = append(missedSeries, series...) 
} if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - zap.S().Error("error unmarshalling cached data", zap.Error(err)) + zap.L().Error("error unmarshalling cached data", zap.Error(err)) } mergedSeries := mergeSerieses(cachedSeries, missedSeries) @@ -154,7 +154,7 @@ func (q *querier) runBuilderQuery( // caching the data mergedSeriesData, marshallingErr = json.Marshal(mergedSeries) if marshallingErr != nil { - zap.S().Error("error marshalling merged series", zap.Error(marshallingErr)) + zap.L().Error("error marshalling merged series", zap.Error(marshallingErr)) } } @@ -172,7 +172,7 @@ func (q *querier) runBuilderQuery( // caching the data err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) if err != nil { - zap.S().Error("error storing merged series", zap.Error(err)) + zap.L().Error("error storing merged series", zap.Error(err)) return } } @@ -251,7 +251,7 @@ func (q *querier) runBuilderQuery( if !params.NoCache && q.cache != nil { var retrieveStatus status.RetrieveStatus data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.S().Infof("cache retrieve status: %s", retrieveStatus.String()) + zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) if err == nil { cachedData = data } @@ -290,7 +290,7 @@ func (q *querier) runBuilderQuery( missedSeries = append(missedSeries, series...) 
} if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - zap.S().Error("error unmarshalling cached data", zap.Error(err)) + zap.L().Error("error unmarshalling cached data", zap.Error(err)) } mergedSeries := mergeSerieses(cachedSeries, missedSeries) var mergedSeriesData []byte @@ -300,7 +300,7 @@ func (q *querier) runBuilderQuery( // caching the data mergedSeriesData, marshallingErr = json.Marshal(mergedSeries) if marshallingErr != nil { - zap.S().Error("error marshalling merged series", zap.Error(marshallingErr)) + zap.L().Error("error marshalling merged series", zap.Error(marshallingErr)) } } @@ -316,7 +316,7 @@ func (q *querier) runBuilderQuery( if missedSeriesLen > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil { err := q.cache.Store(cacheKey, mergedSeriesData, time.Hour) if err != nil { - zap.S().Error("error storing merged series", zap.Error(err)) + zap.L().Error("error storing merged series", zap.Error(err)) return } } @@ -353,7 +353,7 @@ func (q *querier) runBuilderExpression( if !params.NoCache && q.cache != nil { var retrieveStatus status.RetrieveStatus data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.S().Infof("cache retrieve status: %s", retrieveStatus.String()) + zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) if err == nil { cachedData = data } @@ -379,7 +379,7 @@ func (q *querier) runBuilderExpression( missedSeries = append(missedSeries, series...) 
} if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - zap.S().Error("error unmarshalling cached data", zap.Error(err)) + zap.L().Error("error unmarshalling cached data", zap.Error(err)) } mergedSeries := mergeSerieses(cachedSeries, missedSeries) @@ -390,7 +390,7 @@ func (q *querier) runBuilderExpression( // caching the data mergedSeriesData, marshallingErr = json.Marshal(mergedSeries) if marshallingErr != nil { - zap.S().Error("error marshalling merged series", zap.Error(marshallingErr)) + zap.L().Error("error marshalling merged series", zap.Error(marshallingErr)) } } @@ -406,7 +406,7 @@ func (q *querier) runBuilderExpression( if len(missedSeries) > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil { err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) if err != nil { - zap.S().Error("error storing merged series", zap.Error(err)) + zap.L().Error("error storing merged series", zap.Error(err)) return } } diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go index 103660f8bc..d735e00a1f 100644 --- a/pkg/query-service/app/querier/querier.go +++ b/pkg/query-service/app/querier/querier.go @@ -108,7 +108,7 @@ func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3. 
series.Points = points } if pointsWithNegativeTimestamps > 0 { - zap.S().Errorf("found points with negative timestamps for query %s", query) + zap.L().Error("found points with negative timestamps for query", zap.String("query", query)) } return result, err } @@ -346,7 +346,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam // Ensure NoCache is not set and cache is not nil if !params.NoCache && q.cache != nil { data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.S().Infof("cache retrieve status: %s", retrieveStatus.String()) + zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) if err == nil { cachedData = data } @@ -365,7 +365,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam } if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { // ideally we should not be getting an error here - zap.S().Error("error unmarshalling cached data", zap.Error(err)) + zap.L().Error("error unmarshalling cached data", zap.Error(err)) } mergedSeries := mergeSerieses(cachedSeries, missedSeries) @@ -375,12 +375,12 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam if len(missedSeries) > 0 && !params.NoCache && q.cache != nil { mergedSeriesData, err := json.Marshal(mergedSeries) if err != nil { - zap.S().Error("error marshalling merged series", zap.Error(err)) + zap.L().Error("error marshalling merged series", zap.Error(err)) return } err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) if err != nil { - zap.S().Error("error storing merged series", zap.Error(err)) + zap.L().Error("error storing merged series", zap.Error(err)) return } } diff --git a/pkg/query-service/app/querier/v2/helper.go b/pkg/query-service/app/querier/v2/helper.go index 24738806d3..e564956f19 100644 --- a/pkg/query-service/app/querier/v2/helper.go +++ b/pkg/query-service/app/querier/v2/helper.go @@ -169,7 +169,7 @@ func (q 
*querier) runBuilderQuery( if !params.NoCache && q.cache != nil { var retrieveStatus status.RetrieveStatus data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.S().Infof("cache retrieve status: %s", retrieveStatus.String()) + zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) if err == nil { cachedData = data } @@ -208,7 +208,7 @@ func (q *querier) runBuilderQuery( missedSeries = append(missedSeries, series...) } if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - zap.S().Error("error unmarshalling cached data", zap.Error(err)) + zap.L().Error("error unmarshalling cached data", zap.Error(err)) } mergedSeries := mergeSerieses(cachedSeries, missedSeries) @@ -221,12 +221,12 @@ func (q *querier) runBuilderQuery( if len(missedSeries) > 0 && !params.NoCache && q.cache != nil { mergedSeriesData, err := json.Marshal(mergedSeries) if err != nil { - zap.S().Error("error marshalling merged series", zap.Error(err)) + zap.L().Error("error marshalling merged series", zap.Error(err)) return } err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) if err != nil { - zap.S().Error("error storing merged series", zap.Error(err)) + zap.L().Error("error storing merged series", zap.Error(err)) return } } @@ -263,7 +263,7 @@ func (q *querier) runBuilderExpression( if !params.NoCache && q.cache != nil { var retrieveStatus status.RetrieveStatus data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.S().Infof("cache retrieve status: %s", retrieveStatus.String()) + zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) if err == nil { cachedData = data } @@ -289,7 +289,7 @@ func (q *querier) runBuilderExpression( missedSeries = append(missedSeries, series...) 
} if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - zap.S().Error("error unmarshalling cached data", zap.Error(err)) + zap.L().Error("error unmarshalling cached data", zap.Error(err)) } mergedSeries := mergeSerieses(cachedSeries, missedSeries) @@ -302,12 +302,12 @@ func (q *querier) runBuilderExpression( if len(missedSeries) > 0 && !params.NoCache && q.cache != nil { mergedSeriesData, err := json.Marshal(mergedSeries) if err != nil { - zap.S().Error("error marshalling merged series", zap.Error(err)) + zap.L().Error("error marshalling merged series", zap.Error(err)) return } err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) if err != nil { - zap.S().Error("error storing merged series", zap.Error(err)) + zap.L().Error("error storing merged series", zap.Error(err)) return } } diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index 50f19b89b1..e45153da7d 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ b/pkg/query-service/app/querier/v2/querier.go @@ -108,7 +108,7 @@ func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3. 
series.Points = points } if pointsWithNegativeTimestamps > 0 { - zap.S().Errorf("found points with negative timestamps for query %s", query) + zap.L().Error("found points with negative timestamps for query", zap.String("query", query)) } return result, err } @@ -326,7 +326,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam // Ensure NoCache is not set and cache is not nil if !params.NoCache && q.cache != nil { data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.S().Infof("cache retrieve status: %s", retrieveStatus.String()) + zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) if err == nil { cachedData = data } @@ -345,7 +345,7 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam } if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { // ideally we should not be getting an error here - zap.S().Error("error unmarshalling cached data", zap.Error(err)) + zap.L().Error("error unmarshalling cached data", zap.Error(err)) } mergedSeries := mergeSerieses(cachedSeries, missedSeries) @@ -355,12 +355,12 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam if len(missedSeries) > 0 && !params.NoCache && q.cache != nil { mergedSeriesData, err := json.Marshal(mergedSeries) if err != nil { - zap.S().Error("error marshalling merged series", zap.Error(err)) + zap.L().Error("error marshalling merged series", zap.Error(err)) return } err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) if err != nil { - zap.S().Error("error storing merged series", zap.Error(err)) + zap.L().Error("error storing merged series", zap.Error(err)) return } } diff --git a/pkg/query-service/app/queryBuilder/query_builder.go b/pkg/query-service/app/queryBuilder/query_builder.go index 647edd191b..693bc88f44 100644 --- a/pkg/query-service/app/queryBuilder/query_builder.go +++ b/pkg/query-service/app/queryBuilder/query_builder.go 
@@ -246,7 +246,7 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in } queries[queryName] = queryString default: - zap.S().Errorf("Unknown data source %s", query.DataSource) + zap.L().Error("Unknown data source", zap.String("dataSource", string(query.DataSource))) } } } diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go index 81ef4e9c13..549e74e976 100644 --- a/pkg/query-service/app/server.go +++ b/pkg/query-service/app/server.go @@ -115,7 +115,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { var reader interfaces.Reader storage := os.Getenv("STORAGE") if storage == "clickhouse" { - zap.S().Info("Using ClickHouse as datastore ...") + zap.L().Info("Using ClickHouse as datastore ...") clickhouseReader := clickhouseReader.NewReader( localDB, serverOptions.PromConfigPath, @@ -304,7 +304,7 @@ func loggingMiddleware(next http.Handler) http.Handler { path, _ := route.GetPathTemplate() startTime := time.Now() next.ServeHTTP(w, r) - zap.S().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path)) + zap.L().Info(path+"\ttimeTaken:"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path)) }) } @@ -375,7 +375,7 @@ func loggingMiddlewarePrivate(next http.Handler) http.Handler { path, _ := route.GetPathTemplate() startTime := time.Now() next.ServeHTTP(w, r) - zap.S().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true)) + zap.L().Info(path+"\tprivatePort: true \ttimeTaken"+time.Now().Sub(startTime).String(), zap.Duration("timeTaken", time.Now().Sub(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true)) }) } @@ -550,7 +550,7 @@ func (s *Server) initListeners() error { return err } - 
zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort)) + zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort)) // listen on private port to support internal services privateHostPort := s.serverOptions.PrivateHostPort @@ -563,7 +563,7 @@ func (s *Server) initListeners() error { if err != nil { return err } - zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort)) + zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort)) return nil } @@ -575,7 +575,7 @@ func (s *Server) Start() error { if !s.serverOptions.DisableRules { s.ruleManager.Start() } else { - zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE") + zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE") } err := s.initListeners() @@ -589,23 +589,23 @@ func (s *Server) Start() error { } go func() { - zap.S().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort)) + zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort)) switch err := s.httpServer.Serve(s.httpConn); err { case nil, http.ErrServerClosed, cmux.ErrListenerClosed: // normal exit, nothing to do default: - zap.S().Error("Could not start HTTP server", zap.Error(err)) + zap.L().Error("Could not start HTTP server", zap.Error(err)) } s.unavailableChannel <- healthcheck.Unavailable }() go func() { - zap.S().Info("Starting pprof server", zap.String("addr", constants.DebugHttpPort)) + zap.L().Info("Starting pprof server", zap.String("addr", constants.DebugHttpPort)) err = http.ListenAndServe(constants.DebugHttpPort, nil) if err != nil { - zap.S().Error("Could not start pprof server", zap.Error(err)) + zap.L().Error("Could not start pprof server", zap.Error(err)) } }() @@ -615,14 +615,14 @@ func (s *Server) Start() error { 
} fmt.Println("starting private http") go func() { - zap.S().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort)) + zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort)) switch err := s.privateHTTP.Serve(s.privateConn); err { case nil, http.ErrServerClosed, cmux.ErrListenerClosed: // normal exit, nothing to do - zap.S().Info("private http server closed") + zap.L().Info("private http server closed") default: - zap.S().Error("Could not start private HTTP server", zap.Error(err)) + zap.L().Error("Could not start private HTTP server", zap.Error(err)) } s.unavailableChannel <- healthcheck.Unavailable @@ -630,10 +630,10 @@ func (s *Server) Start() error { }() go func() { - zap.S().Info("Starting OpAmp Websocket server", zap.String("addr", constants.OpAmpWsEndpoint)) + zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", constants.OpAmpWsEndpoint)) err := s.opampServer.Start(constants.OpAmpWsEndpoint) if err != nil { - zap.S().Info("opamp ws server failed to start", err) + zap.L().Info("opamp ws server failed to start", zap.Error(err)) s.unavailableChannel <- healthcheck.Unavailable } }() @@ -706,7 +706,7 @@ func makeRulesManager( return nil, fmt.Errorf("rule manager error: %v", err) } - zap.S().Info("rules manager is ready") + zap.L().Info("rules manager is ready") return manager, nil } diff --git a/pkg/query-service/auth/auth.go b/pkg/query-service/auth/auth.go index e307f401ab..0a90c8c730 100644 --- a/pkg/query-service/auth/auth.go +++ b/pkg/query-service/auth/auth.go @@ -40,7 +40,7 @@ type InviteEmailData struct { // The root user should be able to invite people to create account on SigNoz cluster. 
func Invite(ctx context.Context, req *model.InviteRequest) (*model.InviteResponse, error) { - zap.S().Debugf("Got an invite request for email: %s\n", req.Email) + zap.L().Debug("Got an invite request for email", zap.String("email", req.Email)) token, err := utils.RandomHex(opaqueTokenSize) if err != nil { @@ -110,13 +110,13 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string) tmpl, err := template.ParseFiles(constants.InviteEmailTemplate) if err != nil { - zap.S().Errorf("failed to send email", err) + zap.L().Error("failed to send email", zap.Error(err)) return } var body bytes.Buffer if err := tmpl.Execute(&body, data); err != nil { - zap.S().Errorf("failed to send email", err) + zap.L().Error("failed to send email", zap.Error(err)) return } @@ -126,7 +126,7 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string) body.String(), ) if err != nil { - zap.S().Errorf("failed to send email", err) + zap.L().Error("failed to send email", zap.Error(err)) return } return @@ -134,7 +134,7 @@ func inviteEmail(req *model.InviteRequest, au *model.UserPayload, token string) // RevokeInvite is used to revoke the invitation for the given email. func RevokeInvite(ctx context.Context, email string) error { - zap.S().Debugf("RevokeInvite method invoked for email: %s\n", email) + zap.L().Debug("RevokeInvite method invoked for email", zap.String("email", email)) if !isValidEmail(email) { return ErrorInvalidInviteToken @@ -148,7 +148,7 @@ func RevokeInvite(ctx context.Context, email string) error { // GetInvite returns an invitation object for the given token. 
func GetInvite(ctx context.Context, token string) (*model.InvitationResponseObject, error) { - zap.S().Debugf("GetInvite method invoked for token: %s\n", token) + zap.L().Debug("GetInvite method invoked for token", zap.String("token", token)) inv, apiErr := dao.DB().GetInviteFromToken(ctx, token) if apiErr != nil { @@ -282,13 +282,13 @@ func RegisterFirstUser(ctx context.Context, req *RegisterRequest) (*model.User, org, apierr := dao.DB().CreateOrg(ctx, &model.Organization{Name: req.OrgName}) if apierr != nil { - zap.S().Debugf("CreateOrg failed, err: %v\n", zap.Error(apierr.ToError())) + zap.L().Error("CreateOrg failed", zap.Error(apierr.ToError())) return nil, apierr } group, apiErr := dao.DB().GetGroupByName(ctx, groupName) if apiErr != nil { - zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err) + zap.L().Error("GetGroupByName failed", zap.Error(apiErr.Err)) return nil, apiErr } @@ -297,7 +297,7 @@ func RegisterFirstUser(ctx context.Context, req *RegisterRequest) (*model.User, hash, err = PasswordHash(req.Password) if err != nil { - zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err)) + zap.L().Error("failed to generate password hash when registering a user", zap.Error(err)) return nil, model.InternalError(model.ErrSignupFailed{}) } @@ -328,7 +328,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b invite, err := ValidateInvite(ctx, req) if err != nil { - zap.S().Errorf("failed to validate invite token", err) + zap.L().Error("failed to validate invite token", zap.Error(err)) return nil, model.BadRequest(model.ErrSignupFailed{}) } @@ -337,7 +337,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b // in the same transaction at the end of this function userPayload, apierr := dao.DB().GetUserByEmail(ctx, invite.Email) if apierr != nil { - zap.S().Debugf("failed to get user by email", apierr.Err) + zap.L().Error("failed to get user by email", 
zap.Error(apierr.Err)) return nil, apierr } @@ -347,7 +347,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b } if invite.OrgId == "" { - zap.S().Errorf("failed to find org in the invite") + zap.L().Error("failed to find org in the invite") return nil, model.InternalError(fmt.Errorf("invalid invite, org not found")) } @@ -358,7 +358,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b group, apiErr := dao.DB().GetGroupByName(ctx, invite.Role) if apiErr != nil { - zap.S().Debugf("GetGroupByName failed, err: %v\n", apiErr.Err) + zap.L().Error("GetGroupByName failed", zap.Error(apiErr.Err)) return nil, model.InternalError(model.ErrSignupFailed{}) } @@ -368,13 +368,13 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b if req.Password != "" { hash, err = PasswordHash(req.Password) if err != nil { - zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err)) + zap.L().Error("failed to generate password hash when registering a user", zap.Error(err)) return nil, model.InternalError(model.ErrSignupFailed{}) } } else { hash, err = PasswordHash(utils.GeneratePassowrd()) if err != nil { - zap.S().Errorf("failed to generate password hash when registering a user", zap.Error(err)) + zap.L().Error("failed to generate password hash when registering a user", zap.Error(err)) return nil, model.InternalError(model.ErrSignupFailed{}) } } @@ -393,13 +393,13 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b // TODO(Ahsan): Ideally create user and delete invitation should happen in a txn. 
user, apiErr = dao.DB().CreateUser(ctx, user, false) if apiErr != nil { - zap.S().Debugf("CreateUser failed, err: %v\n", apiErr.Err) + zap.L().Error("CreateUser failed", zap.Error(apiErr.Err)) return nil, apiErr } apiErr = dao.DB().DeleteInvitation(ctx, user.Email) if apiErr != nil { - zap.S().Debugf("delete invitation failed, err: %v\n", apiErr.Err) + zap.L().Error("delete invitation failed", zap.Error(apiErr.Err)) return nil, apiErr } @@ -428,17 +428,17 @@ func Register(ctx context.Context, req *RegisterRequest) (*model.User, *model.Ap // Login method returns access and refresh tokens on successful login, else it errors out. func Login(ctx context.Context, request *model.LoginRequest) (*model.LoginResponse, error) { - zap.S().Debugf("Login method called for user: %s\n", request.Email) + zap.L().Debug("Login method called for user", zap.String("email", request.Email)) user, err := authenticateLogin(ctx, request) if err != nil { - zap.S().Debugf("Failed to authenticate login request, %v", err) + zap.L().Error("Failed to authenticate login request", zap.Error(err)) return nil, err } userjwt, err := GenerateJWTForUser(&user.User) if err != nil { - zap.S().Debugf("Failed to generate JWT against login creds, %v", err) + zap.L().Error("Failed to generate JWT against login creds", zap.Error(err)) return nil, err } diff --git a/pkg/query-service/auth/jwt.go b/pkg/query-service/auth/jwt.go index 90e2f7008d..b27d43fb9d 100644 --- a/pkg/query-service/auth/jwt.go +++ b/pkg/query-service/auth/jwt.go @@ -60,7 +60,7 @@ func validateUser(tok string) (*model.UserPayload, error) { func AttachJwtToContext(ctx context.Context, r *http.Request) context.Context { token, err := ExtractJwtFromRequest(r) if err != nil { - zap.S().Debugf("Error while getting token from header, %v", err) + zap.L().Error("Error while getting token from header", zap.Error(err)) return ctx } diff --git a/pkg/query-service/cache/redis/redis.go b/pkg/query-service/cache/redis/redis.go index 
22278c52ed..6338eca6f3 100644 --- a/pkg/query-service/cache/redis/redis.go +++ b/pkg/query-service/cache/redis/redis.go @@ -59,7 +59,7 @@ func (c *cache) Retrieve(cacheKey string, allowExpired bool) ([]byte, status.Ret func (c *cache) SetTTL(cacheKey string, ttl time.Duration) { err := c.client.Expire(context.Background(), cacheKey, ttl).Err() if err != nil { - zap.S().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err)) + zap.L().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err)) } } @@ -67,7 +67,7 @@ func (c *cache) SetTTL(cacheKey string, ttl time.Duration) { func (c *cache) Remove(cacheKey string) { err := c.client.Del(context.Background(), cacheKey).Err() if err != nil { - zap.S().Error("error deleting cache key", zap.String("cacheKey", cacheKey), zap.Error(err)) + zap.L().Error("error deleting cache key", zap.String("cacheKey", cacheKey), zap.Error(err)) } } @@ -102,7 +102,7 @@ func (c *cache) GetOptions() *Options { func (c *cache) GetTTL(cacheKey string) time.Duration { ttl, err := c.client.TTL(context.Background(), cacheKey).Result() if err != nil { - zap.S().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err)) + zap.L().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err)) } return ttl } diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 6181a66ea8..54fc819478 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -58,8 +58,8 @@ var InviteEmailTemplate = GetOrDefaultEnv("INVITE_EMAIL_TEMPLATE", "/root/templa // Alert manager channel subpath var AmChannelApiPath = GetOrDefaultEnv("ALERTMANAGER_API_CHANNEL_PATH", "v1/routes") -var OTLPTarget = GetOrDefaultEnv("OTLP_TARGET", "") -var LogExportBatchSize = GetOrDefaultEnv("LOG_EXPORT_BATCH_SIZE", "1000") +var 
OTLPTarget = GetOrDefaultEnv("OTEL_EXPORTER_OTLP_ENDPOINT", "") +var LogExportBatchSize = GetOrDefaultEnv("OTEL_BLRP_MAX_EXPORT_BATCH_SIZE", "512") var RELATIONAL_DATASOURCE_PATH = GetOrDefaultEnv("SIGNOZ_LOCAL_DB_PATH", "/var/lib/signoz/signoz.db") diff --git a/pkg/query-service/dao/sqlite/connection.go b/pkg/query-service/dao/sqlite/connection.go index a7335d6426..a2545e9531 100644 --- a/pkg/query-service/dao/sqlite/connection.go +++ b/pkg/query-service/dao/sqlite/connection.go @@ -180,7 +180,7 @@ func (mds *ModelDaoSqlite) createGroupIfNotPresent(ctx context.Context, return group, nil } - zap.S().Debugf("%s is not found, creating it", name) + zap.L().Debug("group is not found, creating it", zap.String("group_name", name)) group, cErr := mds.CreateGroup(ctx, &model.Group{Name: name}) if cErr != nil { return nil, cErr.Err diff --git a/pkg/query-service/featureManager/manager.go b/pkg/query-service/featureManager/manager.go index 15175b1882..439b8b7bd2 100644 --- a/pkg/query-service/featureManager/manager.go +++ b/pkg/query-service/featureManager/manager.go @@ -43,12 +43,12 @@ func (fm *FeatureManager) GetFeatureFlags() (model.FeatureSet, error) { } func (fm *FeatureManager) InitFeatures(req model.FeatureSet) error { - zap.S().Error("InitFeatures not implemented in OSS") + zap.L().Error("InitFeatures not implemented in OSS") return nil } func (fm *FeatureManager) UpdateFeatureFlag(req model.Feature) error { - zap.S().Error("UpdateFeatureFlag not implemented in OSS") + zap.L().Error("UpdateFeatureFlag not implemented in OSS") return nil } @@ -63,4 +63,4 @@ func (fm *FeatureManager) GetFeatureFlag(key string) (model.Feature, error) { } } return model.Feature{}, model.ErrFeatureUnavailable{Key: key} -} \ No newline at end of file +} diff --git a/pkg/query-service/integrations/alertManager/manager.go b/pkg/query-service/integrations/alertManager/manager.go index 3b7df3ce56..d80893010e 100644 --- a/pkg/query-service/integrations/alertManager/manager.go +++ 
b/pkg/query-service/integrations/alertManager/manager.go @@ -83,13 +83,13 @@ func (m *manager) AddRoute(receiver *Receiver) *model.ApiError { response, err := http.Post(amURL, contentType, bytes.NewBuffer(receiverString)) if err != nil { - zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(POST %s)\n", amURL), err) + zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } if response.StatusCode > 299 { - err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in API call to alertmanager(POST %s)\n", amURL), response.Status) - zap.S().Error(err) + err := fmt.Errorf("error in getting 2xx response in API call to alertmanager(POST %s), status: %s", amURL, response.Status) + zap.L().Error("Error in getting 2xx response in API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } return nil @@ -102,7 +102,7 @@ func (m *manager) EditRoute(receiver *Receiver) *model.ApiError { req, err := http.NewRequest(http.MethodPut, amURL, bytes.NewBuffer(receiverString)) if err != nil { - zap.S().Errorf(fmt.Sprintf("Error creating new update request for API call to alertmanager(PUT %s)\n", amURL), err) + zap.L().Error("Error creating new update request for API call to alertmanager", zap.String("url", amURL), zap.Error(err)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -112,13 +112,13 @@ response, err := client.Do(req) if err != nil { - zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(PUT %s)\n", amURL), err) + zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } if response.StatusCode > 299 { - err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in PUT API call to alertmanager(PUT %s)\n", amURL), response.Status) -
zap.S().Error(err) + err := fmt.Errorf("error in getting 2xx response in PUT API call to alertmanager(PUT %s), status: %s", amURL, response.Status) + zap.L().Error("Error in getting 2xx response in PUT API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } return nil @@ -132,7 +132,7 @@ func (m *manager) DeleteRoute(name string) *model.ApiError { req, err := http.NewRequest(http.MethodDelete, amURL, bytes.NewBuffer(requestData)) if err != nil { - zap.S().Errorf("Error in creating new delete request to alertmanager/v1/receivers\n", err) + zap.L().Error("Error in creating new delete request to alertmanager/v1/receivers", zap.Error(err)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } @@ -142,13 +142,13 @@ response, err := client.Do(req) if err != nil { - zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(DELETE %s)\n", amURL), err) + zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amURL), zap.Error(err)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } if response.StatusCode > 299 { err := fmt.Errorf(fmt.Sprintf("Error in getting 2xx response in PUT API call to alertmanager(DELETE %s)\n", amURL), response.Status) - zap.S().Error(err) + zap.L().Error("Error in getting 2xx response in PUT API call to alertmanager", zap.String("url", amURL), zap.String("status", response.Status)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } return nil @@ -162,19 +162,19 @@ func (m *manager) TestReceiver(receiver *Receiver) *model.ApiError { response, err := http.Post(amTestURL, contentType, bytes.NewBuffer(receiverBytes)) if err != nil { - zap.S().Errorf(fmt.Sprintf("Error in getting response of API call to alertmanager(POST %s)\n", amTestURL), err) + zap.L().Error("Error in getting response of API call to alertmanager", zap.String("url", amTestURL), zap.Error(err)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } if 
response.StatusCode > 201 && response.StatusCode < 400 { err := fmt.Errorf(fmt.Sprintf("Invalid parameters in test alert api for alertmanager(POST %s)\n", amTestURL), response.Status) - zap.S().Error(err) + zap.L().Error("Invalid parameters in test alert api for alertmanager", zap.Error(err)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } if response.StatusCode > 400 { err := fmt.Errorf(fmt.Sprintf("Received Server Error response for API call to alertmanager(POST %s)\n", amTestURL), response.Status) - zap.S().Error(err) + zap.L().Error("Received Server Error response for API call to alertmanager", zap.Error(err)) return &model.ApiError{Typ: model.ErrorInternal, Err: err} } diff --git a/pkg/query-service/integrations/alertManager/notifier.go b/pkg/query-service/integrations/alertManager/notifier.go index 148d489ed0..e86cf28c5e 100644 --- a/pkg/query-service/integrations/alertManager/notifier.go +++ b/pkg/query-service/integrations/alertManager/notifier.go @@ -87,11 +87,11 @@ func NewNotifier(o *NotifierOptions, logger log.Logger) (*Notifier, error) { amset, err := newAlertmanagerSet(o.AlertManagerURLs, timeout, logger) if err != nil { - zap.S().Errorf("failed to parse alert manager urls") + zap.L().Error("failed to parse alert manager urls") return n, err } n.alertmanagers = amset - zap.S().Info("Starting notifier with alert manager:", o.AlertManagerURLs) + zap.L().Info("Starting notifier with alert manager", zap.Strings("urls", o.AlertManagerURLs)) return n, nil } @@ -123,7 +123,7 @@ func (n *Notifier) nextBatch() []*Alert { // Run dispatches notifications continuously. func (n *Notifier) Run() { - zap.S().Info("msg: Initiating alert notifier...") + zap.L().Info("msg: Initiating alert notifier...") for { select { case <-n.ctx.Done(): @@ -133,7 +133,7 @@ func (n *Notifier) Run() { alerts := n.nextBatch() if !n.sendAll(alerts...) 
{ - zap.S().Warn("msg: dropped alerts", "\t count:", len(alerts)) + zap.L().Warn("msg: dropped alerts", zap.Int("count", len(alerts))) // n.metrics.dropped.Add(float64(len(alerts))) } // If the queue still has items left, kick off the next iteration. @@ -205,7 +205,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool { b, err := json.Marshal(alerts) if err != nil { - zap.S().Errorf("msg", "Encoding alerts failed", "err", err) + zap.L().Error("Encoding alerts failed", zap.Error(err)) return false } @@ -229,7 +229,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool { go func(ams *alertmanagerSet, am Manager) { u := am.URLPath(alertPushEndpoint).String() if err := n.sendOne(ctx, ams.client, u, b); err != nil { - zap.S().Errorf("alertmanager", u, "count", len(alerts), "msg", "Error calling alert API", "err", err) + zap.L().Error("Error calling alert API", zap.String("alertmanager", u), zap.Int("count", len(alerts)), zap.Error(err)) } else { atomic.AddUint64(&numSuccess, 1) } diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go index f0602c4dcd..ec68c61939 100644 --- a/pkg/query-service/main.go +++ b/pkg/query-service/main.go @@ -18,7 +18,7 @@ import ( ) func initZapLog() *zap.Logger { - config := zap.NewDevelopmentConfig() + config := zap.NewProductionConfig() config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder config.EncoderConfig.TimeKey = "timestamp" config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder @@ -85,9 +85,9 @@ func main() { auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET") if len(auth.JwtSecret) == 0 { - zap.S().Warn("No JWT secret key is specified.") + zap.L().Warn("No JWT secret key is specified.") } else { - zap.S().Info("No JWT secret key set successfully.") + zap.L().Info("JWT secret key set successfully.") } server, err := app.NewServer(serverOptions) diff --git a/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go b/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go index 
0139792dfa..e853a37685 100644 --- a/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go +++ b/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr.go @@ -143,11 +143,11 @@ func exprFormattedValue(v interface{}) string { case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool: return strings.Join(strings.Fields(fmt.Sprint(x)), ",") default: - zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0]))) + zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0]))) return "" } default: - zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x))) + zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x))) return "" } } diff --git a/pkg/query-service/rules/alerting.go b/pkg/query-service/rules/alerting.go index b2ee0b53d0..3e56c2d0c7 100644 --- a/pkg/query-service/rules/alerting.go +++ b/pkg/query-service/rules/alerting.go @@ -15,15 +15,9 @@ import ( // this file contains common structs and methods used by // rule engine -// how long before re-sending the alert -const resolvedRetention = 15 * time.Minute - const ( - // AlertMetricName is the metric name for synthetic alert timeseries. - alertMetricName = "ALERTS" - - // AlertForStateMetricName is the metric name for 'for' state of alert. 
- alertForStateMetricName = "ALERTS_FOR_STATE" + // how long before re-sending the alert + resolvedRetention = 15 * time.Minute TestAlertPostFix = "_TEST_ALERT" ) diff --git a/pkg/query-service/rules/apiParams.go b/pkg/query-service/rules/apiParams.go index 0ccf885b3d..1393f59697 100644 --- a/pkg/query-service/rules/apiParams.go +++ b/pkg/query-service/rules/apiParams.go @@ -10,7 +10,6 @@ import ( "github.com/pkg/errors" "go.signoz.io/signoz/pkg/query-service/model" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" - "go.uber.org/zap" "go.signoz.io/signoz/pkg/query-service/utils/times" "go.signoz.io/signoz/pkg/query-service/utils/timestamp" @@ -74,18 +73,15 @@ func parseIntoRule(initRule PostableRule, content []byte, kind string) (*Postabl var err error if kind == "json" { if err = json.Unmarshal(content, rule); err != nil { - zap.S().Debugf("postable rule content", string(content), "\t kind:", kind) return nil, []error{fmt.Errorf("failed to load json")} } } else if kind == "yaml" { if err = yaml.Unmarshal(content, rule); err != nil { - zap.S().Debugf("postable rule content", string(content), "\t kind:", kind) return nil, []error{fmt.Errorf("failed to load yaml")} } } else { return nil, []error{fmt.Errorf("invalid data type")} } - zap.S().Debugf("postable rule(parsed):", rule) if rule.RuleCondition == nil && rule.Expr != "" { // account for legacy rules @@ -126,8 +122,6 @@ func parseIntoRule(initRule PostableRule, content []byte, kind string) (*Postabl } } - zap.S().Debugf("postable rule:", rule, "\t condition", rule.RuleCondition.String()) - if errs := rule.Validate(); len(errs) > 0 { return nil, errs } diff --git a/pkg/query-service/rules/db.go b/pkg/query-service/rules/db.go index f0b1bb3281..cf903884fd 100644 --- a/pkg/query-service/rules/db.go +++ b/pkg/query-service/rules/db.go @@ -73,7 +73,7 @@ func (r *ruleDB) CreateRuleTx(ctx context.Context, rule string) (int64, Tx, erro stmt, err := tx.Prepare(`INSERT into rules (created_at, created_by, updated_at, 
updated_by, data) VALUES($1,$2,$3,$4,$5);`) if err != nil { - zap.S().Errorf("Error in preparing statement for INSERT to rules\n", err) + zap.L().Error("Error in preparing statement for INSERT to rules", zap.Error(err)) tx.Rollback() return lastInsertId, nil, err } @@ -82,14 +82,14 @@ result, err := stmt.Exec(createdAt, userEmail, updatedAt, userEmail, rule) if err != nil { - zap.S().Errorf("Error in Executing prepared statement for INSERT to rules\n", err) + zap.L().Error("Error in Executing prepared statement for INSERT to rules", zap.Error(err)) tx.Rollback() // return an error too, we may want to wrap them return lastInsertId, nil, err } lastInsertId, err = result.LastInsertId() if err != nil { - zap.S().Errorf("Error in getting last insert id for INSERT to rules\n", err) + zap.L().Error("Error in getting last insert id for INSERT to rules", zap.Error(err)) tx.Rollback() // return an error too, we may want to wrap them return lastInsertId, nil, err } @@ -122,14 +122,14 @@ func (r *ruleDB) EditRuleTx(ctx context.Context, rule string, id string) (string //} stmt, err := r.Prepare(`UPDATE rules SET updated_by=$1, updated_at=$2, data=$3 WHERE id=$4;`) if err != nil { - zap.S().Errorf("Error in preparing statement for UPDATE to rules\n", err) + zap.L().Error("Error in preparing statement for UPDATE to rules", zap.Error(err)) // tx.Rollback() return groupName, nil, err } defer stmt.Close() if _, err := stmt.Exec(userEmail, updatedAt, rule, idInt); err != nil { - zap.S().Errorf("Error in Executing prepared statement for UPDATE to rules\n", err) + zap.L().Error("Error in Executing prepared statement for UPDATE to rules", zap.Error(err)) // tx.Rollback() // return an error too, we may want to wrap them return groupName, nil, err } @@ -158,7 +158,7 @@ func (r *ruleDB) DeleteRuleTx(ctx context.Context, id string) (string, Tx, error defer stmt.Close() if _, err := stmt.Exec(idInt); err != 
nil { - zap.S().Errorf("Error in Executing prepared statement for DELETE to rules\n", err) + zap.L().Error("Error in Executing prepared statement for DELETE to rules", zap.Error(err)) // tx.Rollback() return groupName, nil, err } @@ -175,7 +175,7 @@ func (r *ruleDB) GetStoredRules(ctx context.Context) ([]StoredRule, error) { err := r.Select(&rules, query) if err != nil { - zap.S().Debug("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, err } @@ -193,10 +193,10 @@ func (r *ruleDB) GetStoredRule(ctx context.Context, id string) (*StoredRule, err query := fmt.Sprintf("SELECT id, created_at, created_by, updated_at, updated_by, data FROM rules WHERE id=%d", intId) err = r.Get(rule, query) - // zap.S().Info(query) + // zap.L().Info(query) if err != nil { - zap.S().Error("Error in processing sql query: ", err) + zap.L().Error("Error in processing sql query", zap.Error(err)) return nil, err } diff --git a/pkg/query-service/rules/manager.go b/pkg/query-service/rules/manager.go index 95181eade6..d5c6e74dd3 100644 --- a/pkg/query-service/rules/manager.go +++ b/pkg/query-service/rules/manager.go @@ -125,7 +125,7 @@ func NewManager(o *ManagerOptions) (*Manager, error) { func (m *Manager) Start() { if err := m.initiate(); err != nil { - zap.S().Errorf("failed to initialize alerting rules manager: %v", err) + zap.L().Error("failed to initialize alerting rules manager", zap.Error(err)) } m.run() } @@ -154,40 +154,40 @@ func (m *Manager) initiate() error { if len(errs) > 0 { if errs[0].Error() == "failed to load json" { - zap.S().Info("failed to load rule in json format, trying yaml now:", rec.Data) + zap.L().Info("failed to load rule in json format, trying yaml now:", zap.String("name", taskName)) // see if rule is stored in yaml format parsedRule, errs = parsePostableRule([]byte(rec.Data), "yaml") if parsedRule == nil { - zap.S().Errorf("failed to parse and initialize yaml rule:", errs) + zap.L().Error("failed to 
parse and initialize yaml rule", zap.String("name", taskName), zap.Errors("errors", errs)) // just one rule is being parsed so expect just one error loadErrors = append(loadErrors, errs[0]) continue } else { // rule stored in yaml, so migrate it to json - zap.S().Info("msg:", "migrating rule from JSON to yaml", "\t rule:", rec.Data, "\t parsed rule:", parsedRule) + zap.L().Info("migrating rule from JSON to yaml", zap.String("name", taskName)) ruleJSON, err := json.Marshal(parsedRule) if err == nil { taskName, _, err := m.ruleDB.EditRuleTx(context.Background(), string(ruleJSON), fmt.Sprintf("%d", rec.Id)) if err != nil { - zap.S().Errorf("msg: failed to migrate rule ", "/t error:", err) + zap.L().Error("failed to migrate rule", zap.String("name", taskName), zap.Error(err)) } else { - zap.S().Info("msg:", "migrated rule from yaml to json", "/t rule:", taskName) + zap.L().Info("migrated rule from yaml to json", zap.String("name", taskName)) } } } } else { - zap.S().Errorf("failed to parse and initialize rule:", errs) + zap.L().Error("failed to parse and initialize rule", zap.String("name", taskName), zap.Errors("errors", errs)) // just one rule is being parsed so expect just one error - loadErrors = append(loadErrors, errs[0]) + loadErrors = append(loadErrors, errs[0]) continue } } if !parsedRule.Disabled { err := m.addTask(parsedRule, taskName) if err != nil { - zap.S().Errorf("failed to load the rule definition (%s): %v", taskName, err) + zap.L().Error("failed to load the rule definition", zap.String("name", taskName), zap.Error(err)) } } } @@ -213,13 +213,13 @@ func (m *Manager) Stop() { m.mtx.Lock() defer m.mtx.Unlock() - zap.S().Info("msg: ", "Stopping rule manager...") + zap.L().Info("Stopping rule manager...") for _, t := range m.tasks { t.Stop() } - zap.S().Info("msg: ", "Rule manager stopped") + zap.L().Info("Rule manager stopped") } // EditRuleDefinition writes the rule definition to the @@ -230,7 +230,7 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) 
error currentRule, err := m.GetRule(ctx, id) if err != nil { - zap.S().Errorf("msg: ", "failed to get the rule from rule db", "\t ruleid: ", id) + zap.L().Error("failed to get the rule from rule db", zap.String("id", id), zap.Error(err)) return err } @@ -243,7 +243,7 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error } if len(errs) > 0 { - zap.S().Errorf("failed to parse rules:", errs) + zap.L().Error("failed to parse rules", zap.Errors("errors", errs)) // just one rule is being parsed so expect just one error return errs[0] } @@ -264,13 +264,13 @@ func (m *Manager) EditRule(ctx context.Context, ruleStr string, id string) error if !checkIfTraceOrLogQB(¤tRule.PostableRule) { err = m.updateFeatureUsage(parsedRule, 1) if err != nil { - zap.S().Errorf("error updating feature usage: %v", err) + zap.L().Error("error updating feature usage", zap.Error(err)) } // update feature usage if the new rule is not a trace or log query builder and the current rule is } else if !checkIfTraceOrLogQB(parsedRule) { err = m.updateFeatureUsage(¤tRule.PostableRule, -1) if err != nil { - zap.S().Errorf("error updating feature usage: %v", err) + zap.L().Error("error updating feature usage", zap.Error(err)) } } @@ -281,12 +281,12 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error { m.mtx.Lock() defer m.mtx.Unlock() - zap.S().Debugf("msg:", "editing a rule task", "\t task name:", taskName) + zap.L().Debug("editing a rule task", zap.String("name", taskName)) newTask, err := m.prepareTask(false, rule, taskName) if err != nil { - zap.S().Errorf("msg:", "loading tasks failed", "\t err:", err) + zap.L().Error("loading tasks failed", zap.Error(err)) return errors.New("error preparing rule with given parameters, previous rule set restored") } @@ -294,7 +294,7 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error { // it to finish the current iteration. Then copy it into the new group. 
oldTask, ok := m.tasks[taskName] if !ok { - zap.S().Warnf("msg:", "rule task not found, a new task will be created ", "\t task name:", taskName) + zap.L().Warn("rule task not found, a new task will be created", zap.String("name", taskName)) } delete(m.tasks, taskName) @@ -319,14 +319,14 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error { idInt, err := strconv.Atoi(id) if err != nil { - zap.S().Errorf("msg: ", "delete rule received an rule id in invalid format, must be a number", "\t ruleid:", id) + zap.L().Error("delete rule received an rule id in invalid format, must be a number", zap.String("id", id), zap.Error(err)) return fmt.Errorf("delete rule received an rule id in invalid format, must be a number") } // update feature usage rule, err := m.GetRule(ctx, id) if err != nil { - zap.S().Errorf("msg: ", "failed to get the rule from rule db", "\t ruleid: ", id) + zap.L().Error("failed to get the rule from rule db", zap.String("id", id), zap.Error(err)) return err } @@ -336,13 +336,13 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error { } if _, _, err := m.ruleDB.DeleteRuleTx(ctx, id); err != nil { - zap.S().Errorf("msg: ", "failed to delete the rule from rule db", "\t ruleid: ", id) + zap.L().Error("failed to delete the rule from rule db", zap.String("id", id), zap.Error(err)) return err } err = m.updateFeatureUsage(&rule.PostableRule, -1) if err != nil { - zap.S().Errorf("error updating feature usage: %v", err) + zap.L().Error("error updating feature usage", zap.Error(err)) } return nil @@ -351,16 +351,16 @@ func (m *Manager) DeleteRule(ctx context.Context, id string) error { func (m *Manager) deleteTask(taskName string) { m.mtx.Lock() defer m.mtx.Unlock() - zap.S().Debugf("msg:", "deleting a rule task", "\t task name:", taskName) + zap.L().Debug("deleting a rule task", zap.String("name", taskName)) oldg, ok := m.tasks[taskName] if ok { oldg.Stop() delete(m.tasks, taskName) delete(m.rules, ruleIdFromTaskName(taskName)) - 
zap.S().Debugf("msg:", "rule task deleted", "\t task name:", taskName) + zap.L().Debug("rule task deleted", zap.String("name", taskName)) } else { - zap.S().Info("msg: ", "rule not found for deletion", "\t name:", taskName) + zap.L().Info("rule not found for deletion", zap.String("name", taskName)) } } @@ -376,7 +376,7 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*GettableRule } if len(errs) > 0 { - zap.S().Errorf("failed to parse rules:", errs) + zap.L().Error("failed to parse rules", zap.Errors("errors", errs)) // just one rule is being parsed so expect just one error return nil, errs[0] } @@ -400,7 +400,7 @@ func (m *Manager) CreateRule(ctx context.Context, ruleStr string) (*GettableRule // update feature usage err = m.updateFeatureUsage(parsedRule, 1) if err != nil { - zap.S().Errorf("error updating feature usage: %v", err) + zap.L().Error("error updating feature usage", zap.Error(err)) } gettableRule := &GettableRule{ Id: fmt.Sprintf("%d", lastInsertId), @@ -438,10 +438,10 @@ func (m *Manager) checkFeatureUsage(parsedRule *PostableRule) error { if err != nil { switch err.(type) { case model.ErrFeatureUnavailable: - zap.S().Errorf("feature unavailable", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err)) + zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err)) return model.BadRequest(err) default: - zap.S().Errorf("feature check failed", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err)) + zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderAlerts), zap.Error(err)) return model.BadRequest(err) } } @@ -466,11 +466,11 @@ func (m *Manager) addTask(rule *PostableRule, taskName string) error { m.mtx.Lock() defer m.mtx.Unlock() - zap.S().Debugf("msg:", "adding a new rule task", "\t task name:", taskName) + zap.L().Debug("adding a new rule task", zap.String("name", taskName)) newTask, err := m.prepareTask(false, rule, taskName) if 
err != nil { - zap.S().Errorf("msg:", "creating rule task failed", "\t name:", taskName, "\t err", err) + zap.L().Error("creating rule task failed", zap.String("name", taskName), zap.Error(err)) return errors.New("error loading rules, previous rule set restored") } @@ -504,7 +504,7 @@ func (m *Manager) prepareTask(acquireLock bool, r *PostableRule, taskName string var task Task if r.Alert == "" { - zap.S().Errorf("msg:", "task load failed, at least one rule must be set", "\t task name:", taskName) + zap.L().Error("task load failed, at least one rule must be set", zap.String("name", taskName)) return task, fmt.Errorf("task load failed, at least one rule must be set") } @@ -686,7 +686,7 @@ func (m *Manager) ListRuleStates(ctx context.Context) (*GettableRules, error) { ruleResponse := &GettableRule{} if err := json.Unmarshal([]byte(s.Data), ruleResponse); err != nil { // Parse []byte to go struct pointer - zap.S().Errorf("msg:", "invalid rule data", "\t err:", err) + zap.L().Error("failed to unmarshal rule from db", zap.Int("id", s.Id), zap.Error(err)) continue } @@ -779,28 +779,28 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleId string) // retrieve rule from DB storedJSON, err := m.ruleDB.GetStoredRule(ctx, ruleId) if err != nil { - zap.S().Errorf("msg:", "failed to get stored rule with given id", "\t error:", err) + zap.L().Error("failed to get stored rule with given id", zap.String("id", ruleId), zap.Error(err)) return nil, err } // storedRule holds the current stored rule from DB storedRule := PostableRule{} if err := json.Unmarshal([]byte(storedJSON.Data), &storedRule); err != nil { - zap.S().Errorf("msg:", "failed to get unmarshal stored rule with given id", "\t error:", err) + zap.L().Error("failed to unmarshal stored rule with given id", zap.String("id", ruleId), zap.Error(err)) return nil, err } // patchedRule is combo of stored rule and patch received in the request patchedRule, errs := parseIntoRule(storedRule, []byte(ruleStr), 
"json") if len(errs) > 0 { - zap.S().Errorf("failed to parse rules:", errs) + zap.L().Error("failed to parse rules", zap.Errors("errors", errs)) // just one rule is being parsed so expect just one error return nil, errs[0] } // deploy or un-deploy task according to patched (new) rule state if err := m.syncRuleStateWithTask(taskName, patchedRule); err != nil { - zap.S().Errorf("failed to sync stored rule state with the task") + zap.L().Error("failed to sync stored rule state with the task", zap.String("taskName", taskName), zap.Error(err)) return nil, err } @@ -816,7 +816,7 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, ruleId string) // restore task state from the stored rule if err := m.syncRuleStateWithTask(taskName, &storedRule); err != nil { - zap.S().Errorf("msg: ", "failed to restore rule after patch failure", "\t error:", err) + zap.L().Error("failed to restore rule after patch failure", zap.String("taskName", taskName), zap.Error(err)) } return nil, err @@ -846,7 +846,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m parsedRule, errs := ParsePostableRule([]byte(ruleStr)) if len(errs) > 0 { - zap.S().Errorf("msg: failed to parse rule from request:", "\t error: ", errs) + zap.L().Error("failed to parse rule from request", zap.Errors("errors", errs)) return 0, newApiErrorBadData(errs[0]) } @@ -882,7 +882,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m ) if err != nil { - zap.S().Errorf("msg: failed to prepare a new threshold rule for test:", "\t error: ", err) + zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err)) return 0, newApiErrorBadData(err) } @@ -899,7 +899,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m ) if err != nil { - zap.S().Errorf("msg: failed to prepare a new promql rule for test:", "\t error: ", err) + zap.L().Error("failed to prepare a new promql rule 
for test", zap.String("name", rule.Name()), zap.Error(err)) return 0, newApiErrorBadData(err) } } else { @@ -911,10 +911,13 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m count, err := rule.Eval(ctx, ts, m.opts.Queriers) if err != nil { - zap.S().Warn("msg:", "Evaluating rule failed", "\t rule:", rule, "\t err: ", err) + zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err)) return 0, newApiErrorInternal(fmt.Errorf("rule evaluation failed")) } - alertsFound := count.(int) + alertsFound, ok := count.(int) + if !ok { + return 0, newApiErrorInternal(fmt.Errorf("something went wrong")) + } rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), m.prepareNotifyFunc()) return alertsFound, nil diff --git a/pkg/query-service/rules/promRule.go b/pkg/query-service/rules/promRule.go index 1a4a89e3d2..8f829e0ad3 100644 --- a/pkg/query-service/rules/promRule.go +++ b/pkg/query-service/rules/promRule.go @@ -94,7 +94,7 @@ func NewPromRule( return nil, err } - zap.S().Info("msg:", "creating new alerting rule", "\t name:", p.name, "\t condition:", p.ruleCondition.String(), "\t query:", query) + zap.L().Info("creating new alerting rule", zap.String("name", p.name), zap.String("condition", p.ruleCondition.String()), zap.String("query", query)) return &p, nil } @@ -339,7 +339,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) ( if err != nil { return nil, err } - zap.S().Info("rule:", r.Name(), "\t evaluating promql query: ", q) + zap.L().Info("evaluating promql query", zap.String("name", r.Name()), zap.String("query", q)) res, err := queriers.PqlEngine.RunAlertQuery(ctx, q, start, end, interval) if err != nil { r.SetHealth(HealthBad) @@ -368,7 +368,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) ( if !shouldAlert { continue } - zap.S().Debugf("rule: %s, alerting for series: %v", r.Name(), series) + zap.L().Debug("alerting for series", 
zap.String("name", r.Name()), zap.Any("series", series)) thresholdFormatter := formatter.FromUnit(r.ruleCondition.TargetUnit) threshold := thresholdFormatter.Format(r.targetVal(), r.ruleCondition.TargetUnit) @@ -435,7 +435,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) ( } } - zap.S().Debugf("For rule: %s, found %d alerts", r.Name(), len(alerts)) + zap.L().Debug("found alerts for rule", zap.Int("count", len(alerts)), zap.String("name", r.Name())) // alerts[h] is ready, add or update active list now for h, a := range alerts { // Check whether we already have alerting state for the identifying label set. diff --git a/pkg/query-service/rules/promRuleTask.go b/pkg/query-service/rules/promRuleTask.go index d4a853d844..af38488f7c 100644 --- a/pkg/query-service/rules/promRuleTask.go +++ b/pkg/query-service/rules/promRuleTask.go @@ -40,7 +40,7 @@ type PromRuleTask struct { // newPromRuleTask holds rules that have promql condition // and evalutes the rule at a given frequency func newPromRuleTask(name, file string, frequency time.Duration, rules []Rule, opts *ManagerOptions, notify NotifyFunc) *PromRuleTask { - zap.S().Info("Initiating a new rule group:", name, "\t frequency:", frequency) + zap.L().Info("Initiating a new rule group", zap.String("name", name), zap.Duration("frequency", frequency)) if time.Now() == time.Now().Add(frequency) { frequency = DefaultFrequency @@ -312,7 +312,7 @@ func (g *PromRuleTask) CopyState(fromTask Task) error { // Eval runs a single evaluation cycle in which all rules are evaluated sequentially. 
func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) { - zap.S().Info("promql rule task:", g.name, "\t eval started at:", ts) + zap.L().Info("promql rule task", zap.String("name", g.name), zap.Time("eval started at", ts)) for i, rule := range g.rules { if rule == nil { continue @@ -340,7 +340,7 @@ func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) { rule.SetHealth(HealthBad) rule.SetLastError(err) - zap.S().Warn("msg", "Evaluating rule failed", "rule", rule, "err", err) + zap.L().Warn("Evaluating rule failed", zap.String("ruleid", rule.ID()), zap.Error(err)) // Canceled queries are intentional termination of queries. This normally // happens on shutdown and thus we skip logging of any errors here. diff --git a/pkg/query-service/rules/ruleTask.go b/pkg/query-service/rules/ruleTask.go index b2f6f09921..edf3957a6f 100644 --- a/pkg/query-service/rules/ruleTask.go +++ b/pkg/query-service/rules/ruleTask.go @@ -25,10 +25,8 @@ type RuleTask struct { evaluationTime time.Duration lastEvaluation time.Time - markStale bool - done chan struct{} - terminated chan struct{} - managerDone chan struct{} + done chan struct{} + terminated chan struct{} pause bool notify NotifyFunc @@ -42,7 +40,7 @@ func newRuleTask(name, file string, frequency time.Duration, rules []Rule, opts if time.Now() == time.Now().Add(frequency) { frequency = DefaultFrequency } - zap.S().Info("msg:", "initiating a new rule task", "\t name:", name, "\t frequency:", frequency) + zap.L().Info("initiating a new rule task", zap.String("name", name), zap.Duration("frequency", frequency)) return &RuleTask{ name: name, @@ -91,7 +89,7 @@ func (g *RuleTask) Run(ctx context.Context) { // Wait an initial amount to have consistently slotted intervals. 
evalTimestamp := g.EvalTimestamp(time.Now().UnixNano()).Add(g.frequency) - zap.S().Debugf("group:", g.name, "\t group run to begin at: ", evalTimestamp) + zap.L().Debug("group run to begin at", zap.Time("evalTimestamp", evalTimestamp)) select { case <-time.After(time.Until(evalTimestamp)): case <-g.done: @@ -294,7 +292,7 @@ func (g *RuleTask) CopyState(fromTask Task) error { // Eval runs a single evaluation cycle in which all rules are evaluated sequentially. func (g *RuleTask) Eval(ctx context.Context, ts time.Time) { - zap.S().Debugf("msg:", "rule task eval started", "\t name:", g.name, "\t start time:", ts) + zap.L().Debug("rule task eval started", zap.String("name", g.name), zap.Time("start time", ts)) for i, rule := range g.rules { if rule == nil { @@ -330,7 +328,7 @@ func (g *RuleTask) Eval(ctx context.Context, ts time.Time) { rule.SetHealth(HealthBad) rule.SetLastError(err) - zap.S().Warn("msg:", "Evaluating rule failed", "\t rule:", rule, "\t err: ", err) + zap.L().Warn("Evaluating rule failed", zap.String("ruleid", rule.ID()), zap.Error(err)) // Canceled queries are intentional termination of queries. This normally // happens on shutdown and thus we skip logging of any errors here. 
diff --git a/pkg/query-service/rules/thresholdRule.go b/pkg/query-service/rules/thresholdRule.go index f358d80393..c05c61c57b 100644 --- a/pkg/query-service/rules/thresholdRule.go +++ b/pkg/query-service/rules/thresholdRule.go @@ -135,7 +135,7 @@ func NewThresholdRule( } t.queryBuilderV4 = queryBuilder.NewQueryBuilder(builderOptsV4, featureFlags) - zap.S().Info("msg:", "creating new alerting rule", "\t name:", t.name, "\t condition:", t.ruleCondition.String(), "\t generatorURL:", t.GeneratorURL()) + zap.L().Info("creating new ThresholdRule", zap.String("name", t.name), zap.String("id", t.id)) return &t, nil } @@ -386,7 +386,7 @@ func (r *ThresholdRule) ForEachActiveAlert(f func(*Alert)) { } func (r *ThresholdRule) SendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) { - zap.S().Info("msg:", "sending alerts", "\t rule:", r.Name()) + zap.L().Info("sending alerts", zap.String("rule", r.Name())) alerts := []*Alert{} r.ForEachActiveAlert(func(alert *Alert) { if r.opts.SendAlways || alert.needsSending(ts, resendDelay) { @@ -400,7 +400,7 @@ func (r *ThresholdRule) SendAlerts(ctx context.Context, ts time.Time, resendDela anew := *alert alerts = append(alerts, &anew) } else { - zap.S().Debugf("msg: skipping send alert due to resend delay", "\t rule: ", r.Name(), "\t alert:", alert.Labels) + zap.L().Debug("skipping send alert due to resend delay", zap.String("rule", r.Name()), zap.Any("alert", alert.Labels)) } }) notifyFunc(ctx, "", alerts...) 
@@ -416,12 +416,12 @@ func (r *ThresholdRule) Unit() string { func (r *ThresholdRule) CheckCondition(v float64) bool { if math.IsNaN(v) { - zap.S().Debugf("msg:", "found NaN in rule condition", "\t rule name:", r.Name()) + zap.L().Debug("found NaN in rule condition", zap.String("rule", r.Name())) return false } if r.ruleCondition.Target == nil { - zap.S().Debugf("msg:", "found null target in rule condition", "\t rulename:", r.Name()) + zap.L().Debug("found null target in rule condition", zap.String("rule", r.Name())) return false } @@ -429,7 +429,7 @@ func (r *ThresholdRule) CheckCondition(v float64) bool { value := unitConverter.Convert(converter.Value{F: *r.ruleCondition.Target, U: converter.Unit(r.ruleCondition.TargetUnit)}, converter.Unit(r.Unit())) - zap.S().Debugf("Checking condition for rule: %s, Converter=%s, Value=%f, Target=%f, CompareOp=%s", r.Name(), unitConverter.Name(), v, value.F, r.ruleCondition.CompareOp) + zap.L().Info("Checking condition for rule", zap.String("rule", r.Name()), zap.String("converter", unitConverter.Name()), zap.Float64("value", v), zap.Float64("target", value.F), zap.String("compareOp", string(r.ruleCondition.CompareOp))) switch r.ruleCondition.CompareOp { case ValueIsEq: return v == value.F @@ -496,7 +496,7 @@ func (r *ThresholdRule) shouldSkipFirstRecord() bool { func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, query string) (Vector, error) { rows, err := db.Query(ctx, query) if err != nil { - zap.S().Errorf("rule:", r.Name(), "\t failed to get alert query result") + zap.L().Error("failed to get alert query result", zap.String("rule", r.Name()), zap.Error(err)) return nil, err } @@ -604,7 +604,7 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer lblsOrig.Set(columnNames[i], fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())) } default: - zap.S().Errorf("ruleId:", r.ID(), "\t error: invalid var found in query result", v, columnNames[i]) + zap.L().Error("invalid var 
found in query result", zap.String("ruleId", r.ID()), zap.Any("value", v), zap.Any("column", columnNames[i])) } } @@ -710,11 +710,11 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer } } - zap.S().Debugf("ruleid:", r.ID(), "\t resultmap(potential alerts):", len(resultMap)) + zap.L().Debug("resultmap(potential alerts)", zap.String("ruleid", r.ID()), zap.Int("count", len(resultMap))) // if the data is missing for `For` duration then we should send alert if r.ruleCondition.AlertOnAbsent && r.lastTimestampWithDatapoints.Add(r.Condition().AbsentFor*time.Minute).Before(time.Now()) { - zap.S().Debugf("ruleid:", r.ID(), "\t msg: no data found for rule condition") + zap.L().Info("no data found for rule condition", zap.String("ruleid", r.ID())) lbls := labels.NewBuilder(labels.Labels{}) if !r.lastTimestampWithDatapoints.IsZero() { lbls.Set("lastSeen", r.lastTimestampWithDatapoints.Format(constants.AlertTimeFormat)) @@ -734,7 +734,7 @@ func (r *ThresholdRule) runChQuery(ctx context.Context, db clickhouse.Conn, quer } } if len(result) != 0 { - zap.S().Infof("For rule %s, with ClickHouseQuery %s, found %d alerts", r.ID(), query, len(result)) + zap.L().Info("found alerts", zap.String("ruleid", r.ID()), zap.String("query", query), zap.Int("count", len(result))) } return result, nil } @@ -979,7 +979,7 @@ func (r *ThresholdRule) prepareClickhouseQueries(ts time.Time) (map[string]strin } if r.ruleCondition.QueryType() != v3.QueryTypeClickHouseSQL { - zap.S().Debugf("ruleid:", r.ID(), "\t msg: unsupported query type in prepareClickhouseQueries()") + zap.L().Error("unsupported query type in prepareClickhouseQueries", zap.String("ruleid", r.ID())) return nil, fmt.Errorf("failed to prepare clickhouse queries") } @@ -995,18 +995,17 @@ func (r *ThresholdRule) prepareClickhouseQueries(ts time.Time) (map[string]strin tmpl := template.New("clickhouse-query") tmpl, err := tmpl.Parse(chQuery.Query) if err != nil { - zap.S().Errorf("ruleid:", r.ID(), "\t msg: 
failed to parse clickhouse query to populate vars", err) + zap.L().Error("failed to parse clickhouse query to populate vars", zap.String("ruleid", r.ID()), zap.Error(err)) r.SetHealth(HealthBad) return nil, err } var query bytes.Buffer err = tmpl.Execute(&query, params.Variables) if err != nil { - zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to populate clickhouse query", err) + zap.L().Error("failed to populate clickhouse query", zap.String("ruleid", r.ID()), zap.Error(err)) r.SetHealth(HealthBad) return nil, err } - zap.S().Debugf("ruleid:", r.ID(), "\t query:", query.String()) queries[name] = query.String() } return queries, nil @@ -1023,13 +1022,13 @@ func (r *ThresholdRule) GetSelectedQuery() string { if r.ruleCondition.QueryType() == v3.QueryTypeBuilder { queries, err = r.prepareBuilderQueries(time.Now(), nil) if err != nil { - zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare metric queries", zap.Error(err)) + zap.L().Error("failed to prepare metric queries", zap.String("ruleid", r.ID()), zap.Error(err)) return "" } } else if r.ruleCondition.QueryType() == v3.QueryTypeClickHouseSQL { queries, err = r.prepareClickhouseQueries(time.Now()) if err != nil { - zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare clickhouse queries", zap.Error(err)) + zap.L().Error("failed to prepare clickhouse queries", zap.String("ruleid", r.ID()), zap.Error(err)) return "" } } @@ -1078,7 +1077,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c queries, err = r.prepareBuilderQueries(ts, ch) if err != nil { - zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare metric queries", zap.Error(err)) + zap.L().Error("failed to prepare metric queries", zap.String("ruleid", r.ID()), zap.Error(err)) return nil, fmt.Errorf("failed to prepare metric queries") } @@ -1087,7 +1086,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c queries, err = r.prepareClickhouseQueries(ts) if err != nil { - 
zap.S().Errorf("ruleid:", r.ID(), "\t msg: failed to prepare clickhouse queries", zap.Error(err)) + zap.L().Error("failed to prepare clickhouse queries", zap.String("ruleid", r.ID()), zap.Error(err)) return nil, fmt.Errorf("failed to prepare clickhouse queries") } @@ -1099,16 +1098,16 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time, ch c return nil, fmt.Errorf("no queries could be built with the rule config") } - zap.S().Debugf("ruleid:", r.ID(), "\t runQueries:", queries) + zap.L().Info("prepared queries", zap.String("ruleid", r.ID()), zap.Any("queries", queries)) queryLabel := r.GetSelectedQuery() - zap.S().Debugf("ruleId: ", r.ID(), "\t result query label:", queryLabel) + zap.L().Debug("Selected query lable for rule", zap.String("ruleid", r.ID()), zap.String("label", queryLabel)) if queryString, ok := queries[queryLabel]; ok { return r.runChQuery(ctx, ch, queryString) } - zap.S().Errorf("ruleId: ", r.ID(), "\t invalid query label:", queryLabel, "\t queries:", queries) + zap.L().Error("invalid query label", zap.String("ruleid", r.ID()), zap.String("label", queryLabel), zap.Any("queries", queries)) return nil, fmt.Errorf("this is unexpected, invalid query label") } @@ -1137,7 +1136,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie if err != nil { r.SetHealth(HealthBad) r.SetLastError(err) - zap.S().Debugf("ruleid:", r.ID(), "\t failure in buildAndRunQuery:", err) + zap.L().Error("failure in buildAndRunQuery", zap.String("ruleid", r.ID()), zap.Error(err)) return nil, err } @@ -1156,7 +1155,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie value := valueFormatter.Format(smpl.V, r.Unit()) thresholdFormatter := formatter.FromUnit(r.ruleCondition.TargetUnit) threshold := thresholdFormatter.Format(r.targetVal(), r.ruleCondition.TargetUnit) - zap.S().Debugf("Alert template data for rule %s: Formatter=%s, Value=%s, Threshold=%s", r.Name(), valueFormatter.Name(), value, 
threshold) + zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold)) tmplData := AlertTemplateData(l, value, threshold) // Inject some convenience variables that are easier to remember for users @@ -1177,7 +1176,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie result, err := tmpl.Expand() if err != nil { result = fmt.Sprintf("", err) - zap.S().Errorf("msg:", "Expanding alert template failed", "\t err", err, "\t data", tmplData) + zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData)) } return result } @@ -1222,7 +1221,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie resultFPs[h] = struct{}{} if _, ok := alerts[h]; ok { - zap.S().Errorf("ruleId: ", r.ID(), "\t msg:", "the alert query returns duplicate records:", alerts[h]) + zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h])) err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels") // We have already acquired the lock above hence using SetHealth and // SetLastError will deadlock. 
@@ -1242,7 +1241,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie } } - zap.S().Info("rule:", r.Name(), "\t alerts found: ", len(alerts)) + zap.L().Info("alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts))) // alerts[h] is ready, add or update active list now for h, a := range alerts { diff --git a/pkg/query-service/telemetry/telemetry.go b/pkg/query-service/telemetry/telemetry.go index 9202a32168..4c23cbd092 100644 --- a/pkg/query-service/telemetry/telemetry.go +++ b/pkg/query-service/telemetry/telemetry.go @@ -467,7 +467,7 @@ func (a *Telemetry) SendEvent(event string, data map[string]interface{}, userEma } } - // zap.S().Info(data) + // zap.L().Info(data) properties := analytics.NewProperties() properties.Set("version", version.GetVersion()) properties.Set("deploymentType", getDeploymentType()) diff --git a/pkg/query-service/tests/docker.go b/pkg/query-service/tests/docker.go index a710161a0e..c65a627512 100644 --- a/pkg/query-service/tests/docker.go +++ b/pkg/query-service/tests/docker.go @@ -13,7 +13,6 @@ import ( "log" minio "github.com/minio/minio-go/v6" - "go.uber.org/zap" ) const ( @@ -36,7 +35,7 @@ func init() { } else if goArch == "amd64" { composeFile = "./test-deploy/docker-compose.yaml" } else { - zap.S().Fatalf("Unsupported architecture: %s", goArch) + log.Fatalf("Unsupported architecture: %s", goArch) } } diff --git a/pkg/query-service/utils/format.go b/pkg/query-service/utils/format.go index bc15a8a1e9..0a614e2987 100644 --- a/pkg/query-service/utils/format.go +++ b/pkg/query-service/utils/format.go @@ -183,11 +183,11 @@ func ClickHouseFormattedValue(v interface{}) string { case uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, bool: return strings.Join(strings.Fields(fmt.Sprint(x)), ",") default: - zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x[0]))) + zap.L().Error("invalid type for formatted value", zap.Any("type", 
reflect.TypeOf(x[0]))) return "" } default: - zap.S().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x))) + zap.L().Error("invalid type for formatted value", zap.Any("type", reflect.TypeOf(x))) return "" } } diff --git a/pkg/query-service/utils/time.go b/pkg/query-service/utils/time.go index 69b49e42ac..274b032cdb 100644 --- a/pkg/query-service/utils/time.go +++ b/pkg/query-service/utils/time.go @@ -15,6 +15,6 @@ func Elapsed(funcName string, args ...interface{}) func() { } argsStr = argsStr[:len(argsStr)-2] return func() { - zap.S().Infof("func %s took %v with args %v", funcName, time.Since(start), string(argsStr)) + zap.L().Info("Elapsed time", zap.String("func_name", funcName), zap.Duration("duration", time.Since(start)), zap.String("args", argsStr)) } } diff --git a/pkg/query-service/version/version.go b/pkg/query-service/version/version.go index 577fe6789c..68c37a4e0e 100644 --- a/pkg/query-service/version/version.go +++ b/pkg/query-service/version/version.go @@ -3,8 +3,6 @@ package version import ( "fmt" "runtime" - - "go.uber.org/zap" ) // These fields are set during an official build @@ -40,7 +38,7 @@ Copyright 2022 SigNoz // PrintVersion prints version and other helpful information. 
func PrintVersion() { - zap.S().Infof("\n%s\n", BuildDetails()) + fmt.Println(BuildDetails()) } func GetVersion() string { From e1679790f7d81d39b25e3ffd8f2f4a8c1244b4ef Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Wed, 27 Mar 2024 01:01:24 +0530 Subject: [PATCH 28/33] fix: log chips not forming making filtering not work (#4749) * fix: log chips not forming making filtering not work * fix: remove console log --- frontend/src/components/Logs/ListLogView/index.tsx | 1 - frontend/src/hooks/queryBuilder/useTag.ts | 9 ++++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/frontend/src/components/Logs/ListLogView/index.tsx b/frontend/src/components/Logs/ListLogView/index.tsx index 5577579b64..2b828d663c 100644 --- a/frontend/src/components/Logs/ListLogView/index.tsx +++ b/frontend/src/components/Logs/ListLogView/index.tsx @@ -48,7 +48,6 @@ function LogGeneralField({ fieldValue, linesPerRow = 1, }: LogFieldProps): JSX.Element { - console.log('fieldKey:', fieldKey, linesPerRow); const html = useMemo( () => ({ __html: convert.toHtml(dompurify.sanitize(fieldValue)), diff --git a/frontend/src/hooks/queryBuilder/useTag.ts b/frontend/src/hooks/queryBuilder/useTag.ts index a3bbd4af05..419aaaedc9 100644 --- a/frontend/src/hooks/queryBuilder/useTag.ts +++ b/frontend/src/hooks/queryBuilder/useTag.ts @@ -75,10 +75,13 @@ export const useTag = ( (value: string): void => { const { tagKey } = getTagToken(value); const parts = tagKey.split('-'); - // this is done to ensure that `hello-world` also gets converted to `body CONTAINS hello-world` - const id = parts[parts.length - 1]; - const key = parts.slice(0, -1).join('-'); + let id = parts[parts.length - 1]; + let key = parts.slice(0, -1).join('-'); + if (parts.length === 1) { + id = ''; + [key] = parts; + } if (id === 'custom') { const customValue = whereClauseConfig From ad1b01f225aeef839e5dc066f8720159a7392002 Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: 
Wed, 27 Mar 2024 10:23:57 +0530 Subject: [PATCH 29/33] feat: [SIG-566]: Added message to alert user about their past due - subscription status (#4724) * feat: [SIG-566]: Added message to alert user about their past due - subscription status * feat: [SIG-566]: Added message string to billings.json * feat: [SIG-566]: Added strings to billings.json * feat: [SIG-566]: updated test cases * feat: [SIG-566]: updated message text * feat: [SIG-566]: code fix * feat: [SIG-566]: code fix --- frontend/public/locales/en/billings.json | 14 ++++ frontend/src/api/billing/getUsage.ts | 1 + .../BillingContainer.test.tsx | 28 ++++---- .../BillingContainer/BillingContainer.tsx | 67 ++++++++++++++----- 4 files changed, 79 insertions(+), 31 deletions(-) create mode 100644 frontend/public/locales/en/billings.json diff --git a/frontend/public/locales/en/billings.json b/frontend/public/locales/en/billings.json new file mode 100644 index 0000000000..fb706e002f --- /dev/null +++ b/frontend/public/locales/en/billings.json @@ -0,0 +1,14 @@ +{ + "days_remaining": "days remaining in your billing period.", + "billing": "Billing", + "manage_billing_and_costs": "Manage your billing information, invoices, and monitor costs.", + "enterprise_cloud": "Enterprise Cloud", + "enterprise": "Enterprise", + "card_details_recieved_and_billing_info": "We have received your card details, your billing will only start after the end of your free trial period.", + "upgrade_plan": "Upgrade Plan", + "manage_billing": "Manage Billing", + "upgrade_now_text": "Upgrade now to have uninterrupted access", + "billing_start_info": "Your billing will start only after the trial period", + "checkout_plans": "Check out features in paid plans", + "here": "here" +} diff --git a/frontend/src/api/billing/getUsage.ts b/frontend/src/api/billing/getUsage.ts index 1cb5be5640..da7b6ebd63 100644 --- a/frontend/src/api/billing/getUsage.ts +++ b/frontend/src/api/billing/getUsage.ts @@ -13,6 +13,7 @@ export interface 
UsageResponsePayloadProps { billTotal: number; }; discount: number; + subscriptionStatus?: string; } const getUsage = async ( diff --git a/frontend/src/container/BillingContainer/BillingContainer.test.tsx b/frontend/src/container/BillingContainer/BillingContainer.test.tsx index cd447e5d60..1988df313b 100644 --- a/frontend/src/container/BillingContainer/BillingContainer.test.tsx +++ b/frontend/src/container/BillingContainer/BillingContainer.test.tsx @@ -56,14 +56,14 @@ describe('BillingContainer', () => { expect(cost).toBeInTheDocument(); const manageBilling = screen.getByRole('button', { - name: /manage billing/i, + name: 'manage_billing', }); expect(manageBilling).toBeInTheDocument(); const dollar = screen.getByText(/\$0/i); expect(dollar).toBeInTheDocument(); - const currentBill = screen.getByText('Billing'); + const currentBill = screen.getByText('billing'); expect(currentBill).toBeInTheDocument(); }); @@ -75,7 +75,7 @@ describe('BillingContainer', () => { const freeTrailText = await screen.findByText('Free Trial'); expect(freeTrailText).toBeInTheDocument(); - const currentBill = screen.getByText('Billing'); + const currentBill = screen.getByText('billing'); expect(currentBill).toBeInTheDocument(); const dollar0 = await screen.findByText(/\$0/i); @@ -85,18 +85,14 @@ describe('BillingContainer', () => { ); expect(onTrail).toBeInTheDocument(); - const numberOfDayRemaining = await screen.findByText( - /1 days remaining in your billing period./i, - ); + const numberOfDayRemaining = await screen.findByText(/1 days_remaining/i); expect(numberOfDayRemaining).toBeInTheDocument(); const upgradeButton = await screen.findAllByRole('button', { - name: /upgrade/i, + name: /upgrade_plan/i, }); expect(upgradeButton[1]).toBeInTheDocument(); expect(upgradeButton.length).toBe(2); - const checkPaidPlan = await screen.findByText( - /Check out features in paid plans/i, - ); + const checkPaidPlan = await screen.findByText(/checkout_plans/i); expect(checkPaidPlan).toBeInTheDocument(); 
const link = screen.getByRole('link', { name: /here/i }); @@ -114,7 +110,7 @@ describe('BillingContainer', () => { render(); }); - const currentBill = screen.getByText('Billing'); + const currentBill = screen.getByText('billing'); expect(currentBill).toBeInTheDocument(); const dollar0 = await screen.findByText(/\$0/i); @@ -126,17 +122,17 @@ describe('BillingContainer', () => { expect(onTrail).toBeInTheDocument(); const receivedCardDetails = await screen.findByText( - /We have received your card details, your billing will only start after the end of your free trial period./i, + /card_details_recieved_and_billing_info/i, ); expect(receivedCardDetails).toBeInTheDocument(); const manageBillingButton = await screen.findByRole('button', { - name: /manage billing/i, + name: /manage_billing/i, }); expect(manageBillingButton).toBeInTheDocument(); const dayRemainingInBillingPeriod = await screen.findByText( - /1 days remaining in your billing period./i, + /1 days_remaining/i, ); expect(dayRemainingInBillingPeriod).toBeInTheDocument(); }); @@ -156,7 +152,7 @@ describe('BillingContainer', () => { const billingPeriod = await findByText(billingPeriodText); expect(billingPeriod).toBeInTheDocument(); - const currentBill = screen.getByText('Billing'); + const currentBill = screen.getByText('billing'); expect(currentBill).toBeInTheDocument(); const dollar0 = await screen.findByText(/\$1,278.3/i); @@ -181,7 +177,7 @@ describe('BillingContainer', () => { ); render(); const dayRemainingInBillingPeriod = await screen.findByText( - /11 days remaining in your billing period./i, + /11 days_remaining/i, ); expect(dayRemainingInBillingPeriod).toBeInTheDocument(); }); diff --git a/frontend/src/container/BillingContainer/BillingContainer.tsx b/frontend/src/container/BillingContainer/BillingContainer.tsx index b31f9c4745..fe784a0c57 100644 --- a/frontend/src/container/BillingContainer/BillingContainer.tsx +++ b/frontend/src/container/BillingContainer/BillingContainer.tsx @@ -17,7 +17,7 @@ 
import { } from 'antd'; import { ColumnsType } from 'antd/es/table'; import updateCreditCardApi from 'api/billing/checkout'; -import getUsage from 'api/billing/getUsage'; +import getUsage, { UsageResponsePayloadProps } from 'api/billing/getUsage'; import manageCreditCardApi from 'api/billing/manage'; import Spinner from 'components/Spinner'; import { SOMETHING_WENT_WRONG } from 'constants/api'; @@ -28,6 +28,7 @@ import useLicense from 'hooks/useLicense'; import { useNotifications } from 'hooks/useNotifications'; import { pick } from 'lodash-es'; import { useCallback, useEffect, useState } from 'react'; +import { useTranslation } from 'react-i18next'; import { useMutation, useQuery } from 'react-query'; import { useSelector } from 'react-redux'; import { AppState } from 'store/reducers'; @@ -49,6 +50,11 @@ interface DataType { cost: string; } +enum SubscriptionStatus { + PastDue = 'past_due', + Active = 'active', +} + const renderSkeletonInput = (): JSX.Element => ( = [ }, ]; +// eslint-disable-next-line sonarjs/cognitive-complexity export default function BillingContainer(): JSX.Element { - const daysRemainingStr = 'days remaining in your billing period.'; + const { t } = useTranslation(['billings']); + const daysRemainingStr = t('days_remaining'); const [headerText, setHeaderText] = useState(''); const [billAmount, setBillAmount] = useState(0); const [activeLicense, setActiveLicense] = useState(null); const [daysRemaining, setDaysRemaining] = useState(0); const [isFreeTrial, setIsFreeTrial] = useState(false); const [data, setData] = useState([]); - const [apiResponse, setApiResponse] = useState({}); + const [apiResponse, setApiResponse] = useState< + Partial + >({}); const { trackEvent } = useAnalytics(); @@ -186,6 +196,9 @@ export default function BillingContainer(): JSX.Element { [licensesData?.payload?.onTrial], ); + const isSubscriptionPastDue = + apiResponse.subscriptionStatus === SubscriptionStatus.PastDue; + const { isLoading, isFetching: 
isFetchingBillingData } = useQuery( [REACT_QUERY_KEY.GET_BILLING_USAGE, user?.userId], { @@ -342,14 +355,27 @@ export default function BillingContainer(): JSX.Element { [apiResponse, billAmount, isLoading, isFetchingBillingData], ); + const { Text } = Typography; + const subscriptionPastDueMessage = (): JSX.Element => ( + + {`We were not able to process payments for your account. Please update your card details `} + + {t('here')} + + {` if your payment information has changed. Email us at `} + cloud-support@signoz.io + {` otherwise. Be sure to provide this information immediately to avoid interruption to your service.`} + + ); + return (
- Billing + {t('billing')} - Manage your billing information, invoices, and monitor costs. + {t('manage_billing_and_costs')} @@ -361,7 +387,7 @@ export default function BillingContainer(): JSX.Element { - {isCloudUserVal ? 'Enterprise Cloud' : 'Enterprise'}{' '} + {isCloudUserVal ? t('enterprise_cloud') : t('enterprise')}{' '} {isFreeTrial ? Free Trial : ''} {!isLoading && !isFetchingBillingData ? ( @@ -378,8 +404,8 @@ export default function BillingContainer(): JSX.Element { onClick={handleBilling} > {isFreeTrial && !licensesData?.payload?.trialConvertedToSubscription - ? 'Upgrade Plan' - : 'Manage Billing'} + ? t('upgrade_plan') + : t('manage_billing')} @@ -389,8 +415,7 @@ export default function BillingContainer(): JSX.Element { ellipsis style={{ fontWeight: '300', color: '#49aa19', fontSize: 12 }} > - We have received your card details, your billing will only start after - the end of your free trial period. + {t('card_details_recieved_and_billing_info')} )} @@ -404,6 +429,18 @@ export default function BillingContainer(): JSX.Element { ) : ( )} + + {isSubscriptionPastDue && + (!isLoading && !isFetchingBillingData ? 
( + + ) : ( + + ))} @@ -434,16 +471,16 @@ export default function BillingContainer(): JSX.Element { - Upgrade now to have uninterrupted access + {t('upgrade_now_text')} - Your billing will start only after the trial period + {t('Your billing will start only after the trial period')} - Check out features in paid plans   + {t('checkout_plans')}   - here + {t('here')} @@ -464,7 +501,7 @@ export default function BillingContainer(): JSX.Element { loading={isLoadingBilling || isLoadingManageBilling} onClick={handleBilling} > - Upgrade Plan + {t('upgrade_plan')} From dbd4363ff87b5e986f73839512b7a2e966f406b4 Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Wed, 27 Mar 2024 11:55:28 +0530 Subject: [PATCH 30/33] feat: [SIG-573]: Fixed billing page issues (#4744) * feat: [SIG-573]: Fixed billing page issues * feat: [SIG-573]: Fixed jest test case --- .../BillingContainer/BillingContainer.tsx | 19 +++++++----- .../BillingUsageGraph/BillingUsageGraph.tsx | 31 +++++++++++++------ .../src/lib/uPlotLib/plugins/tooltipPlugin.ts | 14 ++++++++- 3 files changed, 46 insertions(+), 18 deletions(-) diff --git a/frontend/src/container/BillingContainer/BillingContainer.tsx b/frontend/src/container/BillingContainer/BillingContainer.tsx index fe784a0c57..9b45801356 100644 --- a/frontend/src/container/BillingContainer/BillingContainer.tsx +++ b/frontend/src/container/BillingContainer/BillingContainer.tsx @@ -26,7 +26,7 @@ import useAnalytics from 'hooks/analytics/useAnalytics'; import useAxiosError from 'hooks/useAxiosError'; import useLicense from 'hooks/useLicense'; import { useNotifications } from 'hooks/useNotifications'; -import { pick } from 'lodash-es'; +import { isEmpty, pick } from 'lodash-es'; import { useCallback, useEffect, useState } from 'react'; import { useTranslation } from 'react-i18next'; import { useMutation, useQuery } from 'react-query'; @@ -149,6 +149,9 @@ export default function BillingContainer(): JSX.Element { 
const processUsageData = useCallback( (data: any): void => { + if (isEmpty(data?.payload)) { + return; + } const { details: { breakdown = [], billTotal }, billingPeriodStart, @@ -420,12 +423,14 @@ export default function BillingContainer(): JSX.Element { )} {!isLoading && !isFetchingBillingData ? ( - + headerText && ( + + ) ) : ( )} diff --git a/frontend/src/container/BillingContainer/BillingUsageGraph/BillingUsageGraph.tsx b/frontend/src/container/BillingContainer/BillingUsageGraph/BillingUsageGraph.tsx index fa6ce813a6..be77ebba95 100644 --- a/frontend/src/container/BillingContainer/BillingUsageGraph/BillingUsageGraph.tsx +++ b/frontend/src/container/BillingContainer/BillingUsageGraph/BillingUsageGraph.tsx @@ -3,9 +3,7 @@ import '../../../lib/uPlotLib/uPlotLib.styles.scss'; import { Color } from '@signozhq/design-tokens'; import { Card, Flex, Typography } from 'antd'; -import { getComponentForPanelType } from 'constants/panelTypes'; -import { PANEL_TYPES } from 'constants/queryBuilder'; -import { PropsTypePropsMap } from 'container/GridPanelSwitch/types'; +import Uplot from 'components/Uplot'; import { useIsDarkMode } from 'hooks/useDarkMode'; import { useResizeObserver } from 'hooks/useDimensions'; import tooltipPlugin from 'lib/uPlotLib/plugins/tooltipPlugin'; @@ -14,7 +12,7 @@ import getRenderer from 'lib/uPlotLib/utils/getRenderer'; import { getUPlotChartData } from 'lib/uPlotLib/utils/getUplotChartData'; import { getXAxisScale } from 'lib/uPlotLib/utils/getXAxisScale'; import { getYAxisScale } from 'lib/uPlotLib/utils/getYAxisScale'; -import { FC, useMemo, useRef } from 'react'; +import { useMemo, useRef } from 'react'; import uPlot from 'uplot'; import { @@ -43,6 +41,21 @@ const paths = ( return renderer(u, seriesIdx, idx0, idx1, extendGap, buildClip); }; +const calculateStartEndTime = ( + data: any, +): { startTime: number; endTime: number } => { + const timestamps: number[] = []; + data?.details?.breakdown?.forEach((breakdown: any) => { + 
breakdown?.dayWiseBreakdown?.breakdown.forEach((entry: any) => { + timestamps.push(entry?.timestamp); + }); + }); + const billingTime = [data?.billingPeriodStart, data?.billingPeriodEnd]; + const startTime: number = Math.min(...timestamps, ...billingTime); + const endTime: number = Math.max(...timestamps, ...billingTime); + return { startTime, endTime }; +}; + export function BillingUsageGraph(props: BillingUsageGraphProps): JSX.Element { const { data, billAmount } = props; const graphCompatibleData = useMemo( @@ -54,11 +67,9 @@ export function BillingUsageGraph(props: BillingUsageGraphProps): JSX.Element { const isDarkMode = useIsDarkMode(); const containerDimensions = useResizeObserver(graphRef); - const { billingPeriodStart: startTime, billingPeriodEnd: endTime } = data; - - const Component = getComponentForPanelType(PANEL_TYPES.BAR) as FC< - PropsTypePropsMap[PANEL_TYPES] - >; + const { startTime, endTime } = useMemo(() => calculateStartEndTime(data), [ + data, + ]); const getGraphSeries = (color: string, label: string): any => ({ drawStyle: 'bars', @@ -183,7 +194,7 @@ export function BillingUsageGraph(props: BillingUsageGraphProps): JSX.Element {
- +
); diff --git a/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts b/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts index 4ec3677dfb..b06e5bff63 100644 --- a/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts +++ b/frontend/src/lib/uPlotLib/plugins/tooltipPlugin.ts @@ -3,6 +3,7 @@ import { themeColors } from 'constants/theme'; import dayjs from 'dayjs'; import customParseFormat from 'dayjs/plugin/customParseFormat'; import getLabelName from 'lib/getLabelName'; +import { get } from 'lodash-es'; import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange'; import { placement } from '../placement'; @@ -68,7 +69,18 @@ const generateTooltipContent = ( const dataIngested = quantity[idx]; const label = getLabelName(metric, queryName || '', legend || ''); - const color = generateColor(label, themeColors.chartcolors); + let color = generateColor(label, themeColors.chartcolors); + + // in case of billing graph pick colors from the series options + if (isBillingUsageGraphs) { + let clr; + series.forEach((item) => { + if (item.label === label) { + clr = get(item, '_fill'); + } + }); + color = clr ?? 
color; + } let tooltipItemLabel = label; From a30b75a2a8be6f81f877953027d43989358e48c4 Mon Sep 17 00:00:00 2001 From: Yunus M Date: Wed, 27 Mar 2024 18:46:05 +0530 Subject: [PATCH 31/33] feat: show environments in a separate dropdown (#4717) * feat: show environments in a separate dropdown --- .../ResourceAttributesFilter.styles.scss | 20 ++ .../ResourceAttributesFilter.tsx | 172 +++++++++++++----- .../components/QueryChip/QueryChip.tsx | 5 +- .../ResourceAttributesFilter/styles.ts | 7 +- .../useResourceAttribute/ResourceProvider.tsx | 37 +++- .../src/hooks/useResourceAttribute/types.ts | 1 + .../src/hooks/useResourceAttribute/utils.ts | 48 +++++ frontend/src/pages/Services/Metrics.test.tsx | 4 +- 8 files changed, 237 insertions(+), 57 deletions(-) create mode 100644 frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.styles.scss diff --git a/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.styles.scss b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.styles.scss new file mode 100644 index 0000000000..9d10445703 --- /dev/null +++ b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.styles.scss @@ -0,0 +1,20 @@ +.resourceAttributesFilter-container { + display: flex; + align-items: center; + justify-content: stretch; + flex-wrap: wrap; + gap: 8px; + margin-bottom: 16px; + + .resource-attributes-selector { + flex: 1; + } + + .environment-selector { + min-width: 200px; + } + + .ant-form-item { + margin-bottom: 0; + } +} diff --git a/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx index a61a0ce0ee..4211291742 100644 --- a/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx +++ b/frontend/src/container/ResourceAttributesFilter/ResourceAttributesFilter.tsx @@ -1,10 +1,17 @@ +import './ResourceAttributesFilter.styles.scss'; + import { CloseCircleFilled 
} from '@ant-design/icons'; import { Button, Select, Spin } from 'antd'; import useResourceAttribute, { isResourceEmpty, } from 'hooks/useResourceAttribute'; -import { convertMetricKeyToTrace } from 'hooks/useResourceAttribute/utils'; -import { ReactNode, useMemo } from 'react'; +import { + convertMetricKeyToTrace, + getEnvironmentTagKeys, + getEnvironmentTagValues, +} from 'hooks/useResourceAttribute/utils'; +import { ReactNode, useEffect, useMemo, useState } from 'react'; +import { SelectOption } from 'types/common/select'; import { popupContainer } from 'utils/selectPopupContainer'; import { v4 as uuid } from 'uuid'; @@ -22,60 +29,129 @@ function ResourceAttributesFilter({ handleClearAll, handleFocus, handleChange, + handleEnvironmentChange, selectedQuery, optionsData, loading, } = useResourceAttribute(); - const isEmpty = useMemo( - () => isResourceEmpty(queries, staging, selectedQuery), - [queries, selectedQuery, staging], + const [environments, setEnvironments] = useState< + SelectOption[] + >([]); + + const [selectedEnvironments, setSelectedEnvironments] = useState([]); + + const queriesExcludingEnvironment = useMemo( + () => + queries.filter( + (query) => query.tagKey !== 'resource_deployment_environment', + ), + [queries], ); - return ( - -
- {queries.map((query) => ( - - ))} - {staging.map((query, idx) => ( - - {idx === 0 ? convertMetricKeyToTrace(query) : query} - - ))} -
- + {environments.map((opt) => ( + + {opt.label} + + ))} + +
+ +
+ +
+ {queriesExcludingEnvironment.map((query) => ( + + ))} + {staging.map((query, idx) => ( + + {idx === 0 ? convertMetricKeyToTrace(query) : query} + + ))} +
+