From f3bc1a8f8a0acba6ace41069b8d9d37168f490d0 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Thu, 22 Feb 2024 16:32:30 +0530 Subject: [PATCH 01/16] fix: date time value initialising to start of day in case of typing (#4585) --- .../CustomTimePickerPopoverContent.tsx | 8 +++-- .../TopNav/DateTimeSelectionV2/config.ts | 5 +++ .../TopNav/DateTimeSelectionV2/index.tsx | 35 +++++++++++-------- 3 files changed, 32 insertions(+), 16 deletions(-) diff --git a/frontend/src/components/CustomTimePicker/CustomTimePickerPopoverContent.tsx b/frontend/src/components/CustomTimePicker/CustomTimePickerPopoverContent.tsx index e7bd571f35..3141158f7f 100644 --- a/frontend/src/components/CustomTimePicker/CustomTimePickerPopoverContent.tsx +++ b/frontend/src/components/CustomTimePicker/CustomTimePickerPopoverContent.tsx @@ -5,6 +5,7 @@ import cx from 'classnames'; import ROUTES from 'constants/routes'; import { DateTimeRangeType } from 'container/TopNav/CustomDateTimeModal'; import { + LexicalContext, Option, RelativeDurationSuggestionOptions, } from 'container/TopNav/DateTimeSelectionV2/config'; @@ -20,7 +21,10 @@ interface CustomTimePickerPopoverContentProps { setIsOpen: Dispatch>; customDateTimeVisible: boolean; setCustomDTPickerVisible: Dispatch>; - onCustomDateHandler: (dateTimeRange: DateTimeRangeType) => void; + onCustomDateHandler: ( + dateTimeRange: DateTimeRangeType, + lexicalContext?: LexicalContext, + ) => void; onSelectHandler: (label: string, value: string) => void; handleGoLive: () => void; selectedTime: string; @@ -63,7 +67,7 @@ function CustomTimePickerPopoverContent({ if (date_time?.[1]) { onPopoverClose(false); } - onCustomDateHandler(date_time); + onCustomDateHandler(date_time, LexicalContext.CUSTOM_DATE_PICKER); }; function getTimeChips(options: Option[]): JSX.Element { return ( diff --git a/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts b/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts index a1562e5a4c..becd3fed7b 100644 --- 
a/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts +++ b/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts @@ -148,3 +148,8 @@ export interface TimeRange { startTime: string; endTime: string; } + +export enum LexicalContext { + CUSTOM_DATE_PICKER = 'customDatePicker', + CUSTOM_DATE_TIME_INPUT = 'customDateTimeInput', +} diff --git a/frontend/src/container/TopNav/DateTimeSelectionV2/index.tsx b/frontend/src/container/TopNav/DateTimeSelectionV2/index.tsx index 157397f026..6e5c0c5b48 100644 --- a/frontend/src/container/TopNav/DateTimeSelectionV2/index.tsx +++ b/frontend/src/container/TopNav/DateTimeSelectionV2/index.tsx @@ -44,6 +44,7 @@ import { DateTimeRangeType } from '../CustomDateTimeModal'; import { getDefaultOption, getOptions, + LexicalContext, LocalStorageTimeRange, Time, TimeRange, @@ -318,31 +319,37 @@ function DateTimeSelection({ onLastRefreshHandler(); }; - const onCustomDateHandler = (dateTimeRange: DateTimeRangeType): void => { + const onCustomDateHandler = ( + dateTimeRange: DateTimeRangeType, + lexicalContext?: LexicalContext, + ): void => { if (dateTimeRange !== null) { const [startTimeMoment, endTimeMoment] = dateTimeRange; if (startTimeMoment && endTimeMoment) { + let startTime = startTimeMoment; + let endTime = endTimeMoment; + if ( + lexicalContext && + lexicalContext === LexicalContext.CUSTOM_DATE_PICKER + ) { + startTime = startTime.startOf('day'); + endTime = endTime.endOf('day'); + } setCustomDTPickerVisible(false); - startTimeMoment.startOf('day').toString(); updateTimeInterval('custom', [ - startTimeMoment.startOf('day').toDate().getTime(), - endTimeMoment.endOf('day').toDate().getTime(), + startTime.toDate().getTime(), + endTime.toDate().getTime(), ]); - setLocalStorageKey('startTime', startTimeMoment.toString()); - setLocalStorageKey('endTime', endTimeMoment.toString()); - updateLocalStorageForRoutes( - JSON.stringify({ startTime: startTimeMoment, endTime: endTimeMoment }), - ); + setLocalStorageKey('startTime', 
startTime.toString()); + setLocalStorageKey('endTime', endTime.toString()); + updateLocalStorageForRoutes(JSON.stringify({ startTime, endTime })); if (!isLogsExplorerPage) { urlQuery.set( QueryParams.startTime, - startTimeMoment?.toDate().getTime().toString(), - ); - urlQuery.set( - QueryParams.endTime, - endTimeMoment?.toDate().getTime().toString(), + startTime?.toDate().getTime().toString(), ); + urlQuery.set(QueryParams.endTime, endTime?.toDate().getTime().toString()); const generatedUrl = `${location.pathname}?${urlQuery.toString()}`; history.replace(generatedUrl); } From f2d5d21581d6c341f4c684239088545ab0ecc6bb Mon Sep 17 00:00:00 2001 From: Yunus M Date: Thu, 22 Feb 2024 16:57:06 +0530 Subject: [PATCH 02/16] fix: redirect old logs routes to new routes (#4584) --- frontend/src/AppRoutes/Private.tsx | 21 +++++++++++++++++++-- frontend/src/AppRoutes/routes.ts | 28 +++++++++++++++++++++------- 2 files changed, 40 insertions(+), 9 deletions(-) diff --git a/frontend/src/AppRoutes/Private.tsx b/frontend/src/AppRoutes/Private.tsx index f0bfa62d6e..669def6f44 100644 --- a/frontend/src/AppRoutes/Private.tsx +++ b/frontend/src/AppRoutes/Private.tsx @@ -20,11 +20,16 @@ import { UPDATE_USER_IS_FETCH } from 'types/actions/app'; import AppReducer from 'types/reducer/app'; import { routePermission } from 'utils/permission'; -import routes, { LIST_LICENSES } from './routes'; +import routes, { + LIST_LICENSES, + oldNewRoutesMapping, + oldRoutes, +} from './routes'; import afterLogin from './utils'; function PrivateRoute({ children }: PrivateRouteProps): JSX.Element { - const { pathname } = useLocation(); + const location = useLocation(); + const { pathname } = location; const mapRoutes = useMemo( () => @@ -59,6 +64,8 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element { const currentRoute = mapRoutes.get('current'); + const isOldRoute = oldRoutes.indexOf(pathname) > -1; + const isLocalStorageLoggedIn = getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN) === 
'true'; @@ -158,6 +165,16 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element { useEffect(() => { (async (): Promise => { try { + if (isOldRoute) { + const redirectUrl = oldNewRoutesMapping[pathname]; + + const newLocation = { + ...location, + pathname: redirectUrl, + }; + history.replace(newLocation); + } + if (currentRoute) { const { isPrivate, key } = currentRoute; diff --git a/frontend/src/AppRoutes/routes.ts b/frontend/src/AppRoutes/routes.ts index a6543ad01d..9fb4fd9c70 100644 --- a/frontend/src/AppRoutes/routes.ts +++ b/frontend/src/AppRoutes/routes.ts @@ -279,6 +279,13 @@ const routes: AppRoutes[] = [ key: 'LIVE_LOGS', isPrivate: true, }, + { + path: ROUTES.LOGS_PIPELINES, + exact: true, + component: PipelinePage, + key: 'LOGS_PIPELINES', + isPrivate: true, + }, { path: ROUTES.LOGIN, exact: true, @@ -307,13 +314,6 @@ const routes: AppRoutes[] = [ key: 'SOMETHING_WENT_WRONG', isPrivate: false, }, - { - path: ROUTES.LOGS_PIPELINES, - exact: true, - component: PipelinePage, - key: 'LOGS_PIPELINES', - isPrivate: true, - }, { path: ROUTES.BILLING, exact: true, @@ -353,6 +353,20 @@ export const LIST_LICENSES: AppRoutes = { key: 'LIST_LICENSES', }; +export const oldRoutes = [ + '/pipelines', + '/logs/old-logs-explorer', + '/logs-explorer', + '/logs-explorer/live', +]; + +export const oldNewRoutesMapping: Record = { + '/pipelines': '/logs/pipelines', + '/logs/old-logs-explorer': '/logs/old-logs-explorer', + '/logs-explorer': '/logs/logs-explorer', + '/logs-explorer/live': '/logs/logs-explorer/live', +}; + export interface AppRoutes { component: RouteProps['component']; path: RouteProps['path']; From fe37a2e7e05580e750e206bed9c76a5b6bac47e9 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Fri, 23 Feb 2024 22:19:45 +0530 Subject: [PATCH 03/16] fix: traceID link not opening from log details page (#4590) --- frontend/src/container/LogDetailedView/TableView.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/frontend/src/container/LogDetailedView/TableView.tsx b/frontend/src/container/LogDetailedView/TableView.tsx index 2095051fc7..593519404d 100644 --- a/frontend/src/container/LogDetailedView/TableView.tsx +++ b/frontend/src/container/LogDetailedView/TableView.tsx @@ -105,7 +105,7 @@ function TableView({ const onTraceHandler = ( record: DataType, event: React.MouseEvent, - ) => (): void => { + ): void => { if (flattenLogData === null) return; const traceId = flattenLogData[record.field]; From 17de5836bd290d00b5f26ab31a5481f4953b049a Mon Sep 17 00:00:00 2001 From: Yunus M Date: Fri, 23 Feb 2024 22:53:09 +0530 Subject: [PATCH 04/16] feat: send only required details in billingevents (#4587) --- frontend/src/container/BillingContainer/BillingContainer.tsx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/frontend/src/container/BillingContainer/BillingContainer.tsx b/frontend/src/container/BillingContainer/BillingContainer.tsx index 7c1bfe374f..e419c581ed 100644 --- a/frontend/src/container/BillingContainer/BillingContainer.tsx +++ b/frontend/src/container/BillingContainer/BillingContainer.tsx @@ -13,6 +13,7 @@ import useAnalytics from 'hooks/analytics/useAnalytics'; import useAxiosError from 'hooks/useAxiosError'; import useLicense from 'hooks/useLicense'; import { useNotifications } from 'hooks/useNotifications'; +import { pick } from 'lodash-es'; import { useCallback, useEffect, useState } from 'react'; import { useMutation, useQuery } from 'react-query'; import { useSelector } from 'react-redux'; @@ -305,7 +306,7 @@ export default function BillingContainer(): JSX.Element { const handleBilling = useCallback(async () => { if (isFreeTrial && !licensesData?.payload?.trialConvertedToSubscription) { trackEvent('Billing : Upgrade Plan', { - user, + user: pick(user, ['email', 'userId', 'name']), org, }); @@ -316,7 +317,7 @@ export default function BillingContainer(): JSX.Element { }); } else { trackEvent('Billing : Manage Billing', { - user, + user: 
pick(user, ['email', 'userId', 'name']), org, }); From f9eddc9b18f6dc7ba9aebfca690876688da2b6ab Mon Sep 17 00:00:00 2001 From: Yunus M Date: Mon, 26 Feb 2024 12:09:31 +0530 Subject: [PATCH 05/16] fix: update no logs text and link based on the datasource (#4594) --- .../src/container/LogsExplorerList/index.tsx | 2 +- .../src/container/LogsExplorerViews/index.tsx | 1 + frontend/src/container/NoLogs/NoLogs.tsx | 16 +++++++++++----- .../container/TimeSeriesView/TimeSeriesView.tsx | 5 ++++- frontend/src/container/TimeSeriesView/index.tsx | 1 + 5 files changed, 18 insertions(+), 7 deletions(-) diff --git a/frontend/src/container/LogsExplorerList/index.tsx b/frontend/src/container/LogsExplorerList/index.tsx index c4bba5291b..dbc38b7923 100644 --- a/frontend/src/container/LogsExplorerList/index.tsx +++ b/frontend/src/container/LogsExplorerList/index.tsx @@ -161,7 +161,7 @@ function LogsExplorerList({ !isFetching && !isError && !isFilterApplied && - logs.length === 0 && } + logs.length === 0 && } {!isLoading && !isFetching && diff --git a/frontend/src/container/LogsExplorerViews/index.tsx b/frontend/src/container/LogsExplorerViews/index.tsx index 4b639ed505..49b5842908 100644 --- a/frontend/src/container/LogsExplorerViews/index.tsx +++ b/frontend/src/container/LogsExplorerViews/index.tsx @@ -611,6 +611,7 @@ function LogsExplorerViews({ data={data} isError={isError} isFilterApplied={!isEmpty(listQuery?.filters.items)} + dataSource={DataSource.LOGS} /> )} diff --git a/frontend/src/container/NoLogs/NoLogs.tsx b/frontend/src/container/NoLogs/NoLogs.tsx index 1274bdb20a..7745d130f3 100644 --- a/frontend/src/container/NoLogs/NoLogs.tsx +++ b/frontend/src/container/NoLogs/NoLogs.tsx @@ -2,25 +2,31 @@ import './NoLogs.styles.scss'; import { Typography } from 'antd'; import { ArrowUpRight } from 'lucide-react'; +import { DataSource } from 'types/common/queryBuilder'; -export default function NoLogs(): JSX.Element { +export default function NoLogs({ + dataSource, +}: { + dataSource: 
DataSource; +}): JSX.Element { return (
eyes emoji - No logs yet. + No {dataSource} yet. - When we receive logs, they would show up here + {' '} + When we receive {dataSource}, they would show up here - Sending Logs to SigNoz + Sending {dataSource} to SigNoz
diff --git a/frontend/src/container/TimeSeriesView/TimeSeriesView.tsx b/frontend/src/container/TimeSeriesView/TimeSeriesView.tsx index 8bac60b425..bd7f32b153 100644 --- a/frontend/src/container/TimeSeriesView/TimeSeriesView.tsx +++ b/frontend/src/container/TimeSeriesView/TimeSeriesView.tsx @@ -14,6 +14,7 @@ import { useSelector } from 'react-redux'; import { AppState } from 'store/reducers'; import { SuccessResponse } from 'types/api'; import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange'; +import { DataSource } from 'types/common/queryBuilder'; import { GlobalReducer } from 'types/reducer/globalTime'; import { getTimeRange } from 'utils/getTimeRange'; @@ -25,6 +26,7 @@ function TimeSeriesView({ isError, yAxisUnit, isFilterApplied, + dataSource, }: TimeSeriesViewProps): JSX.Element { const graphRef = useRef(null); @@ -93,7 +95,7 @@ function TimeSeriesView({ chartData[0]?.length === 0 && !isLoading && !isError && - !isFilterApplied && } + !isFilterApplied && } {!isLoading && !isError && @@ -111,6 +113,7 @@ interface TimeSeriesViewProps { isLoading: boolean; isError: boolean; isFilterApplied: boolean; + dataSource: DataSource; } TimeSeriesView.defaultProps = { diff --git a/frontend/src/container/TimeSeriesView/index.tsx b/frontend/src/container/TimeSeriesView/index.tsx index b2c6b3f01f..4acffd7e61 100644 --- a/frontend/src/container/TimeSeriesView/index.tsx +++ b/frontend/src/container/TimeSeriesView/index.tsx @@ -74,6 +74,7 @@ function TimeSeriesViewContainer({ isLoading={isLoading} data={responseData} yAxisUnit={isValidToConvertToMs ? 
'ms' : 'short'} + dataSource={dataSource} /> ); } From c38247abe43dce81ce52719785272b26352fd79c Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Mon, 26 Feb 2024 18:17:34 +0530 Subject: [PATCH 06/16] fix: [SIG-528]: precommit typescript check for md files (#4596) --- frontend/scripts/typecheck-staged.sh | 2 +- frontend/tsconfig.json | 22 +++++----------------- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/frontend/scripts/typecheck-staged.sh b/frontend/scripts/typecheck-staged.sh index e67cf54a72..7da93c088e 100644 --- a/frontend/scripts/typecheck-staged.sh +++ b/frontend/scripts/typecheck-staged.sh @@ -9,7 +9,7 @@ done # create temporary tsconfig which includes only passed files str="{ \"extends\": \"./tsconfig.json\", - \"include\": [\"src/types/global.d.ts\",\"src/typings/window.ts\", \"src/typings/chartjs-adapter-date-fns.d.ts\", \"src/typings/environment.ts\" ,$files] + \"include\": [\"src/types/global.d.ts\",\"src/typings/window.ts\", \"src/typings/chartjs-adapter-date-fns.d.ts\", \"src/typings/environment.ts\" ,\"src/container/OnboardingContainer/typings.d.ts\",$files] }" echo $str > tsconfig.tmp diff --git a/frontend/tsconfig.json b/frontend/tsconfig.json index e22372bac7..e286daf28e 100644 --- a/frontend/tsconfig.json +++ b/frontend/tsconfig.json @@ -11,11 +11,7 @@ "esModuleInterop": true, "skipLibCheck": true, "forceConsistentCasingInFileNames": true, - "lib": [ - "dom", - "dom.iterable", - "esnext" - ], + "lib": ["dom", "dom.iterable", "esnext"], "allowSyntheticDefaultImports": true, "noFallthroughCasesInSwitch": true, "moduleResolution": "node", @@ -24,9 +20,7 @@ "noEmit": true, "baseUrl": "./src", "paths": { - "@constants/*": [ - "/container/OnboardingContainer/constants/*" - ] + "@constants/*": ["/container/OnboardingContainer/constants/*"] }, "downlevelIteration": true, "plugins": [ @@ -34,15 +28,9 @@ "name": "typescript-plugin-css-modules" } ], - "types": [ - "node", - "jest" - ], + "types": ["node", "jest"] }, - "exclude": [ - 
"node_modules", - "./src/container/OnboardingContainer/constants/*.ts" - ], + "exclude": ["node_modules"], "include": [ "./src", "./src/**/*.ts", @@ -63,4 +51,4 @@ "./tests/**.ts", "./**/*.d.ts" ] -} \ No newline at end of file +} From 89c6eba913c8d68db2fd8a315f6536f9ba3d1848 Mon Sep 17 00:00:00 2001 From: Yunus M Date: Tue, 27 Feb 2024 13:38:43 +0530 Subject: [PATCH 07/16] feat: update naming to API keys to Access Tokens (#4597) * feat: update naming to API keys to Access Tokens * feat: update api-keys route to access-tokens --- frontend/public/locales/en-GB/routes.json | 2 +- frontend/public/locales/en-GB/titles.json | 2 +- frontend/public/locales/en/apiKeys.json | 2 +- frontend/public/locales/en/routes.json | 2 +- frontend/public/locales/en/titles.json | 2 +- frontend/src/AppRoutes/routes.ts | 2 ++ frontend/src/constants/routes.ts | 2 +- .../src/container/APIKeys/APIKeys.test.tsx | 22 +++++++------- frontend/src/container/APIKeys/APIKeys.tsx | 30 +++++++++---------- 9 files changed, 34 insertions(+), 32 deletions(-) diff --git a/frontend/public/locales/en-GB/routes.json b/frontend/public/locales/en-GB/routes.json index c88baa096a..ede3f467cf 100644 --- a/frontend/public/locales/en-GB/routes.json +++ b/frontend/public/locales/en-GB/routes.json @@ -3,7 +3,7 @@ "alert_channels": "Alert Channels", "organization_settings": "Organization Settings", "ingestion_settings": "Ingestion Settings", - "api_keys": "API Keys", + "api_keys": "Access Tokens", "my_settings": "My Settings", "overview_metrics": "Overview Metrics", "dbcall_metrics": "Database Calls", diff --git a/frontend/public/locales/en-GB/titles.json b/frontend/public/locales/en-GB/titles.json index d8ed6ff0ef..0eb98e9960 100644 --- a/frontend/public/locales/en-GB/titles.json +++ b/frontend/public/locales/en-GB/titles.json @@ -26,7 +26,7 @@ "MY_SETTINGS": "SigNoz | My Settings", "ORG_SETTINGS": "SigNoz | Organization Settings", "INGESTION_SETTINGS": "SigNoz | Ingestion Settings", - "API_KEYS": "SigNoz | API 
Keys", + "API_KEYS": "SigNoz | Access Tokens", "SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong", "UN_AUTHORIZED": "SigNoz | Unauthorized", "NOT_FOUND": "SigNoz | Page Not Found", diff --git a/frontend/public/locales/en/apiKeys.json b/frontend/public/locales/en/apiKeys.json index 5cc51fa92e..fb86610c9a 100644 --- a/frontend/public/locales/en/apiKeys.json +++ b/frontend/public/locales/en/apiKeys.json @@ -1,3 +1,3 @@ { - "delete_confirm_message": "Are you sure you want to delete {{keyName}} key? Deleting a key is irreversible and cannot be undone." + "delete_confirm_message": "Are you sure you want to delete {{keyName}} token? Deleting a token is irreversible and cannot be undone." } diff --git a/frontend/public/locales/en/routes.json b/frontend/public/locales/en/routes.json index c88baa096a..ede3f467cf 100644 --- a/frontend/public/locales/en/routes.json +++ b/frontend/public/locales/en/routes.json @@ -3,7 +3,7 @@ "alert_channels": "Alert Channels", "organization_settings": "Organization Settings", "ingestion_settings": "Ingestion Settings", - "api_keys": "API Keys", + "api_keys": "Access Tokens", "my_settings": "My Settings", "overview_metrics": "Overview Metrics", "dbcall_metrics": "Database Calls", diff --git a/frontend/public/locales/en/titles.json b/frontend/public/locales/en/titles.json index cebb3151d9..85da13a12a 100644 --- a/frontend/public/locales/en/titles.json +++ b/frontend/public/locales/en/titles.json @@ -26,7 +26,7 @@ "MY_SETTINGS": "SigNoz | My Settings", "ORG_SETTINGS": "SigNoz | Organization Settings", "INGESTION_SETTINGS": "SigNoz | Ingestion Settings", - "API_KEYS": "SigNoz | API Keys", + "API_KEYS": "SigNoz | Access Tokens", "SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong", "UN_AUTHORIZED": "SigNoz | Unauthorized", "NOT_FOUND": "SigNoz | Page Not Found", diff --git a/frontend/src/AppRoutes/routes.ts b/frontend/src/AppRoutes/routes.ts index 9fb4fd9c70..c0332448e7 100644 --- a/frontend/src/AppRoutes/routes.ts +++ 
b/frontend/src/AppRoutes/routes.ts @@ -358,6 +358,7 @@ export const oldRoutes = [ '/logs/old-logs-explorer', '/logs-explorer', '/logs-explorer/live', + '/settings/api-keys', ]; export const oldNewRoutesMapping: Record = { @@ -365,6 +366,7 @@ export const oldNewRoutesMapping: Record = { '/logs/old-logs-explorer': '/logs/old-logs-explorer', '/logs-explorer': '/logs/logs-explorer', '/logs-explorer/live': '/logs/logs-explorer/live', + '/settings/api-keys': '/settings/access-tokens', }; export interface AppRoutes { diff --git a/frontend/src/constants/routes.ts b/frontend/src/constants/routes.ts index 0715ebf787..2f7c650912 100644 --- a/frontend/src/constants/routes.ts +++ b/frontend/src/constants/routes.ts @@ -24,7 +24,7 @@ const ROUTES = { MY_SETTINGS: '/my-settings', SETTINGS: '/settings', ORG_SETTINGS: '/settings/org-settings', - API_KEYS: '/settings/api-keys', + API_KEYS: '/settings/access-tokens', INGESTION_SETTINGS: '/settings/ingestion-settings', SOMETHING_WENT_WRONG: '/something-went-wrong', UN_AUTHORIZED: '/un-authorized', diff --git a/frontend/src/container/APIKeys/APIKeys.test.tsx b/frontend/src/container/APIKeys/APIKeys.test.tsx index cfc2239236..960c276ebc 100644 --- a/frontend/src/container/APIKeys/APIKeys.test.tsx +++ b/frontend/src/container/APIKeys/APIKeys.test.tsx @@ -26,13 +26,13 @@ describe('APIKeys component', () => { }); it('renders APIKeys component without crashing', () => { - expect(screen.getByText('API Keys')).toBeInTheDocument(); + expect(screen.getByText('Access Tokens')).toBeInTheDocument(); expect( - screen.getByText('Create and manage access keys for the SigNoz API'), + screen.getByText('Create and manage access tokens for the SigNoz API'), ).toBeInTheDocument(); }); - it('render list of API Keys', async () => { + it('render list of Access Tokens', async () => { server.use( rest.get(apiKeysURL, (req, res, ctx) => res(ctx.status(200), ctx.json(getAPIKeysResponse)), @@ -41,15 +41,15 @@ describe('APIKeys component', () => { await waitFor(() 
=> { expect(screen.getByText('No Expiry Token')).toBeInTheDocument(); - expect(screen.getByText('1-5 of 18 API Keys')).toBeInTheDocument(); + expect(screen.getByText('1-5 of 18 tokens')).toBeInTheDocument(); }); }); it('opens add new key modal on button click', async () => { - fireEvent.click(screen.getByText('New Key')); + fireEvent.click(screen.getByText('New Token')); await waitFor(() => { const createNewKeyBtn = screen.getByRole('button', { - name: /Create new key/i, + name: /Create new token/i, }); expect(createNewKeyBtn).toBeInTheDocument(); @@ -57,10 +57,10 @@ describe('APIKeys component', () => { }); it('closes add new key modal on cancel button click', async () => { - fireEvent.click(screen.getByText('New Key')); + fireEvent.click(screen.getByText('New Token')); const createNewKeyBtn = screen.getByRole('button', { - name: /Create new key/i, + name: /Create new token/i, }); await waitFor(() => { @@ -79,10 +79,10 @@ describe('APIKeys component', () => { ), ); - fireEvent.click(screen.getByText('New Key')); + fireEvent.click(screen.getByText('New Token')); const createNewKeyBtn = screen.getByRole('button', { - name: /Create new key/i, + name: /Create new token/i, }); await waitFor(() => { @@ -90,7 +90,7 @@ describe('APIKeys component', () => { }); act(() => { - const inputElement = screen.getByPlaceholderText('Enter Key Name'); + const inputElement = screen.getByPlaceholderText('Enter Token Name'); fireEvent.change(inputElement, { target: { value: 'Top Secret' } }); fireEvent.click(screen.getByTestId('create-form-admin-role-btn')); fireEvent.click(createNewKeyBtn); diff --git a/frontend/src/container/APIKeys/APIKeys.tsx b/frontend/src/container/APIKeys/APIKeys.tsx index c24bad7009..933012a00d 100644 --- a/frontend/src/container/APIKeys/APIKeys.tsx +++ b/frontend/src/container/APIKeys/APIKeys.tsx @@ -514,15 +514,15 @@ function APIKeys(): JSX.Element {
- API Keys + Access Tokens - Create and manage access keys for the SigNoz API + Create and manage access tokens for the SigNoz API
} value={searchValue} onChange={handleSearch} @@ -533,7 +533,7 @@ function APIKeys(): JSX.Element { type="primary" onClick={showAddModal} > - New Key + New Token
@@ -546,7 +546,7 @@ function APIKeys(): JSX.Element { pageSize: 5, hideOnSinglePage: true, showTotal: (total: number, range: number[]): string => - `${range[0]}-${range[1]} of ${total} API Keys`, + `${range[0]}-${range[1]} of ${total} tokens`, }} />
@@ -554,7 +554,7 @@ function APIKeys(): JSX.Element { {/* Delete Key Modal */} Delete key} + title={Delete Token} open={isDeleteModalOpen} closable afterClose={handleModalClose} @@ -576,7 +576,7 @@ function APIKeys(): JSX.Element { onClick={onDeleteHandler} className="delete-btn" > - Delete key + Delete Token , ]} > @@ -590,7 +590,7 @@ function APIKeys(): JSX.Element { {/* Edit Key Modal */} } onClick={onUpdateApiKey} > - Update key + Update Token , ]} > @@ -634,7 +634,7 @@ function APIKeys(): JSX.Element { label="Name" rules={[{ required: true }, { type: 'string', min: 6 }]} > - + @@ -668,7 +668,7 @@ function APIKeys(): JSX.Element { {/* Create New Key Modal */} } > - Copy key and close + Copy token and close , ] : [ @@ -706,7 +706,7 @@ function APIKeys(): JSX.Element { loading={isLoadingCreateAPIKey} onClick={onCreateAPIKey} > - Create new key + Create new token , ] } @@ -730,7 +730,7 @@ function APIKeys(): JSX.Element { rules={[{ required: true }, { type: 'string', min: 6 }]} validateTrigger="onFinish" > - + @@ -771,7 +771,7 @@ function APIKeys(): JSX.Element { {showNewAPIKeyDetails && (
- Key + Token From 7d32c63398d3a09c61cf605d4467b57df4f19932 Mon Sep 17 00:00:00 2001 From: Yunus M Date: Tue, 27 Feb 2024 16:40:29 +0530 Subject: [PATCH 08/16] feat: frontend telemetry setup (#4560) --- frontend/package.json | 3 + frontend/src/index.tsx | 21 ++ frontend/src/setupProxy.js | 14 + frontend/webpack.config.js | 20 +- frontend/webpack.config.prod.js | 18 ++ frontend/yarn.lock | 464 +++++++++++++++++++++++++------- 6 files changed, 438 insertions(+), 102 deletions(-) create mode 100644 frontend/src/setupProxy.js diff --git a/frontend/package.json b/frontend/package.json index 7d1a0855e3..293b4903fb 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -40,6 +40,8 @@ "@monaco-editor/react": "^4.3.1", "@radix-ui/react-tabs": "1.0.4", "@radix-ui/react-tooltip": "1.0.7", + "@sentry/react": "7.102.1", + "@sentry/webpack-plugin": "2.14.2", "@signozhq/design-tokens": "0.0.8", "@uiw/react-md-editor": "3.23.5", "@xstate/react": "^3.0.0", @@ -71,6 +73,7 @@ "fontfaceobserver": "2.3.0", "history": "4.10.1", "html-webpack-plugin": "5.5.0", + "http-proxy-middleware": "2.0.6", "i18next": "^21.6.12", "i18next-browser-languagedetector": "^6.1.3", "i18next-http-backend": "^1.3.2", diff --git a/frontend/src/index.tsx b/frontend/src/index.tsx index 405a9c6bc4..b95631c107 100644 --- a/frontend/src/index.tsx +++ b/frontend/src/index.tsx @@ -1,6 +1,7 @@ import './ReactI18'; import 'styles.scss'; +import * as Sentry from '@sentry/react'; import AppRoutes from 'AppRoutes'; import { ThemeProvider } from 'hooks/useDarkMode'; import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback'; @@ -21,6 +22,26 @@ const queryClient = new QueryClient({ const container = document.getElementById('root'); +Sentry.init({ + dsn: process.env.SENTRY_DSN, + tunnel: process.env.TUNNEL_URL, + environment: 'production', + integrations: [ + Sentry.browserTracingIntegration(), + Sentry.replayIntegration({ + maskAllText: false, + blockAllMedia: false, + }), + ], + // 
Performance Monitoring + tracesSampleRate: 1.0, // Capture 100% of the transactions + // Set 'tracePropagationTargets' to control for which URLs distributed tracing should be enabled + tracePropagationTargets: [], + // Session Replay + replaysSessionSampleRate: 0.1, // This sets the sample rate at 10%. You may want to change it to 100% while in development and then sample at a lower rate in production. + replaysOnErrorSampleRate: 1.0, // If you're not already sampling the entire session, change the sample rate to 100% when sampling sessions where errors occur. +}); + if (container) { const root = createRoot(container); diff --git a/frontend/src/setupProxy.js b/frontend/src/setupProxy.js new file mode 100644 index 0000000000..96dd9714eb --- /dev/null +++ b/frontend/src/setupProxy.js @@ -0,0 +1,14 @@ +/* eslint-disable */ +// @ts-ignore +// @ts-nocheck +import { createProxyMiddleware } from 'http-proxy-middleware'; + +export default function (app) { + app.use( + '/tunnel', + createProxyMiddleware({ + target: process.env.TUNNEL_DOMAIN, + changeOrigin: true, + }), + ); +} diff --git a/frontend/webpack.config.js b/frontend/webpack.config.js index 58635d8994..281067ad47 100644 --- a/frontend/webpack.config.js +++ b/frontend/webpack.config.js @@ -2,6 +2,7 @@ // shared config (dev and prod) const { resolve } = require('path'); const HtmlWebpackPlugin = require('html-webpack-plugin'); +const { sentryWebpackPlugin } = require('@sentry/webpack-plugin'); const portFinderSync = require('portfinder-sync'); const dotenv = require('dotenv'); const webpack = require('webpack'); @@ -22,6 +23,12 @@ const plugins = [ INTERCOM_APP_ID: process.env.INTERCOM_APP_ID, SEGMENT_ID: process.env.SEGMENT_ID, CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID, + SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN, + SENTRY_ORG: process.env.SENTRY_ORG, + SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID, + SENTRY_DSN: process.env.SENTRY_DSN, + TUNNEL_URL: process.env.TUNNEL_URL, + TUNNEL_DOMAIN: 
process.env.TUNNEL_DOMAIN, }), new webpack.ProvidePlugin({ process: 'process/browser', @@ -33,8 +40,19 @@ const plugins = [ INTERCOM_APP_ID: process.env.INTERCOM_APP_ID, SEGMENT_ID: process.env.SEGMENT_ID, CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID, + SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN, + SENTRY_ORG: process.env.SENTRY_ORG, + SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID, + SENTRY_DSN: process.env.SENTRY_DSN, + TUNNEL_URL: process.env.TUNNEL_URL, + TUNNEL_DOMAIN: process.env.TUNNEL_DOMAIN, }), }), + sentryWebpackPlugin({ + authToken: process.env.SENTRY_AUTH_TOKEN, + org: process.env.SENTRY_ORG, + project: process.env.SENTRY_PROJECT_ID, + }), ]; if (process.env.BUNDLE_ANALYSER === 'true') { @@ -46,7 +64,7 @@ if (process.env.BUNDLE_ANALYSER === 'true') { */ const config = { mode: 'development', - devtool: 'eval-source-map', + devtool: 'source-map', entry: resolve(__dirname, './src/index.tsx'), devServer: { historyApiFallback: true, diff --git a/frontend/webpack.config.prod.js b/frontend/webpack.config.prod.js index f026a83b0c..a2b3ecb40e 100644 --- a/frontend/webpack.config.prod.js +++ b/frontend/webpack.config.prod.js @@ -4,6 +4,7 @@ const { resolve } = require('path'); const HtmlWebpackPlugin = require('html-webpack-plugin'); const CopyPlugin = require('copy-webpack-plugin'); +const { sentryWebpackPlugin } = require('@sentry/webpack-plugin'); const CompressionPlugin = require('compression-webpack-plugin'); const dotenv = require('dotenv'); const webpack = require('webpack'); @@ -26,6 +27,12 @@ const plugins = [ INTERCOM_APP_ID: process.env.INTERCOM_APP_ID, SEGMENT_ID: process.env.SEGMENT_ID, CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID, + SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN, + SENTRY_ORG: process.env.SENTRY_ORG, + SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID, + SENTRY_DSN: process.env.SENTRY_DSN, + TUNNEL_URL: process.env.TUNNEL_URL, + TUNNEL_DOMAIN: process.env.TUNNEL_DOMAIN, }), new CompressionPlugin({ exclude: 
/.map$/, @@ -42,6 +49,12 @@ const plugins = [ INTERCOM_APP_ID: process.env.INTERCOM_APP_ID, SEGMENT_ID: process.env.SEGMENT_ID, CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID, + SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN, + SENTRY_ORG: process.env.SENTRY_ORG, + SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID, + SENTRY_DSN: process.env.SENTRY_DSN, + TUNNEL_URL: process.env.TUNNEL_URL, + TUNNEL_DOMAIN: process.env.TUNNEL_DOMAIN, }), }), new MiniCssExtractPlugin(), @@ -53,6 +66,11 @@ const plugins = [ publicPath: resolve(__dirname, './public/css'), fonts: true, }), + sentryWebpackPlugin({ + authToken: process.env.SENTRY_AUTH_TOKEN, + org: process.env.SENTRY_ORG, + project: process.env.SENTRY_PROJECT_ID, + }), ]; if (process.env.BUNDLE_ANALYSER === 'true') { diff --git a/frontend/yarn.lock b/frontend/yarn.lock index 8e1c80fad2..6474b180c1 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -38,9 +38,9 @@ resolved "https://registry.yarnpkg.com/@adobe/css-tools/-/css-tools-4.3.2.tgz#a6abc715fb6884851fca9dad37fc34739a04fd11" integrity sha512-DA5a1C0gD/pLOvhv33YMrbf2FK3oUzwNl9oOJqE4XVjuEtt6XIakRcsd7eLiOSPkp1kTRQGICTA8cKra/vFbjw== -"@ampproject/remapping@^2.2.0": +"@ampproject/remapping@^2.1.0", "@ampproject/remapping@^2.2.0": version "2.2.1" - resolved "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz" + resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.1.tgz#99e8e11851128b8702cd57c33684f1d0f260b630" integrity sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg== dependencies: "@jridgewell/gen-mapping" "^0.3.0" @@ -130,6 +130,14 @@ dependencies: "@babel/highlight" "^7.18.6" +"@babel/code-frame@^7.16.7", "@babel/code-frame@^7.23.5": + version "7.23.5" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.23.5.tgz#9009b69a8c602293476ad598ff53e4562e15c244" + integrity 
sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA== + dependencies: + "@babel/highlight" "^7.23.4" + chalk "^2.4.2" + "@babel/code-frame@^7.22.10", "@babel/code-frame@^7.22.13", "@babel/code-frame@^7.22.5": version "7.22.13" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.22.13.tgz#e3c1c099402598483b7a8c46a721d1038803755e" @@ -138,14 +146,6 @@ "@babel/highlight" "^7.22.13" chalk "^2.4.2" -"@babel/code-frame@^7.23.5": - version "7.23.5" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.23.5.tgz#9009b69a8c602293476ad598ff53e4562e15c244" - integrity sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA== - dependencies: - "@babel/highlight" "^7.23.4" - chalk "^2.4.2" - "@babel/compat-data@^7.17.7", "@babel/compat-data@^7.20.5", "@babel/compat-data@^7.21.4": version "7.21.4" resolved "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.21.4.tgz" @@ -161,6 +161,27 @@ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.23.5.tgz#ffb878728bb6bdcb6f4510aa51b1be9afb8cfd98" integrity sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw== +"@babel/core@7.18.5": + version "7.18.5" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.18.5.tgz#c597fa680e58d571c28dda9827669c78cdd7f000" + integrity sha512-MGY8vg3DxMnctw0LdvSEojOsumc70g0t18gNyUdAZqB1Rpd1Bqo/svHGvt+UJ6JcGX+DIekGFDxxIWofBxLCnQ== + dependencies: + "@ampproject/remapping" "^2.1.0" + "@babel/code-frame" "^7.16.7" + "@babel/generator" "^7.18.2" + "@babel/helper-compilation-targets" "^7.18.2" + "@babel/helper-module-transforms" "^7.18.0" + "@babel/helpers" "^7.18.2" + "@babel/parser" "^7.18.5" + "@babel/template" "^7.16.7" + "@babel/traverse" "^7.18.5" + "@babel/types" "^7.18.4" + convert-source-map "^1.7.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.1" + semver "^6.3.0" + "@babel/core@^7.1.0", 
"@babel/core@^7.12.3", "@babel/core@^7.16.0", "@babel/core@^7.7.2", "@babel/core@^7.7.5", "@babel/core@^7.8.0": version "7.21.4" resolved "https://registry.npmjs.org/@babel/core/-/core-7.21.4.tgz" @@ -245,6 +266,16 @@ json5 "^2.2.3" semver "^6.3.1" +"@babel/generator@^7.18.2", "@babel/generator@^7.23.6": + version "7.23.6" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.23.6.tgz#9e1fca4811c77a10580d17d26b57b036133f3c2e" + integrity sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw== + dependencies: + "@babel/types" "^7.23.6" + "@jridgewell/gen-mapping" "^0.3.2" + "@jridgewell/trace-mapping" "^0.3.17" + jsesc "^2.5.1" + "@babel/generator@^7.21.4", "@babel/generator@^7.7.2": version "7.21.4" resolved "https://registry.npmjs.org/@babel/generator/-/generator-7.21.4.tgz" @@ -285,16 +316,6 @@ "@jridgewell/trace-mapping" "^0.3.17" jsesc "^2.5.1" -"@babel/generator@^7.23.6": - version "7.23.6" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.23.6.tgz#9e1fca4811c77a10580d17d26b57b036133f3c2e" - integrity sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw== - dependencies: - "@babel/types" "^7.23.6" - "@jridgewell/gen-mapping" "^0.3.2" - "@jridgewell/trace-mapping" "^0.3.17" - jsesc "^2.5.1" - "@babel/helper-annotate-as-pure@^7.15.4", "@babel/helper-annotate-as-pure@^7.16.0", "@babel/helper-annotate-as-pure@^7.18.6": version "7.18.6" resolved "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.18.6.tgz" @@ -335,6 +356,17 @@ lru-cache "^5.1.1" semver "^6.3.0" +"@babel/helper-compilation-targets@^7.18.2", "@babel/helper-compilation-targets@^7.23.6": + version "7.23.6" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz#4d79069b16cbcf1461289eccfbbd81501ae39991" + integrity 
sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ== + dependencies: + "@babel/compat-data" "^7.23.5" + "@babel/helper-validator-option" "^7.23.5" + browserslist "^4.22.2" + lru-cache "^5.1.1" + semver "^6.3.1" + "@babel/helper-compilation-targets@^7.22.10", "@babel/helper-compilation-targets@^7.22.5", "@babel/helper-compilation-targets@^7.22.6": version "7.22.10" resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.10.tgz#01d648bbc25dd88f513d862ee0df27b7d4e67024" @@ -357,17 +389,6 @@ lru-cache "^5.1.1" semver "^6.3.1" -"@babel/helper-compilation-targets@^7.23.6": - version "7.23.6" - resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz#4d79069b16cbcf1461289eccfbbd81501ae39991" - integrity sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ== - dependencies: - "@babel/compat-data" "^7.23.5" - "@babel/helper-validator-option" "^7.23.5" - browserslist "^4.22.2" - lru-cache "^5.1.1" - semver "^6.3.1" - "@babel/helper-create-class-features-plugin@^7.18.6", "@babel/helper-create-class-features-plugin@^7.21.0": version "7.21.4" resolved "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.21.4.tgz" @@ -532,6 +553,17 @@ dependencies: "@babel/types" "^7.22.5" +"@babel/helper-module-transforms@^7.18.0", "@babel/helper-module-transforms@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz#d7d12c3c5d30af5b3c0fcab2a6d5217773e2d0f1" + integrity sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ== + dependencies: + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-module-imports" "^7.22.15" + "@babel/helper-simple-access" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.22.6" + 
"@babel/helper-validator-identifier" "^7.22.20" + "@babel/helper-module-transforms@^7.18.6", "@babel/helper-module-transforms@^7.20.11", "@babel/helper-module-transforms@^7.21.2": version "7.21.2" resolved "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.21.2.tgz" @@ -568,17 +600,6 @@ "@babel/helper-split-export-declaration" "^7.22.6" "@babel/helper-validator-identifier" "^7.22.5" -"@babel/helper-module-transforms@^7.23.3": - version "7.23.3" - resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz#d7d12c3c5d30af5b3c0fcab2a6d5217773e2d0f1" - integrity sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ== - dependencies: - "@babel/helper-environment-visitor" "^7.22.20" - "@babel/helper-module-imports" "^7.22.15" - "@babel/helper-simple-access" "^7.22.5" - "@babel/helper-split-export-declaration" "^7.22.6" - "@babel/helper-validator-identifier" "^7.22.20" - "@babel/helper-optimise-call-expression@^7.18.6": version "7.18.6" resolved "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz" @@ -759,6 +780,15 @@ "@babel/template" "^7.22.5" "@babel/types" "^7.22.10" +"@babel/helpers@^7.18.2", "@babel/helpers@^7.23.9": + version "7.23.9" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.23.9.tgz#c3e20bbe7f7a7e10cb9b178384b4affdf5995c7d" + integrity sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ== + dependencies: + "@babel/template" "^7.23.9" + "@babel/traverse" "^7.23.9" + "@babel/types" "^7.23.9" + "@babel/helpers@^7.21.0": version "7.21.0" resolved "https://registry.npmjs.org/@babel/helpers/-/helpers-7.21.0.tgz" @@ -786,15 +816,6 @@ "@babel/traverse" "^7.22.15" "@babel/types" "^7.22.15" -"@babel/helpers@^7.23.9": - version "7.23.9" - resolved 
"https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.23.9.tgz#c3e20bbe7f7a7e10cb9b178384b4affdf5995c7d" - integrity sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ== - dependencies: - "@babel/template" "^7.23.9" - "@babel/traverse" "^7.23.9" - "@babel/types" "^7.23.9" - "@babel/highlight@^7.10.4", "@babel/highlight@^7.18.6": version "7.18.6" resolved "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz" @@ -827,6 +848,11 @@ resolved "https://registry.npmjs.org/@babel/parser/-/parser-7.21.4.tgz" integrity sha512-alVJj7k7zIxqBZ7BTRhz0IqJFxW1VJbm6N8JbcYhQ186df9ZBPbZBmWSqAMXwHGsCJdYks7z/voa3ibiS5bCIw== +"@babel/parser@^7.18.5", "@babel/parser@^7.23.9": + version "7.23.9" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.9.tgz#7b903b6149b0f8fa7ad564af646c4c38a77fc44b" + integrity sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA== + "@babel/parser@^7.22.11", "@babel/parser@^7.22.5": version "7.22.14" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.14.tgz#c7de58e8de106e88efca42ce17f0033209dfd245" @@ -847,11 +873,6 @@ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.6.tgz#ba1c9e512bda72a47e285ae42aff9d2a635a9e3b" integrity sha512-Z2uID7YJ7oNvAI20O9X0bblw7Qqs8Q2hFy0R9tAfnfLkp5MW0UH9eUvnDSnFwKZ0AvgS1ucqR4KzvVHgnke1VQ== -"@babel/parser@^7.23.9": - version "7.23.9" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.9.tgz#7b903b6149b0f8fa7ad564af646c4c38a77fc44b" - integrity sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA== - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.18.6": version "7.18.6" resolved "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz" @@ -2194,6 +2215,15 @@ 
dependencies: regenerator-runtime "^0.14.0" +"@babel/template@^7.16.7", "@babel/template@^7.23.9": + version "7.23.9" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.23.9.tgz#f881d0487cba2828d3259dcb9ef5005a9731011a" + integrity sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA== + dependencies: + "@babel/code-frame" "^7.23.5" + "@babel/parser" "^7.23.9" + "@babel/types" "^7.23.9" + "@babel/template@^7.18.10", "@babel/template@^7.20.7", "@babel/template@^7.3.3": version "7.20.7" resolved "https://registry.npmjs.org/@babel/template/-/template-7.20.7.tgz" @@ -2221,14 +2251,21 @@ "@babel/parser" "^7.22.5" "@babel/types" "^7.22.5" -"@babel/template@^7.23.9": +"@babel/traverse@^7.18.5", "@babel/traverse@^7.23.9": version "7.23.9" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.23.9.tgz#f881d0487cba2828d3259dcb9ef5005a9731011a" - integrity sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA== + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.23.9.tgz#2f9d6aead6b564669394c5ce0f9302bb65b9d950" + integrity sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg== dependencies: "@babel/code-frame" "^7.23.5" + "@babel/generator" "^7.23.6" + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-function-name" "^7.23.0" + "@babel/helper-hoist-variables" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.22.6" "@babel/parser" "^7.23.9" "@babel/types" "^7.23.9" + debug "^4.3.1" + globals "^11.1.0" "@babel/traverse@^7.20.5", "@babel/traverse@^7.20.7", "@babel/traverse@^7.21.0", "@babel/traverse@^7.21.2", "@babel/traverse@^7.21.4", "@babel/traverse@^7.22.11", "@babel/traverse@^7.22.15", "@babel/traverse@^7.4.5", "@babel/traverse@^7.7.0", "@babel/traverse@^7.7.2": version "7.23.2" @@ -2246,22 +2283,6 @@ debug "^4.1.0" globals "^11.1.0" -"@babel/traverse@^7.23.9": - version "7.23.9" 
- resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.23.9.tgz#2f9d6aead6b564669394c5ce0f9302bb65b9d950" - integrity sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg== - dependencies: - "@babel/code-frame" "^7.23.5" - "@babel/generator" "^7.23.6" - "@babel/helper-environment-visitor" "^7.22.20" - "@babel/helper-function-name" "^7.23.0" - "@babel/helper-hoist-variables" "^7.22.5" - "@babel/helper-split-export-declaration" "^7.22.6" - "@babel/parser" "^7.23.9" - "@babel/types" "^7.23.9" - debug "^4.3.1" - globals "^11.1.0" - "@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.18.9", "@babel/types@^7.20.0", "@babel/types@^7.20.2", "@babel/types@^7.20.5", "@babel/types@^7.20.7", "@babel/types@^7.21.0", "@babel/types@^7.21.2", "@babel/types@^7.21.4", "@babel/types@^7.3.0", "@babel/types@^7.3.3", "@babel/types@^7.4.4", "@babel/types@^7.7.0": version "7.21.4" resolved "https://registry.npmjs.org/@babel/types/-/types-7.21.4.tgz" @@ -2271,6 +2292,15 @@ "@babel/helper-validator-identifier" "^7.19.1" to-fast-properties "^2.0.0" +"@babel/types@^7.18.4", "@babel/types@^7.23.6", "@babel/types@^7.23.9": + version "7.23.9" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.23.9.tgz#1dd7b59a9a2b5c87f8b41e52770b5ecbf492e002" + integrity sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q== + dependencies: + "@babel/helper-string-parser" "^7.23.4" + "@babel/helper-validator-identifier" "^7.22.20" + to-fast-properties "^2.0.0" + "@babel/types@^7.22.10", "@babel/types@^7.22.11", "@babel/types@^7.22.5": version "7.22.11" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.11.tgz#0e65a6a1d4d9cbaa892b2213f6159485fe632ea2" @@ -2298,15 +2328,6 @@ "@babel/helper-validator-identifier" "^7.22.20" to-fast-properties "^2.0.0" -"@babel/types@^7.23.6", "@babel/types@^7.23.9": - version "7.23.9" - resolved 
"https://registry.yarnpkg.com/@babel/types/-/types-7.23.9.tgz#1dd7b59a9a2b5c87f8b41e52770b5ecbf492e002" - integrity sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q== - dependencies: - "@babel/helper-string-parser" "^7.23.4" - "@babel/helper-validator-identifier" "^7.22.20" - to-fast-properties "^2.0.0" - "@bcoe/v8-coverage@^0.2.3": version "0.2.3" resolved "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz" @@ -2963,7 +2984,7 @@ resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz" integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw== -"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14": +"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.13", "@jridgewell/sourcemap-codec@^1.4.14": version "1.4.15" resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz" integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg== @@ -3552,6 +3573,170 @@ colors "~1.2.1" string-argv "~0.3.1" +"@sentry-internal/feedback@7.102.1": + version "7.102.1" + resolved "https://registry.yarnpkg.com/@sentry-internal/feedback/-/feedback-7.102.1.tgz#747f88c2881c76fddd16bce57cc4bc17b4c2af93" + integrity sha512-vY4hpLLMNLjICtWiizc7KeGbWOTUMGrF7C+9dPCztZww3CLgzWy9A7DvPj5hodRiYzpdRnAMl8yQnMFbYXh7bA== + dependencies: + "@sentry/core" "7.102.1" + "@sentry/types" "7.102.1" + "@sentry/utils" "7.102.1" + +"@sentry-internal/replay-canvas@7.102.1": + version "7.102.1" + resolved "https://registry.yarnpkg.com/@sentry-internal/replay-canvas/-/replay-canvas-7.102.1.tgz#f098814ce21fdf95ef6d440d7ff8a6d3bfe73054" + integrity sha512-GUX4RWI10uRjdjeyvCLtAAhWRVqnAnG6+yNxWfqUQ3qMA7B7XxG43KT2UhSnulmErNzODQ6hA68rGPwwYeRIww== + dependencies: + "@sentry/core" "7.102.1" + "@sentry/replay" "7.102.1" + "@sentry/types" "7.102.1" + 
"@sentry/utils" "7.102.1" + +"@sentry-internal/tracing@7.102.1": + version "7.102.1" + resolved "https://registry.yarnpkg.com/@sentry-internal/tracing/-/tracing-7.102.1.tgz#5c39c8f04a4a1a665fb6d368e1cd13605152f18b" + integrity sha512-RkFlFyAC0fQOvBbBqnq0CLmFW5m3JJz9pKbZd5vXPraWAlniKSb1bC/4DF9SlNx0FN1LWG+IU3ISdpzwwTeAGg== + dependencies: + "@sentry/core" "7.102.1" + "@sentry/types" "7.102.1" + "@sentry/utils" "7.102.1" + +"@sentry/babel-plugin-component-annotate@2.14.2": + version "2.14.2" + resolved "https://registry.yarnpkg.com/@sentry/babel-plugin-component-annotate/-/babel-plugin-component-annotate-2.14.2.tgz#d756bed93495e97a5a2aad56e2a6dc5020305adc" + integrity sha512-mFBVnIZmdMrpxo61rG5yf0WFt5VrRpy8cpIpJtT3mYkX9vDmcUZaZaD1ctv73iZF3QwaieVdn05Na5mWzZ8h/A== + +"@sentry/browser@7.102.1": + version "7.102.1" + resolved "https://registry.yarnpkg.com/@sentry/browser/-/browser-7.102.1.tgz#30d3da587b2b6542b3d9e39d923ed28a2704d454" + integrity sha512-7BOfPBiM7Kp6q/iy0JIbsBTxIASV+zWXByqqjuEMWGj3X2u4oRIfm3gv4erPU/l+CORQUVQZLSPGoIoM1gbB/A== + dependencies: + "@sentry-internal/feedback" "7.102.1" + "@sentry-internal/replay-canvas" "7.102.1" + "@sentry-internal/tracing" "7.102.1" + "@sentry/core" "7.102.1" + "@sentry/replay" "7.102.1" + "@sentry/types" "7.102.1" + "@sentry/utils" "7.102.1" + +"@sentry/bundler-plugin-core@2.14.2": + version "2.14.2" + resolved "https://registry.yarnpkg.com/@sentry/bundler-plugin-core/-/bundler-plugin-core-2.14.2.tgz#6750c46fa4836b46ea48556b19f5e6789a428a47" + integrity sha512-HgOFWYdq87lSmeVW1w8K2Vf2DGzRPvKzHTajZYLTPlrZ1jbajq9vwuqhrJ9AnDkjl0mjyzSPEy3ZTeG1Z7uRNA== + dependencies: + "@babel/core" "7.18.5" + "@sentry/babel-plugin-component-annotate" "2.14.2" + "@sentry/cli" "^2.22.3" + dotenv "^16.3.1" + find-up "5.0.0" + glob "9.3.2" + magic-string "0.27.0" + unplugin "1.0.1" + +"@sentry/cli-darwin@2.28.6": + version "2.28.6" + resolved 
"https://registry.yarnpkg.com/@sentry/cli-darwin/-/cli-darwin-2.28.6.tgz#83f9127de77e2a2d25eb143d90720b3e9042adc1" + integrity sha512-KRf0VvTltHQ5gA7CdbUkaIp222LAk/f1+KqpDzO6nB/jC/tL4sfiy6YyM4uiH6IbVEudB8WpHCECiatmyAqMBA== + +"@sentry/cli-linux-arm64@2.28.6": + version "2.28.6" + resolved "https://registry.yarnpkg.com/@sentry/cli-linux-arm64/-/cli-linux-arm64-2.28.6.tgz#6bb660e5d8145270e287a9a21201d2f9576b0634" + integrity sha512-caMDt37FI752n4/3pVltDjlrRlPFCOxK4PHvoZGQ3KFMsai0ZhE/0CLBUMQqfZf0M0r8KB2x7wqLm7xSELjefQ== + +"@sentry/cli-linux-arm@2.28.6": + version "2.28.6" + resolved "https://registry.yarnpkg.com/@sentry/cli-linux-arm/-/cli-linux-arm-2.28.6.tgz#73d466004ac445d9258e83a7b3d4e0ee6604e0bd" + integrity sha512-ANG7U47yEHD1g3JrfhpT4/MclEvmDZhctWgSP5gVw5X4AlcI87E6dTqccnLgvZjiIAQTaJJAZuSHVVF3Jk403w== + +"@sentry/cli-linux-i686@2.28.6": + version "2.28.6" + resolved "https://registry.yarnpkg.com/@sentry/cli-linux-i686/-/cli-linux-i686-2.28.6.tgz#f7175ca639ee05cf12d808f7fc31d59d6e2ee3b9" + integrity sha512-Tj1+GMc6lFsDRquOqaGKXFpW9QbmNK4TSfynkWKiJxdTEn5jSMlXXfr0r9OQrxu3dCCqEHkhEyU63NYVpgxIPw== + +"@sentry/cli-linux-x64@2.28.6": + version "2.28.6" + resolved "https://registry.yarnpkg.com/@sentry/cli-linux-x64/-/cli-linux-x64-2.28.6.tgz#df0af8d6c8c8c880eb7345c715a4dfa509544a40" + integrity sha512-Dt/Xz784w/z3tEObfyJEMmRIzn0D5qoK53H9kZ6e0yNvJOSKNCSOq5cQk4n1/qeG0K/6SU9dirmvHwFUiVNyYg== + +"@sentry/cli-win32-i686@2.28.6": + version "2.28.6" + resolved "https://registry.yarnpkg.com/@sentry/cli-win32-i686/-/cli-win32-i686-2.28.6.tgz#0df19912d1823b6ec034b4c4c714c7601211c926" + integrity sha512-zkpWtvY3kt+ogVaAbfFr2MEkgMMHJNJUnNMO8Ixce9gh38sybIkDkZNFnVPBXMClJV0APa4QH0EwumYBFZUMuQ== + +"@sentry/cli-win32-x64@2.28.6": + version "2.28.6" + resolved "https://registry.yarnpkg.com/@sentry/cli-win32-x64/-/cli-win32-x64-2.28.6.tgz#2344a206be3b555ec6540740f93a181199962804" + integrity sha512-TG2YzZ9JMeNFzbicdr5fbtsusVGACbrEfHmPgzWGDeLUP90mZxiMTjkXsE1X/5jQEQjB2+fyfXloba/Ugo51hA== 
+ +"@sentry/cli@^2.22.3": + version "2.28.6" + resolved "https://registry.yarnpkg.com/@sentry/cli/-/cli-2.28.6.tgz#645f31b9e742e7bf7668c8f867149359e79b8123" + integrity sha512-o2Ngz7xXuhwHxMi+4BFgZ4qjkX0tdZeOSIZkFAGnTbRhQe5T8bxq6CcQRLdPhqMgqvDn7XuJ3YlFtD3ZjHvD7g== + dependencies: + https-proxy-agent "^5.0.0" + node-fetch "^2.6.7" + progress "^2.0.3" + proxy-from-env "^1.1.0" + which "^2.0.2" + optionalDependencies: + "@sentry/cli-darwin" "2.28.6" + "@sentry/cli-linux-arm" "2.28.6" + "@sentry/cli-linux-arm64" "2.28.6" + "@sentry/cli-linux-i686" "2.28.6" + "@sentry/cli-linux-x64" "2.28.6" + "@sentry/cli-win32-i686" "2.28.6" + "@sentry/cli-win32-x64" "2.28.6" + +"@sentry/core@7.102.1": + version "7.102.1" + resolved "https://registry.yarnpkg.com/@sentry/core/-/core-7.102.1.tgz#855d37b6bba9986a9380864c823e696d3fc5aa01" + integrity sha512-QjY+LSP3du3J/C8x/FfEbRxgZgsWd0jfTJ4P7s9f219I1csK4OeBMC3UA1HwEa0pY/9OF6H/egW2CjOcMM5Pdg== + dependencies: + "@sentry/types" "7.102.1" + "@sentry/utils" "7.102.1" + +"@sentry/react@7.102.1": + version "7.102.1" + resolved "https://registry.yarnpkg.com/@sentry/react/-/react-7.102.1.tgz#c4ef94be7ee7ee4267d513ddccd29ce63f16e48f" + integrity sha512-X4j2DgbktlEifnd21YJKCayAmff5hnaS+9MNz9OonEwD0ARi0ks7bo0wtWHMjPK20992MO+JwczVg/1BXJYDdQ== + dependencies: + "@sentry/browser" "7.102.1" + "@sentry/core" "7.102.1" + "@sentry/types" "7.102.1" + "@sentry/utils" "7.102.1" + hoist-non-react-statics "^3.3.2" + +"@sentry/replay@7.102.1": + version "7.102.1" + resolved "https://registry.yarnpkg.com/@sentry/replay/-/replay-7.102.1.tgz#d6c17332d14dc312b124bbbda8f35d6a982b893c" + integrity sha512-HR/j9dGIvbrId8fh8mQlODx7JrhRmawEd9e9P3laPtogWCg/5TI+XPb2VGSaXOX9VWtb/6Z2UjHsaGjgg6YcuA== + dependencies: + "@sentry-internal/tracing" "7.102.1" + "@sentry/core" "7.102.1" + "@sentry/types" "7.102.1" + "@sentry/utils" "7.102.1" + +"@sentry/types@7.102.1": + version "7.102.1" + resolved 
"https://registry.yarnpkg.com/@sentry/types/-/types-7.102.1.tgz#18c35f32ecbd12afb9860ca2de7bfff542d10b27" + integrity sha512-htKorf3t/D0XYtM7foTcmG+rM47rDP6XdbvCcX5gBCuCYlzpM1vqCt2rl3FLktZC6TaIpFRJw1TLfx6m+x5jdA== + +"@sentry/utils@7.102.1": + version "7.102.1" + resolved "https://registry.yarnpkg.com/@sentry/utils/-/utils-7.102.1.tgz#45ddcdf2e700d40160347bbdf4233aff3179d398" + integrity sha512-+8WcFjHVV/HROXSAwMuUzveElBFC43EiTG7SNEBNgOUeQzQVTmbUZXyTVgLrUmtoWqvnIxCacoLxtZo1o67kdg== + dependencies: + "@sentry/types" "7.102.1" + +"@sentry/webpack-plugin@2.14.2": + version "2.14.2" + resolved "https://registry.yarnpkg.com/@sentry/webpack-plugin/-/webpack-plugin-2.14.2.tgz#4b3af44e98b967ea0730681f80b08c9f3660ad58" + integrity sha512-BEWF5qerGG/xX0ixEOCYh9gCkc+FHDzXxRMCFkM8yQNGH361ELF578KtuoZxXDy0kWa9QGZxSoP6/HZSnJEF9A== + dependencies: + "@sentry/bundler-plugin-core" "2.14.2" + unplugin "1.0.1" + uuid "^9.0.0" + "@sideway/address@^4.1.3": version "4.1.4" resolved "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz" @@ -4920,6 +5105,11 @@ acorn@^8.0.4, acorn@^8.2.4, acorn@^8.4.1, acorn@^8.5.0, acorn@^8.7.1: resolved "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz" integrity sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw== +acorn@^8.8.1: + version "8.11.3" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.11.3.tgz#71e0b14e13a4ec160724b38fb7b0f233b1b81d7a" + integrity sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg== + aframe-extras@^6.1: version "6.1.1" resolved "https://registry.npmjs.org/aframe-extras/-/aframe-extras-6.1.1.tgz" @@ -7611,6 +7801,11 @@ dotenv@^16.0.3: resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.3.1.tgz#369034de7d7e5b120972693352a3bf112172cc3e" integrity sha512-IPzF4w4/Rd94bA9imS68tZBaYyBWSCE47V1RGuMrB94iyTOIEwRmVL2x/4An+6mETpLrKJ5hQkB8W4kFAadeIQ== +dotenv@^16.3.1: + version "16.4.5" + resolved 
"https://registry.yarnpkg.com/dotenv/-/dotenv-16.4.5.tgz#cdd3b3b604cb327e286b4762e13502f717cb099f" + integrity sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg== + dtype@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/dtype/-/dtype-2.0.0.tgz" @@ -8616,6 +8811,14 @@ find-process@^1.4.4: commander "^5.1.0" debug "^4.1.1" +find-up@5.0.0, find-up@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" + integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== + dependencies: + locate-path "^6.0.0" + path-exists "^4.0.0" + find-up@^4.0.0, find-up@^4.1.0: version "4.1.0" resolved "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz" @@ -8624,14 +8827,6 @@ find-up@^4.0.0, find-up@^4.1.0: locate-path "^5.0.0" path-exists "^4.0.0" -find-up@^5.0.0: - version "5.0.0" - resolved "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz" - integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== - dependencies: - locate-path "^6.0.0" - path-exists "^4.0.0" - find-up@^6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/find-up/-/find-up-6.3.0.tgz#2abab3d3280b2dc7ac10199ef324c4e002c8c790" @@ -8938,6 +9133,16 @@ glob-to-regexp@^0.4.1: resolved "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz" integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw== +glob@9.3.2: + version "9.3.2" + resolved "https://registry.yarnpkg.com/glob/-/glob-9.3.2.tgz#8528522e003819e63d11c979b30896e0eaf52eda" + integrity sha512-BTv/JhKXFEHsErMte/AnfiSv8yYOLLiyH2lTg8vn02O21zWFgHPTfxtgn1QRe7NRgggUhC8hacR2Re94svHqeA== + dependencies: + fs.realpath "^1.0.0" + minimatch "^7.4.1" + minipass "^4.2.4" + path-scurry "^1.6.1" + glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6, glob@^7.2.0: 
version "7.2.3" resolved "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz" @@ -9504,9 +9709,9 @@ http-proxy-agent@^4.0.1: agent-base "6" debug "4" -http-proxy-middleware@^2.0.3: +http-proxy-middleware@2.0.6, http-proxy-middleware@^2.0.3: version "2.0.6" - resolved "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz#e1a4dd6979572c7ab5a4e4b55095d1f32a74963f" integrity sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw== dependencies: "@types/http-proxy" "^1.17.8" @@ -10961,9 +11166,9 @@ json2mq@^0.2.0: dependencies: string-convert "^0.2.0" -json5@2.x, json5@^2.1.2, json5@^2.2.2, json5@^2.2.3: +json5@2.x, json5@^2.1.2, json5@^2.2.1, json5@^2.2.2, json5@^2.2.3: version "2.2.3" - resolved "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== json5@^1.0.2: @@ -11413,6 +11618,11 @@ lru-cache@^6.0.0: dependencies: yallist "^4.0.0" +"lru-cache@^9.1.1 || ^10.0.0": + version "10.2.0" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.2.0.tgz#0bd445ca57363465900f4d1f9bd8db343a4d95c3" + integrity sha512-2bIM8x+VAf6JT4bKAljS1qUWgMsqZRPGJS6FSahIMPVvctcNhyVp7AJu7quxOW9jwkryBReKZY5tY5JYv2n/7Q== + lucide-react@0.321.0: version "0.321.0" resolved "https://registry.yarnpkg.com/lucide-react/-/lucide-react-0.321.0.tgz#05a2600e0a6551c117fb4e7b2676b1286389d949" @@ -11423,6 +11633,13 @@ lz-string@^1.4.4: resolved "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz" integrity sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ== +magic-string@0.27.0: + version "0.27.0" + resolved 
"https://registry.yarnpkg.com/magic-string/-/magic-string-0.27.0.tgz#e4a3413b4bab6d98d2becffd48b4a257effdbbf3" + integrity sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA== + dependencies: + "@jridgewell/sourcemap-codec" "^1.4.13" + make-dir@^2.1.0: version "2.1.0" resolved "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz" @@ -12208,6 +12425,13 @@ minimatch@^3.0.4, minimatch@^3.1.1, minimatch@^3.1.2: dependencies: brace-expansion "^1.1.7" +minimatch@^7.4.1: + version "7.4.6" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-7.4.6.tgz#845d6f254d8f4a5e4fd6baf44d5f10c8448365fb" + integrity sha512-sBz8G/YjVniEz6lKPNpKxXwazJe4c19fEfV2GDMX6AjFz+MX9uDWIZW8XreVhkFW3fkIdTv/gxWr/Kks5FFAVw== + dependencies: + brace-expansion "^2.0.1" + minimatch@^9.0.3: version "9.0.3" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.3.tgz#a6e00c3de44c3a542bfaae70abfc22420a6da825" @@ -12229,6 +12453,16 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6: resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== +minipass@^4.2.4: + version "4.2.8" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-4.2.8.tgz#f0010f64393ecfc1d1ccb5f582bcaf45f48e1a3a" + integrity sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ== + +"minipass@^5.0.0 || ^6.0.2 || ^7.0.0": + version "7.0.4" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.0.4.tgz#dbce03740f50a4786ba994c1fb908844d27b038c" + integrity sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ== + mkdirp@^0.5.6: version "0.5.6" resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.6.tgz#7def03d2432dcae4ba1d611445c48396062255f6" @@ -13087,6 +13321,14 @@ path-parse@^1.0.6, path-parse@^1.0.7: resolved 
"https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== +path-scurry@^1.6.1: + version "1.10.1" + resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.10.1.tgz#9ba6bf5aa8500fe9fd67df4f0d9483b2b0bfc698" + integrity sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ== + dependencies: + lru-cache "^9.1.1 || ^10.0.0" + minipass "^5.0.0 || ^6.0.2 || ^7.0.0" + path-to-regexp@0.1.7: version "0.1.7" resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz" @@ -13621,9 +13863,9 @@ process@^0.11.10: resolved "https://registry.npmjs.org/process/-/process-0.11.10.tgz" integrity sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A== -progress@^2.0.0: +progress@^2.0.0, progress@^2.0.3: version "2.0.3" - resolved "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz" + resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8" integrity sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA== promise-polyfill@^3.1.0: @@ -16676,6 +16918,16 @@ unpipe@1.0.0, unpipe@~1.0.0: resolved "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz" integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ== +unplugin@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/unplugin/-/unplugin-1.0.1.tgz#83b528b981cdcea1cad422a12cd02e695195ef3f" + integrity sha512-aqrHaVBWW1JVKBHmGo33T5TxeL0qWzfvjWokObHA9bYmN7eNDkwOxmLjhioHl9878qDFMAaT51XNroRyuz7WxA== + dependencies: + acorn "^8.8.1" + chokidar "^3.5.3" + webpack-sources "^3.2.3" + webpack-virtual-modules "^0.5.0" + update-browserslist-db@^1.0.10, update-browserslist-db@^1.0.11: version "1.0.11" resolved 
"https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz" @@ -16809,6 +17061,11 @@ uuid@^8.3.2: resolved "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz" integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== +uuid@^9.0.0: + version "9.0.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30" + integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA== + uvu@^0.5.0: version "0.5.6" resolved "https://registry.yarnpkg.com/uvu/-/uvu-0.5.6.tgz#2754ca20bcb0bb59b64e9985e84d2e81058502df" @@ -17170,6 +17427,11 @@ webpack-sources@^3.2.3: resolved "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz" integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w== +webpack-virtual-modules@^0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/webpack-virtual-modules/-/webpack-virtual-modules-0.5.0.tgz#362f14738a56dae107937ab98ea7062e8bdd3b6c" + integrity sha512-kyDivFZ7ZM0BVOUteVbDFhlRt7Ah/CSPwJdi8hBpkK7QLumUqdLtVfm/PX/hkcnrvr0i77fO5+TjZ94Pe+C9iw== + webpack@5.88.2: version "5.88.2" resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.88.2.tgz#f62b4b842f1c6ff580f3fcb2ed4f0b579f4c210e" @@ -17341,9 +17603,9 @@ which@^1.2.12, which@^1.2.14, which@^1.2.9: dependencies: isexe "^2.0.0" -which@^2.0.1: +which@^2.0.1, which@^2.0.2: version "2.0.2" - resolved "https://registry.npmjs.org/which/-/which-2.0.2.tgz" + resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== dependencies: isexe "^2.0.0" From d9ab100da3de350b3398a3425868700320b2210e Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Tue, 27 Feb 2024 17:15:23 +0530 Subject: [PATCH 09/16] ci(telemetry): include 
environment variables for ee build (#4603) Signed-off-by: Prashant Shahi --- .github/workflows/push.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml index 724b4ce1ca..6869cf7fb7 100644 --- a/.github/workflows/push.yaml +++ b/.github/workflows/push.yaml @@ -152,6 +152,12 @@ jobs: echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env + echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env + echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env + echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env + echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env + echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env + echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env - name: Install dependencies working-directory: frontend run: yarn install From 8f9d643923f27ab7061010374aab0dfb052902ba Mon Sep 17 00:00:00 2001 From: Hayden <6836908+features-not-bugs@users.noreply.github.com> Date: Wed, 28 Feb 2024 02:11:00 +0800 Subject: [PATCH 10/16] Add basic support for secure clickhouse connections (#4178) --- .../app/clickhouseReader/options.go | 26 +++++++++---------- pkg/query-service/main.go | 6 ++--- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/pkg/query-service/app/clickhouseReader/options.go b/pkg/query-service/app/clickhouseReader/options.go index f03da2505a..0defced7ed 100644 --- a/pkg/query-service/app/clickhouseReader/options.go +++ b/pkg/query-service/app/clickhouseReader/options.go @@ -2,7 +2,6 @@ package clickhouseReader import ( "context" - "net/url" "time" "github.com/ClickHouse/clickhouse-go/v2" @@ -91,24 +90,23 @@ type Connector func(cfg *namespaceConfig) (clickhouse.Conn, error) func defaultConnector(cfg *namespaceConfig) 
(clickhouse.Conn, error) { ctx := context.Background() - dsnURL, err := url.Parse(cfg.Datasource) + options, err := clickhouse.ParseDSN(cfg.Datasource) if err != nil { return nil, err } - options := &clickhouse.Options{ - Addr: []string{dsnURL.Host}, - MaxOpenConns: cfg.MaxOpenConns, - MaxIdleConns: cfg.MaxIdleConns, - DialTimeout: cfg.DialTimeout, + + // Check if the DSN contained any of the following options, if not set from configuration + if options.MaxIdleConns == 0 { + options.MaxIdleConns = cfg.MaxIdleConns } - if dsnURL.Query().Get("username") != "" { - auth := clickhouse.Auth{ - Username: dsnURL.Query().Get("username"), - Password: dsnURL.Query().Get("password"), - } - options.Auth = auth + if options.MaxOpenConns == 0 { + options.MaxOpenConns = cfg.MaxOpenConns } - zap.S().Infof("Connecting to Clickhouse at %s, MaxIdleConns: %d, MaxOpenConns: %d, DialTimeout: %s", dsnURL.Host, options.MaxIdleConns, options.MaxOpenConns, options.DialTimeout) + if options.DialTimeout == 0 { + options.DialTimeout = cfg.DialTimeout + } + + zap.S().Infof("Connecting to Clickhouse at %s, Secure: %t, MaxIdleConns: %d, MaxOpenConns: %d, DialTimeout: %s", options.Addr, options.TLS != nil, options.MaxIdleConns, options.MaxOpenConns, options.DialTimeout) db, err := clickhouse.Open(options) if err != nil { return nil, err diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go index bb0f9e1aca..f0602c4dcd 100644 --- a/pkg/query-service/main.go +++ b/pkg/query-service/main.go @@ -48,13 +48,13 @@ func main() { flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)") flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)") flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)") - flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)") - flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at 
any time.)") - flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)") flag.StringVar(&ruleRepoURL, "rules.repo-url", constants.AlertHelpPage, "(host address used to build rule link in alert messages)") flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)") flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)") flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')") + flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool, only used with clickhouse if not set in ClickHouseUrl env var DSN.)") + flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time, only used with clickhouse if not set in ClickHouseUrl env var DSN.)") + flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection, only used with clickhouse if not set in ClickHouseUrl env var DSN.)") flag.Parse() loggerMgr := initZapLog() From ddaa464d979ff0c6e55857d008a3070df8c180ba Mon Sep 17 00:00:00 2001 From: Raj Kamal Singh <1133322+raj-k-singh@users.noreply.github.com> Date: Wed, 28 Feb 2024 09:54:50 +0530 Subject: [PATCH 11/16] feat: QS package for integrations (#4578) * chore: bring in latest state of QS api work for integrations * chore: integrations v0 qs API: refactor installed integration struct * chore: finish up with integration lifecycle tests * chore: some cleanup * chore: some more cleanup * chore: some more cleanup * chore: some more cleanup * chore: some more cleanup --------- Co-authored-by: Srikanth Chekuri --- pkg/query-service/app/integrations/Readme.md | 1 + pkg/query-service/app/integrations/manager.go | 208 ++++++++++++++++++ .../app/integrations/manager_test.go | 78 +++++++ pkg/query-service/app/integrations/repo.go | 58 +++++ .../app/integrations/sqlite_repo.go | 168 ++++++++++++++ .../app/integrations/test_utils.go | 161 
++++++++++++++ 6 files changed, 674 insertions(+) create mode 100644 pkg/query-service/app/integrations/Readme.md create mode 100644 pkg/query-service/app/integrations/manager.go create mode 100644 pkg/query-service/app/integrations/manager_test.go create mode 100644 pkg/query-service/app/integrations/repo.go create mode 100644 pkg/query-service/app/integrations/sqlite_repo.go create mode 100644 pkg/query-service/app/integrations/test_utils.go diff --git a/pkg/query-service/app/integrations/Readme.md b/pkg/query-service/app/integrations/Readme.md new file mode 100644 index 0000000000..7ffe1a1a08 --- /dev/null +++ b/pkg/query-service/app/integrations/Readme.md @@ -0,0 +1 @@ +# SigNoz integrations diff --git a/pkg/query-service/app/integrations/manager.go b/pkg/query-service/app/integrations/manager.go new file mode 100644 index 0000000000..3caf352172 --- /dev/null +++ b/pkg/query-service/app/integrations/manager.go @@ -0,0 +1,208 @@ +package integrations + +import ( + "context" + "fmt" + "slices" + "time" + + "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline" + "go.signoz.io/signoz/pkg/query-service/model" +) + +type IntegrationAuthor struct { + Name string + Email string + HomePage string +} +type IntegrationSummary struct { + Id string + Title string + Description string // A short description + + Author IntegrationAuthor +} + +type IntegrationAssets struct { + // Each integration is expected to specify all log transformations + // in a single pipeline with a source based filter + LogPipeline *logparsingpipeline.PostablePipeline + + // TBD: Dashboards, alerts, saved views, facets (indexed attribs)... 
+} + +type IntegrationDetails struct { + IntegrationSummary + IntegrationAssets +} + +type IntegrationsListItem struct { + IntegrationSummary + IsInstalled bool +} + +type InstalledIntegration struct { + IntegrationId string `db:"integration_id"` + Config InstalledIntegrationConfig `db:"config_json"` + InstalledAt time.Time `db:"installed_at"` +} +type InstalledIntegrationConfig map[string]interface{} + +type Integration struct { + IntegrationDetails + Installation *InstalledIntegration +} + +type Manager struct { + availableIntegrationsRepo AvailableIntegrationsRepo + installedIntegrationsRepo InstalledIntegrationsRepo +} + +type IntegrationsFilter struct { + IsInstalled *bool +} + +func (m *Manager) ListIntegrations( + ctx context.Context, + filter *IntegrationsFilter, + // Expected to have pagination over time. +) ([]IntegrationsListItem, *model.ApiError) { + available, apiErr := m.availableIntegrationsRepo.list(ctx) + if apiErr != nil { + return nil, model.WrapApiError( + apiErr, "could not fetch available integrations", + ) + } + + installed, apiErr := m.installedIntegrationsRepo.list(ctx) + if apiErr != nil { + return nil, model.WrapApiError( + apiErr, "could not fetch installed integrations", + ) + } + installedIds := []string{} + for _, ii := range installed { + installedIds = append(installedIds, ii.IntegrationId) + } + + result := []IntegrationsListItem{} + for _, ai := range available { + result = append(result, IntegrationsListItem{ + IntegrationSummary: ai.IntegrationSummary, + IsInstalled: slices.Contains(installedIds, ai.Id), + }) + } + + if filter != nil { + if filter.IsInstalled != nil { + filteredResult := []IntegrationsListItem{} + for _, r := range result { + if r.IsInstalled == *filter.IsInstalled { + filteredResult = append(filteredResult, r) + } + } + result = filteredResult + } + } + + return result, nil +} + +func (m *Manager) GetIntegration( + ctx context.Context, + integrationId string, +) (*Integration, *model.ApiError) { + 
integrationDetails, apiErr := m.getIntegrationDetails( + ctx, integrationId, + ) + if apiErr != nil { + return nil, apiErr + } + + installation, apiErr := m.getInstalledIntegration( + ctx, integrationId, + ) + if apiErr != nil { + return nil, apiErr + } + + return &Integration{ + IntegrationDetails: *integrationDetails, + Installation: installation, + }, nil +} + +func (m *Manager) InstallIntegration( + ctx context.Context, + integrationId string, + config InstalledIntegrationConfig, +) (*IntegrationsListItem, *model.ApiError) { + integrationDetails, apiErr := m.getIntegrationDetails(ctx, integrationId) + if apiErr != nil { + return nil, apiErr + } + + _, apiErr = m.installedIntegrationsRepo.upsert( + ctx, integrationId, config, + ) + if apiErr != nil { + return nil, model.WrapApiError( + apiErr, "could not insert installed integration", + ) + } + + return &IntegrationsListItem{ + IntegrationSummary: integrationDetails.IntegrationSummary, + IsInstalled: true, + }, nil +} + +func (m *Manager) UninstallIntegration( + ctx context.Context, + integrationId string, +) *model.ApiError { + return m.installedIntegrationsRepo.delete(ctx, integrationId) +} + +// Helpers. 
+func (m *Manager) getIntegrationDetails( + ctx context.Context, + integrationId string, +) (*IntegrationDetails, *model.ApiError) { + ais, apiErr := m.availableIntegrationsRepo.get( + ctx, []string{integrationId}, + ) + if apiErr != nil { + return nil, model.WrapApiError(apiErr, fmt.Sprintf( + "could not fetch integration: %s", integrationId, + )) + } + + integrationDetails, wasFound := ais[integrationId] + if !wasFound { + return nil, model.NotFoundError(fmt.Errorf( + "could not find integration: %s", integrationId, + )) + } + return &integrationDetails, nil +} + +func (m *Manager) getInstalledIntegration( + ctx context.Context, + integrationId string, +) (*InstalledIntegration, *model.ApiError) { + iis, apiErr := m.installedIntegrationsRepo.get( + ctx, []string{integrationId}, + ) + if apiErr != nil { + return nil, model.WrapApiError(apiErr, fmt.Sprintf( + "could not fetch installed integration: %s", integrationId, + )) + } + + installation, wasFound := iis[integrationId] + if !wasFound { + return nil, nil + } + return &installation, nil +} diff --git a/pkg/query-service/app/integrations/manager_test.go b/pkg/query-service/app/integrations/manager_test.go new file mode 100644 index 0000000000..08dd50b255 --- /dev/null +++ b/pkg/query-service/app/integrations/manager_test.go @@ -0,0 +1,78 @@ +package integrations + +import ( + "context" + "testing" + + _ "github.com/mattn/go-sqlite3" + "github.com/stretchr/testify/require" +) + +func TestIntegrationLifecycle(t *testing.T) { + require := require.New(t) + + mgr := NewTestIntegrationsManager(t) + ctx := context.Background() + + ii := true + installedIntegrationsFilter := &IntegrationsFilter{ + IsInstalled: &ii, + } + + installedIntegrations, apiErr := mgr.ListIntegrations( + ctx, installedIntegrationsFilter, + ) + require.Nil(apiErr) + require.Equal([]IntegrationsListItem{}, installedIntegrations) + + availableIntegrations, apiErr := mgr.ListIntegrations(ctx, nil) + require.Nil(apiErr) + require.Equal(2, 
len(availableIntegrations)) + require.False(availableIntegrations[0].IsInstalled) + require.False(availableIntegrations[1].IsInstalled) + + testIntegrationConfig := map[string]interface{}{} + installed, apiErr := mgr.InstallIntegration( + ctx, availableIntegrations[1].Id, testIntegrationConfig, + ) + require.Nil(apiErr) + require.Equal(installed.Id, availableIntegrations[1].Id) + + integration, apiErr := mgr.GetIntegration(ctx, availableIntegrations[1].Id) + require.Nil(apiErr) + require.Equal(integration.Id, availableIntegrations[1].Id) + require.NotNil(integration.Installation) + + installedIntegrations, apiErr = mgr.ListIntegrations( + ctx, installedIntegrationsFilter, + ) + require.Nil(apiErr) + require.Equal(1, len(installedIntegrations)) + require.Equal(availableIntegrations[1].Id, installedIntegrations[0].Id) + + availableIntegrations, apiErr = mgr.ListIntegrations(ctx, nil) + require.Nil(apiErr) + require.Equal(2, len(availableIntegrations)) + require.False(availableIntegrations[0].IsInstalled) + require.True(availableIntegrations[1].IsInstalled) + + apiErr = mgr.UninstallIntegration(ctx, installed.Id) + require.Nil(apiErr) + + integration, apiErr = mgr.GetIntegration(ctx, availableIntegrations[1].Id) + require.Nil(apiErr) + require.Equal(integration.Id, availableIntegrations[1].Id) + require.Nil(integration.Installation) + + installedIntegrations, apiErr = mgr.ListIntegrations( + ctx, installedIntegrationsFilter, + ) + require.Nil(apiErr) + require.Equal(0, len(installedIntegrations)) + + availableIntegrations, apiErr = mgr.ListIntegrations(ctx, nil) + require.Nil(apiErr) + require.Equal(2, len(availableIntegrations)) + require.False(availableIntegrations[0].IsInstalled) + require.False(availableIntegrations[1].IsInstalled) +} diff --git a/pkg/query-service/app/integrations/repo.go b/pkg/query-service/app/integrations/repo.go new file mode 100644 index 0000000000..9ed46cd247 --- /dev/null +++ b/pkg/query-service/app/integrations/repo.go @@ -0,0 +1,58 @@ 
+package integrations + +import ( + "context" + "database/sql/driver" + "encoding/json" + + "github.com/pkg/errors" + "go.signoz.io/signoz/pkg/query-service/model" +) + +// For serializing from db +func (c *InstalledIntegrationConfig) Scan(src interface{}) error { + if data, ok := src.([]byte); ok { + return json.Unmarshal(data, &c) + } + return nil +} + +// For serializing to db +func (c *InstalledIntegrationConfig) Value() (driver.Value, error) { + filterSetJson, err := json.Marshal(c) + if err != nil { + return nil, errors.Wrap(err, "could not serialize integration config to JSON") + } + return filterSetJson, nil +} + +type InstalledIntegrationsRepo interface { + list(context.Context) ([]InstalledIntegration, *model.ApiError) + + get( + ctx context.Context, integrationIds []string, + ) (map[string]InstalledIntegration, *model.ApiError) + + upsert( + ctx context.Context, + integrationId string, + config InstalledIntegrationConfig, + ) (*InstalledIntegration, *model.ApiError) + + delete(ctx context.Context, integrationId string) *model.ApiError +} + +type AvailableIntegrationsRepo interface { + list(context.Context) ([]IntegrationDetails, *model.ApiError) + + get( + ctx context.Context, integrationIds []string, + ) (map[string]IntegrationDetails, *model.ApiError) + + // AvailableIntegrationsRepo implementations are expected to cache + // details of installed integrations for quick retrieval. + // + // For v0 only bundled integrations are available, later versions + // are expected to add methods in this interface for pinning installed + // integration details in local cache. 
+} diff --git a/pkg/query-service/app/integrations/sqlite_repo.go b/pkg/query-service/app/integrations/sqlite_repo.go new file mode 100644 index 0000000000..94e9c4d51d --- /dev/null +++ b/pkg/query-service/app/integrations/sqlite_repo.go @@ -0,0 +1,168 @@ +package integrations + +import ( + "context" + "fmt" + "strings" + + "github.com/jmoiron/sqlx" + "go.signoz.io/signoz/pkg/query-service/model" +) + +func InitSqliteDBIfNeeded(db *sqlx.DB) error { + if db == nil { + return fmt.Errorf("db is required.") + } + + createTablesStatements := ` + CREATE TABLE IF NOT EXISTS integrations_installed( + integration_id TEXT PRIMARY KEY, + config_json TEXT, + installed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + ` + _, err := db.Exec(createTablesStatements) + if err != nil { + return fmt.Errorf( + "could not ensure integrations schema in sqlite DB: %w", err, + ) + } + + return nil +} + +type InstalledIntegrationsSqliteRepo struct { + db *sqlx.DB +} + +func NewInstalledIntegrationsSqliteRepo(db *sqlx.DB) ( + *InstalledIntegrationsSqliteRepo, error, +) { + err := InitSqliteDBIfNeeded(db) + if err != nil { + return nil, fmt.Errorf( + "couldn't ensure sqlite schema for installed integrations: %w", err, + ) + } + + return &InstalledIntegrationsSqliteRepo{ + db: db, + }, nil +} + +func (r *InstalledIntegrationsSqliteRepo) list( + ctx context.Context, +) ([]InstalledIntegration, *model.ApiError) { + integrations := []InstalledIntegration{} + + err := r.db.SelectContext( + ctx, &integrations, ` + select + integration_id, + config_json, + installed_at + from integrations_installed + `, + ) + if err != nil { + return nil, model.InternalError(fmt.Errorf( + "could not query installed integrations: %w", err, + )) + } + return integrations, nil +} + +func (r *InstalledIntegrationsSqliteRepo) get( + ctx context.Context, integrationIds []string, +) (map[string]InstalledIntegration, *model.ApiError) { + integrations := []InstalledIntegration{} + + idPlaceholders := []string{} + idValues := 
[]interface{}{} + for _, id := range integrationIds { + idPlaceholders = append(idPlaceholders, "?") + idValues = append(idValues, id) + } + + err := r.db.SelectContext( + ctx, &integrations, fmt.Sprintf(` + select + integration_id, + config_json, + installed_at + from integrations_installed + where integration_id in (%s)`, + strings.Join(idPlaceholders, ", "), + ), + idValues..., + ) + if err != nil { + return nil, model.InternalError(fmt.Errorf( + "could not query installed integrations: %w", err, + )) + } + + result := map[string]InstalledIntegration{} + for _, ii := range integrations { + result[ii.IntegrationId] = ii + } + + return result, nil +} + +func (r *InstalledIntegrationsSqliteRepo) upsert( + ctx context.Context, + integrationId string, + config InstalledIntegrationConfig, +) (*InstalledIntegration, *model.ApiError) { + serializedConfig, err := config.Value() + if err != nil { + return nil, model.BadRequest(fmt.Errorf( + "could not serialize integration config: %w", err, + )) + } + + _, dbErr := r.db.ExecContext( + ctx, ` + INSERT INTO integrations_installed ( + integration_id, + config_json + ) values ($1, $2) + on conflict(integration_id) do update + set config_json=excluded.config_json + `, integrationId, serializedConfig, + ) + if dbErr != nil { + return nil, model.InternalError(fmt.Errorf( + "could not insert record for integration installation: %w", dbErr, + )) + } + + res, apiErr := r.get(ctx, []string{integrationId}) + if apiErr != nil || len(res) < 1 { + return nil, model.WrapApiError( + apiErr, "could not fetch installed integration", + ) + } + + installed := res[integrationId] + + return &installed, nil +} + +func (r *InstalledIntegrationsSqliteRepo) delete( + ctx context.Context, integrationId string, +) *model.ApiError { + _, dbErr := r.db.ExecContext(ctx, ` + DELETE FROM integrations_installed where integration_id = ? 
+ `, integrationId) + + if dbErr != nil { + return model.InternalError(fmt.Errorf( + "could not delete installed integration record for %s: %w", + integrationId, dbErr, + )) + } + + return nil +} diff --git a/pkg/query-service/app/integrations/test_utils.go b/pkg/query-service/app/integrations/test_utils.go new file mode 100644 index 0000000000..6dcb9ec355 --- /dev/null +++ b/pkg/query-service/app/integrations/test_utils.go @@ -0,0 +1,161 @@ +package integrations + +import ( + "context" + "os" + "slices" + "testing" + + "github.com/jmoiron/sqlx" + "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" +) + +func NewTestSqliteDB(t *testing.T) ( + db *sqlx.DB, dbFilePath string, +) { + testDBFile, err := os.CreateTemp("", "test-signoz-db-*") + if err != nil { + t.Fatalf("could not create temp file for test db: %v", err) + } + testDBFilePath := testDBFile.Name() + t.Cleanup(func() { os.Remove(testDBFilePath) }) + testDBFile.Close() + + testDB, err := sqlx.Open("sqlite3", testDBFilePath) + if err != nil { + t.Fatalf("could not open test db sqlite file: %v", err) + } + + return testDB, testDBFilePath +} + +func NewTestIntegrationsManager(t *testing.T) *Manager { + testDB, _ := NewTestSqliteDB(t) + + installedIntegrationsRepo, err := NewInstalledIntegrationsSqliteRepo(testDB) + if err != nil { + t.Fatalf("could not init sqlite DB for installed integrations: %v", err) + } + + return &Manager{ + availableIntegrationsRepo: &TestAvailableIntegrationsRepo{}, + installedIntegrationsRepo: installedIntegrationsRepo, + } +} + +type TestAvailableIntegrationsRepo struct{} + +func (t *TestAvailableIntegrationsRepo) list( + ctx context.Context, +) ([]IntegrationDetails, *model.ApiError) { + return []IntegrationDetails{ + { + IntegrationSummary: IntegrationSummary{ + Id: "test-integration-1", + Title: "Test Integration 1", + Description: "A test integration", + Author: 
IntegrationAuthor{ + Name: "signoz", + Email: "integrations@signoz.io", + HomePage: "https://signoz.io", + }, + }, + IntegrationAssets: IntegrationAssets{ + LogPipeline: &logparsingpipeline.PostablePipeline{ + Name: "pipeline1", + Alias: "pipeline1", + Enabled: true, + Filter: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "method", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: "=", + Value: "GET", + }, + }, + }, + Config: []logparsingpipeline.PipelineOperator{ + { + OrderId: 1, + ID: "add", + Type: "add", + Field: "attributes.test", + Value: "val", + Enabled: true, + Name: "test add", + }, + }, + }, + }, + }, { + IntegrationSummary: IntegrationSummary{ + Id: "test-integration-2", + Title: "Test Integration 2", + Description: "Another test integration", + Author: IntegrationAuthor{ + Name: "signoz", + Email: "integrations@signoz.io", + HomePage: "https://signoz.io", + }, + }, + IntegrationAssets: IntegrationAssets{ + LogPipeline: &logparsingpipeline.PostablePipeline{ + Name: "pipeline2", + Alias: "pipeline2", + Enabled: true, + Filter: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "method", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: "=", + Value: "GET", + }, + }, + }, + Config: []logparsingpipeline.PipelineOperator{ + { + OrderId: 1, + ID: "add", + Type: "add", + Field: "attributes.test", + Value: "val", + Enabled: true, + Name: "test add", + }, + }, + }, + }, + }, + }, nil +} + +func (t *TestAvailableIntegrationsRepo) get( + ctx context.Context, ids []string, +) (map[string]IntegrationDetails, *model.ApiError) { + availableIntegrations, apiErr := t.list(ctx) + if apiErr != nil { + return nil, apiErr + } + + result := map[string]IntegrationDetails{} + + for _, ai := range availableIntegrations { + if slices.Contains(ids, ai.Id) { + result[ai.Id] = ai + } + } + + return result, 
nil +} From 9964e3425a7362465d2c2918fbadf24eeef6ca79 Mon Sep 17 00:00:00 2001 From: Rajat Dabade Date: Wed, 28 Feb 2024 14:56:50 +0530 Subject: [PATCH 12/16] Feat: Bar chart (#4562) * feat: added bar panel and configuration for bar chart --- frontend/jest.config.ts | 2 +- frontend/src/assets/Dashboard/BarIcon.tsx | 41 + frontend/src/constants/panelTypes.ts | 2 + frontend/src/constants/queryBuilder.ts | 1 + .../FormAlertRules/ChartPreview/index.tsx | 13 +- .../container/FormAlertRules/QuerySection.tsx | 4 +- .../src/container/FormAlertRules/index.tsx | 19 +- .../GridCard/FullView/contants.ts | 1 + .../GridCard/FullView/index.tsx | 10 +- .../GridCardLayout/GridCard/index.tsx | 13 +- .../src/container/GridPanelSwitch/index.tsx | 5 + .../src/container/GridPanelSwitch/types.ts | 3 + .../container/ListAlertRules/ListAlert.tsx | 4 +- .../ComponentsSlider/constants.ts | 1 + .../ComponentsSlider/menuItems.ts | 2 + .../WidgetGraph/WidgetGraphContainer.tsx | 12 +- .../WidgetGraph/WidgetGraphs.tsx | 5 + .../LeftContainer/WidgetGraph/index.tsx | 3 +- .../NewWidget/RightContainer/constants.ts | 7 + .../container/QueryBuilder/QueryBuilder.tsx | 4 + .../hooks/queryBuilder/useCreateAlerts.tsx | 5 +- .../hooks/queryBuilder/useGetQueryRange.ts | 18 +- .../src/lib/uPlotLib/getUplotChartOptions.ts | 13 +- frontend/src/lib/uPlotLib/utils/constants.ts | 15 + frontend/src/lib/uPlotLib/utils/getAxes.ts | 1 - .../src/lib/uPlotLib/utils/getRenderer.ts | 23 +- .../src/lib/uPlotLib/utils/getSeriesData.ts | 35 +- .../utils/tests/__mocks__/seriesData.ts | 889 ++++++++++++++++++ .../tests/__mocks__/uplotChartOptionsData.ts | 453 +++++++++ .../utils/tests/getSeriesData.test.ts | 32 + .../utils/tests/getUplotChartOptions.test.ts | 68 ++ frontend/src/providers/QueryBuilder.tsx | 8 +- frontend/src/utils/getGraphType.ts | 9 + frontend/src/utils/getSortedSeriesData.ts | 20 + 34 files changed, 1694 insertions(+), 47 deletions(-) create mode 100644 frontend/src/assets/Dashboard/BarIcon.tsx create mode 
100644 frontend/src/lib/uPlotLib/utils/constants.ts create mode 100644 frontend/src/lib/uPlotLib/utils/tests/__mocks__/seriesData.ts create mode 100644 frontend/src/lib/uPlotLib/utils/tests/__mocks__/uplotChartOptionsData.ts create mode 100644 frontend/src/lib/uPlotLib/utils/tests/getSeriesData.test.ts create mode 100644 frontend/src/lib/uPlotLib/utils/tests/getUplotChartOptions.test.ts create mode 100644 frontend/src/utils/getGraphType.ts create mode 100644 frontend/src/utils/getSortedSeriesData.ts diff --git a/frontend/jest.config.ts b/frontend/jest.config.ts index 0493353115..7b52ca5cf6 100644 --- a/frontend/jest.config.ts +++ b/frontend/jest.config.ts @@ -24,7 +24,7 @@ const config: Config.InitialOptions = { '^(?!.*\\.(js|jsx|mjs|cjs|ts|tsx|css|json)$)': 'jest-preview/transforms/file', }, transformIgnorePatterns: [ - 'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens)/)', + 'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color)/)', ], setupFilesAfterEnv: ['jest.setup.ts'], testPathIgnorePatterns: ['/node_modules/', '/public/'], diff --git a/frontend/src/assets/Dashboard/BarIcon.tsx b/frontend/src/assets/Dashboard/BarIcon.tsx new file mode 100644 index 0000000000..b8e6b3c52f --- /dev/null +++ b/frontend/src/assets/Dashboard/BarIcon.tsx @@ -0,0 +1,41 @@ +import { CSSProperties } from 'react'; + +function BarIcon({ + fillColor, +}: { + fillColor: CSSProperties['color']; +}): JSX.Element { + return ( + + + + + + ); +} + +export default BarIcon; diff --git a/frontend/src/constants/panelTypes.ts b/frontend/src/constants/panelTypes.ts index d16e5bf92d..c6db5db2da 100644 --- a/frontend/src/constants/panelTypes.ts +++ b/frontend/src/constants/panelTypes.ts @@ -14,6 +14,7 @@ export const PANEL_TYPES_COMPONENT_MAP = { [PANEL_TYPES.TRACE]: null, [PANEL_TYPES.LIST]: LogsPanelComponent, [PANEL_TYPES.EMPTY_WIDGET]: 
null, + [PANEL_TYPES.BAR]: Uplot, } as const; export const getComponentForPanelType = ( @@ -27,6 +28,7 @@ export const getComponentForPanelType = ( [PANEL_TYPES.TRACE]: null, [PANEL_TYPES.LIST]: dataSource === DataSource.LOGS ? LogsPanelComponent : TracesTableComponent, + [PANEL_TYPES.BAR]: Uplot, [PANEL_TYPES.EMPTY_WIDGET]: null, }; diff --git a/frontend/src/constants/queryBuilder.ts b/frontend/src/constants/queryBuilder.ts index c53873bc5c..936bfccdde 100644 --- a/frontend/src/constants/queryBuilder.ts +++ b/frontend/src/constants/queryBuilder.ts @@ -264,6 +264,7 @@ export enum PANEL_TYPES { TABLE = 'table', LIST = 'list', TRACE = 'trace', + BAR = 'bar', EMPTY_WIDGET = 'EMPTY_WIDGET', } diff --git a/frontend/src/container/FormAlertRules/ChartPreview/index.tsx b/frontend/src/container/FormAlertRules/ChartPreview/index.tsx index 97b540df35..9df51314d0 100644 --- a/frontend/src/container/FormAlertRules/ChartPreview/index.tsx +++ b/frontend/src/container/FormAlertRules/ChartPreview/index.tsx @@ -19,6 +19,8 @@ import { AlertDef } from 'types/api/alerts/def'; import { Query } from 'types/api/queryBuilder/queryBuilderData'; import { EQueryType } from 'types/common/dashboard'; import { GlobalReducer } from 'types/reducer/globalTime'; +import { getGraphType } from 'utils/getGraphType'; +import { getSortedSeriesData } from 'utils/getSortedSeriesData'; import { getTimeRange } from 'utils/getTimeRange'; import { ChartContainer, FailedMessageContainer } from './styles'; @@ -86,7 +88,7 @@ function ChartPreview({ { query: query || initialQueriesMap.metrics, globalSelectedInterval: selectedInterval, - graphType, + graphType: getGraphType(graphType), selectedTime, params: { allowSelectedIntervalForStepGen, @@ -114,6 +116,13 @@ function ChartPreview({ setMaxTimeScale(endTime); }, [maxTime, minTime, globalSelectedInterval, queryResponse]); + if (queryResponse.data && graphType === PANEL_TYPES.BAR) { + const sortedSeriesData = getSortedSeriesData( + 
queryResponse.data?.payload.data.result, + ); + queryResponse.data.payload.data.result = sortedSeriesData; + } + const chartData = getUPlotChartData(queryResponse?.data?.payload); const containerDimensions = useResizeObserver(graphRef); @@ -153,6 +162,7 @@ function ChartPreview({ ], softMax: null, softMin: null, + panelType: graphType, }), [ yAxisUnit, @@ -165,6 +175,7 @@ function ChartPreview({ t, optionName, alertDef?.condition.targetUnit, + graphType, ], ); diff --git a/frontend/src/container/FormAlertRules/QuerySection.tsx b/frontend/src/container/FormAlertRules/QuerySection.tsx index 619e61af2f..9eb4fd4703 100644 --- a/frontend/src/container/FormAlertRules/QuerySection.tsx +++ b/frontend/src/container/FormAlertRules/QuerySection.tsx @@ -22,6 +22,7 @@ function QuerySection({ setQueryCategory, alertType, runQuery, + panelType, }: QuerySectionProps): JSX.Element { // init namespace for translations const { t } = useTranslation('alerts'); @@ -44,7 +45,7 @@ function QuerySection({ const renderMetricUI = (): JSX.Element => ( void; alertType: AlertTypes; runQuery: VoidFunction; + panelType: PANEL_TYPES; } export default QuerySection; diff --git a/frontend/src/container/FormAlertRules/index.tsx b/frontend/src/container/FormAlertRules/index.tsx index b87f025f9d..396474211f 100644 --- a/frontend/src/container/FormAlertRules/index.tsx +++ b/frontend/src/container/FormAlertRules/index.tsx @@ -10,6 +10,7 @@ import { import saveAlertApi from 'api/alerts/save'; import testAlertApi from 'api/alerts/testAlert'; import { FeatureKeys } from 'constants/features'; +import { QueryParams } from 'constants/query'; import { PANEL_TYPES } from 'constants/queryBuilder'; import ROUTES from 'constants/routes'; import QueryTypeTag from 'container/NewWidget/LeftContainer/QueryTypeTag'; @@ -20,6 +21,7 @@ import { useShareBuilderUrl } from 'hooks/queryBuilder/useShareBuilderUrl'; import { updateStepInterval } from 'hooks/queryBuilder/useStepInterval'; import { MESSAGE, useIsFeatureDisabled } 
from 'hooks/useFeatureFlag'; import { useNotifications } from 'hooks/useNotifications'; +import useUrlQuery from 'hooks/useUrlQuery'; import history from 'lib/history'; import { mapQueryDataFromApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataFromApi'; import { mapQueryDataToApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataToApi'; @@ -68,14 +70,23 @@ function FormAlertRules({ GlobalReducer >((state) => state.globalTime); + const urlQuery = useUrlQuery(); + + const panelType = urlQuery.get(QueryParams.panelTypes) as PANEL_TYPES | null; + const { currentQuery, - panelType, stagedQuery, handleRunQuery, + handleSetConfig, + initialDataSource, redirectWithQueryBuilderData, } = useQueryBuilder(); + useEffect(() => { + handleSetConfig(panelType || PANEL_TYPES.TIME_SERIES, initialDataSource); + }, [handleSetConfig, initialDataSource, panelType]); + // use query client const ruleCache = useQueryClient(); @@ -277,7 +288,7 @@ function FormAlertRules({ promQueries: mapQueryDataToApi(currentQuery.promql, 'name').data, chQueries: mapQueryDataToApi(currentQuery.clickhouse_sql, 'name').data, queryType: currentQuery.queryType, - panelType: initQuery.panelType, + panelType: panelType || initQuery.panelType, unit: currentQuery.unit, }, }, @@ -290,6 +301,7 @@ function FormAlertRules({ alertDef, alertType, initQuery, + panelType, ]); const isAlertAvialable = useIsFeatureDisabled( @@ -423,6 +435,7 @@ function FormAlertRules({ selectedInterval={globalSelectedInterval} alertDef={alertDef} yAxisUnit={yAxisUnit || ''} + graphType={panelType || PANEL_TYPES.TIME_SERIES} /> ); @@ -439,6 +452,7 @@ function FormAlertRules({ alertDef={alertDef} selectedInterval={globalSelectedInterval} yAxisUnit={yAxisUnit || ''} + graphType={panelType || PANEL_TYPES.TIME_SERIES} /> ); @@ -495,6 +509,7 @@ function FormAlertRules({ setQueryCategory={onQueryCategoryChange} alertType={alertType || AlertTypes.METRICS_BASED_ALERT} runQuery={handleRunQuery} + panelType={panelType || 
PANEL_TYPES.TIME_SERIES} /> ; + }; [PANEL_TYPES.EMPTY_WIDGET]: null; }; diff --git a/frontend/src/container/ListAlertRules/ListAlert.tsx b/frontend/src/container/ListAlertRules/ListAlert.tsx index 52d54146b8..4ed655b8e4 100644 --- a/frontend/src/container/ListAlertRules/ListAlert.tsx +++ b/frontend/src/container/ListAlertRules/ListAlert.tsx @@ -120,7 +120,9 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element { history.push( `${ROUTES.EDIT_ALERTS}?ruleId=${record.id.toString()}&${ QueryParams.compositeQuery - }=${encodeURIComponent(JSON.stringify(compositeQuery))}`, + }=${encodeURIComponent(JSON.stringify(compositeQuery))}&panelTypes=${ + record.condition.compositeQuery.panelType + }`, ); }) .catch(handleError); diff --git a/frontend/src/container/NewDashboard/ComponentsSlider/constants.ts b/frontend/src/container/NewDashboard/ComponentsSlider/constants.ts index 44512e3a00..11b693be3b 100644 --- a/frontend/src/container/NewDashboard/ComponentsSlider/constants.ts +++ b/frontend/src/container/NewDashboard/ComponentsSlider/constants.ts @@ -9,6 +9,7 @@ export const PANEL_TYPES_INITIAL_QUERY = { [PANEL_TYPES.TABLE]: initialQueriesMap.metrics, [PANEL_TYPES.LIST]: initialQueriesMap.logs, [PANEL_TYPES.TRACE]: initialQueriesMap.traces, + [PANEL_TYPES.BAR]: initialQueriesMap.metrics, [PANEL_TYPES.EMPTY_WIDGET]: initialQueriesMap.metrics, }; diff --git a/frontend/src/container/NewDashboard/ComponentsSlider/menuItems.ts b/frontend/src/container/NewDashboard/ComponentsSlider/menuItems.ts index 1aaa3a71ea..3ea0268cdb 100644 --- a/frontend/src/container/NewDashboard/ComponentsSlider/menuItems.ts +++ b/frontend/src/container/NewDashboard/ComponentsSlider/menuItems.ts @@ -1,3 +1,4 @@ +import BarIcon from 'assets/Dashboard/BarIcon'; import List from 'assets/Dashboard/List'; import TableIcon from 'assets/Dashboard/Table'; import TimeSeriesIcon from 'assets/Dashboard/TimeSeries'; @@ -18,6 +19,7 @@ const Items: ItemsProps[] = [ }, { name: PANEL_TYPES.TABLE, 
Icon: TableIcon, display: 'Table' }, { name: PANEL_TYPES.LIST, Icon: List, display: 'List' }, + { name: PANEL_TYPES.BAR, Icon: BarIcon, display: 'Bar' }, ]; interface ItemsProps { diff --git a/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphContainer.tsx b/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphContainer.tsx index 2472d94092..1e5bcf6721 100644 --- a/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphContainer.tsx +++ b/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphContainer.tsx @@ -5,6 +5,8 @@ import { WidgetGraphProps } from 'container/NewWidget/types'; import { useGetWidgetQueryRange } from 'hooks/queryBuilder/useGetWidgetQueryRange'; import useUrlQuery from 'hooks/useUrlQuery'; import { useDashboard } from 'providers/Dashboard/Dashboard'; +import { getGraphType } from 'utils/getGraphType'; +import { getSortedSeriesData } from 'utils/getSortedSeriesData'; import { NotFoundContainer } from './styles'; import WidgetGraph from './WidgetGraphs'; @@ -31,10 +33,17 @@ function WidgetGraphContainer({ const selectedWidget = widgets.find((e) => e.id === widgetId); const getWidgetQueryRange = useGetWidgetQueryRange({ - graphType: selectedGraph, + graphType: getGraphType(selectedGraph), selectedTime: selectedTime.enum, }); + if (getWidgetQueryRange.data && selectedGraph === PANEL_TYPES.BAR) { + const sortedSeriesData = getSortedSeriesData( + getWidgetQueryRange.data?.payload.data.result, + ); + getWidgetQueryRange.data.payload.data.result = sortedSeriesData; + } + if (selectedWidget === undefined) { return Invalid widget; } @@ -83,6 +92,7 @@ function WidgetGraphContainer({ selectedLogFields={selectedLogFields} selectedTracesFields={selectedTracesFields} selectedTime={selectedTime} + selectedGraph={selectedGraph} /> ); } diff --git a/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphs.tsx 
b/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphs.tsx index ccd0a91ea3..647b746c2d 100644 --- a/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphs.tsx +++ b/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/WidgetGraphs.tsx @@ -1,4 +1,5 @@ import { QueryParams } from 'constants/query'; +import { PANEL_TYPES } from 'constants/queryBuilder'; import GridPanelSwitch from 'container/GridPanelSwitch'; import { ThresholdProps } from 'container/NewWidget/RightContainer/Threshold/types'; import { timePreferance } from 'container/NewWidget/RightContainer/timeItems'; @@ -34,6 +35,7 @@ function WidgetGraph({ selectedLogFields, selectedTracesFields, selectedTime, + selectedGraph, }: WidgetGraphProps): JSX.Element { const { stagedQuery, currentQuery } = useQueryBuilder(); @@ -130,6 +132,7 @@ function WidgetGraph({ maxTimeScale, softMax, softMin, + panelType: selectedGraph, }), [ widgetId, @@ -144,6 +147,7 @@ function WidgetGraph({ maxTimeScale, softMax, softMin, + selectedGraph, ], ); @@ -183,6 +187,7 @@ interface WidgetGraphProps { selectedLogFields: Widgets['selectedLogFields']; selectedTracesFields: Widgets['selectedTracesFields']; selectedTime: timePreferance; + selectedGraph: PANEL_TYPES; } export default WidgetGraph; diff --git a/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/index.tsx b/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/index.tsx index 1f306a41a2..14dcedad08 100644 --- a/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/index.tsx +++ b/frontend/src/container/NewWidget/LeftContainer/WidgetGraph/index.tsx @@ -5,6 +5,7 @@ import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder'; import useUrlQuery from 'hooks/useUrlQuery'; import { useDashboard } from 'providers/Dashboard/Dashboard'; import { memo } from 'react'; +import { getGraphType } from 'utils/getGraphType'; import { WidgetGraphProps } from '../../types'; import PlotTag from './PlotTag'; @@ -34,7 +35,7 @@ 
function WidgetGraph({ const selectedWidget = widgets.find((e) => e.id === widgetId); const getWidgetQueryRange = useGetWidgetQueryRange({ - graphType: selectedGraph, + graphType: getGraphType(selectedGraph), selectedTime: selectedTime.enum, }); diff --git a/frontend/src/container/NewWidget/RightContainer/constants.ts b/frontend/src/container/NewWidget/RightContainer/constants.ts index a6663ac75c..a3ce202f85 100644 --- a/frontend/src/container/NewWidget/RightContainer/constants.ts +++ b/frontend/src/container/NewWidget/RightContainer/constants.ts @@ -26,6 +26,7 @@ export const panelTypeVsThreshold: { [key in PANEL_TYPES]: boolean } = { [PANEL_TYPES.VALUE]: true, [PANEL_TYPES.TABLE]: true, [PANEL_TYPES.LIST]: false, + [PANEL_TYPES.BAR]: true, [PANEL_TYPES.TRACE]: false, [PANEL_TYPES.EMPTY_WIDGET]: false, } as const; @@ -35,6 +36,7 @@ export const panelTypeVsSoftMinMax: { [key in PANEL_TYPES]: boolean } = { [PANEL_TYPES.VALUE]: false, [PANEL_TYPES.TABLE]: false, [PANEL_TYPES.LIST]: false, + [PANEL_TYPES.BAR]: true, [PANEL_TYPES.TRACE]: false, [PANEL_TYPES.EMPTY_WIDGET]: false, } as const; @@ -44,6 +46,7 @@ export const panelTypeVsDragAndDrop: { [key in PANEL_TYPES]: boolean } = { [PANEL_TYPES.VALUE]: true, [PANEL_TYPES.TABLE]: true, [PANEL_TYPES.LIST]: false, + [PANEL_TYPES.BAR]: false, [PANEL_TYPES.TRACE]: false, [PANEL_TYPES.EMPTY_WIDGET]: false, } as const; @@ -53,6 +56,7 @@ export const panelTypeVsFillSpan: { [key in PANEL_TYPES]: boolean } = { [PANEL_TYPES.VALUE]: false, [PANEL_TYPES.TABLE]: false, [PANEL_TYPES.LIST]: false, + [PANEL_TYPES.BAR]: false, [PANEL_TYPES.TRACE]: false, [PANEL_TYPES.EMPTY_WIDGET]: false, } as const; @@ -62,6 +66,7 @@ export const panelTypeVsYAxisUnit: { [key in PANEL_TYPES]: boolean } = { [PANEL_TYPES.VALUE]: true, [PANEL_TYPES.TABLE]: true, [PANEL_TYPES.LIST]: false, + [PANEL_TYPES.BAR]: true, [PANEL_TYPES.TRACE]: false, [PANEL_TYPES.EMPTY_WIDGET]: false, } as const; @@ -71,6 +76,7 @@ export const panelTypeVsCreateAlert: { [key in 
PANEL_TYPES]: boolean } = { [PANEL_TYPES.VALUE]: true, [PANEL_TYPES.TABLE]: false, [PANEL_TYPES.LIST]: false, + [PANEL_TYPES.BAR]: true, [PANEL_TYPES.TRACE]: false, [PANEL_TYPES.EMPTY_WIDGET]: false, } as const; @@ -82,6 +88,7 @@ export const panelTypeVsPanelTimePreferences: { [PANEL_TYPES.VALUE]: true, [PANEL_TYPES.TABLE]: true, [PANEL_TYPES.LIST]: false, + [PANEL_TYPES.BAR]: true, [PANEL_TYPES.TRACE]: false, [PANEL_TYPES.EMPTY_WIDGET]: false, } as const; diff --git a/frontend/src/container/QueryBuilder/QueryBuilder.tsx b/frontend/src/container/QueryBuilder/QueryBuilder.tsx index f67533e427..0bdc321c1e 100644 --- a/frontend/src/container/QueryBuilder/QueryBuilder.tsx +++ b/frontend/src/container/QueryBuilder/QueryBuilder.tsx @@ -46,6 +46,10 @@ export const QueryBuilder = memo(function QueryBuilder({ useEffect(() => { if (currentDataSource !== initialDataSource || newPanelType !== panelType) { + if (panelType === PANEL_TYPES.BAR) { + handleSetConfig(PANEL_TYPES.BAR, DataSource.METRICS); + return; + } handleSetConfig(newPanelType, currentDataSource); } }, [ diff --git a/frontend/src/hooks/queryBuilder/useCreateAlerts.tsx b/frontend/src/hooks/queryBuilder/useCreateAlerts.tsx index a80806a8fd..6cf7da047c 100644 --- a/frontend/src/hooks/queryBuilder/useCreateAlerts.tsx +++ b/frontend/src/hooks/queryBuilder/useCreateAlerts.tsx @@ -14,6 +14,7 @@ import { useSelector } from 'react-redux'; import { AppState } from 'store/reducers'; import { Widgets } from 'types/api/dashboard/getAll'; import { GlobalReducer } from 'types/reducer/globalTime'; +import { getGraphType } from 'utils/getGraphType'; const useCreateAlerts = (widget?: Widgets): VoidFunction => { const queryRangeMutation = useMutation(getQueryRangeFormat); @@ -33,7 +34,7 @@ const useCreateAlerts = (widget?: Widgets): VoidFunction => { const { queryPayload } = prepareQueryRangePayload({ query: widget.query, globalSelectedInterval, - graphType: widget.panelTypes, + graphType: getGraphType(widget.panelTypes), 
selectedTime: widget.timePreferance, variables: getDashboardVariables(selectedDashboard?.data.variables), }); @@ -44,7 +45,7 @@ const useCreateAlerts = (widget?: Widgets): VoidFunction => { history.push( `${ROUTES.ALERTS_NEW}?${QueryParams.compositeQuery}=${encodeURIComponent( JSON.stringify(updatedQuery), - )}`, + )}&${QueryParams.panelTypes}=${widget.panelTypes}`, ); }, onError: () => { diff --git a/frontend/src/hooks/queryBuilder/useGetQueryRange.ts b/frontend/src/hooks/queryBuilder/useGetQueryRange.ts index c54a07461d..e832be1c4c 100644 --- a/frontend/src/hooks/queryBuilder/useGetQueryRange.ts +++ b/frontend/src/hooks/queryBuilder/useGetQueryRange.ts @@ -1,3 +1,4 @@ +import { PANEL_TYPES } from 'constants/queryBuilder'; import { REACT_QUERY_KEY } from 'constants/reactQueryKeys'; import { GetMetricQueryRange, @@ -14,6 +15,17 @@ type UseGetQueryRange = ( ) => UseQueryResult, Error>; export const useGetQueryRange: UseGetQueryRange = (requestData, options) => { + const newRequestData: GetQueryResultsProps = useMemo( + () => ({ + ...requestData, + graphType: + requestData.graphType === PANEL_TYPES.BAR + ? 
PANEL_TYPES.TIME_SERIES + : requestData.graphType, + }), + [requestData], + ); + const queryKey = useMemo(() => { if (options?.queryKey && Array.isArray(options.queryKey)) { return [...options.queryKey]; @@ -23,11 +35,11 @@ export const useGetQueryRange: UseGetQueryRange = (requestData, options) => { return options.queryKey; } - return [REACT_QUERY_KEY.GET_QUERY_RANGE, requestData]; - }, [options?.queryKey, requestData]); + return [REACT_QUERY_KEY.GET_QUERY_RANGE, newRequestData]; + }, [options?.queryKey, newRequestData]); return useQuery, Error>({ - queryFn: async ({ signal }) => GetMetricQueryRange(requestData, signal), + queryFn: async ({ signal }) => GetMetricQueryRange(newRequestData, signal), ...options, queryKey, }); diff --git a/frontend/src/lib/uPlotLib/getUplotChartOptions.ts b/frontend/src/lib/uPlotLib/getUplotChartOptions.ts index b6e61a2ae6..dae5bbdfd5 100644 --- a/frontend/src/lib/uPlotLib/getUplotChartOptions.ts +++ b/frontend/src/lib/uPlotLib/getUplotChartOptions.ts @@ -4,6 +4,7 @@ /* eslint-disable sonarjs/cognitive-complexity */ import './uPlotLib.styles.scss'; +import { PANEL_TYPES } from 'constants/queryBuilder'; import { FullViewProps } from 'container/GridCardLayout/GridCard/FullView/types'; import { ThresholdProps } from 'container/NewWidget/RightContainer/Threshold/types'; import { Dimensions } from 'hooks/useDimensions'; @@ -19,11 +20,12 @@ import getSeries from './utils/getSeriesData'; import { getXAxisScale } from './utils/getXAxisScale'; import { getYAxisScale } from './utils/getYAxisScale'; -interface GetUPlotChartOptions { +export interface GetUPlotChartOptions { id?: string; apiResponse?: MetricRangePayloadProps; dimensions: Dimensions; isDarkMode: boolean; + panelType?: PANEL_TYPES; onDragSelect?: (startTime: number, endTime: number) => void; yAxisUnit?: string; onClickHandler?: OnClickPluginOpts['onClick']; @@ -55,6 +57,7 @@ export const getUPlotChartOptions = ({ fillSpans, softMax, softMin, + panelType, }: GetUPlotChartOptions): 
uPlot.Options => { const timeScaleProps = getXAxisScale(minTimeScale, maxTimeScale); @@ -209,12 +212,12 @@ export const getUPlotChartOptions = ({ }, ], }, - series: getSeries( + series: getSeries({ apiResponse, - apiResponse?.data.result, + widgetMetaData: apiResponse?.data.result, graphsVisibilityStates, - fillSpans, - ), + panelType, + }), axes: getAxes(isDarkMode, yAxisUnit), }; }; diff --git a/frontend/src/lib/uPlotLib/utils/constants.ts b/frontend/src/lib/uPlotLib/utils/constants.ts new file mode 100644 index 0000000000..7bba405533 --- /dev/null +++ b/frontend/src/lib/uPlotLib/utils/constants.ts @@ -0,0 +1,15 @@ +// Define type annotations for style and interp +export const drawStyles = { + line: 'line', + bars: 'bars', + barsLeft: 'barsLeft', + barsRight: 'barsRight', + points: 'points', +}; + +export const lineInterpolations = { + linear: 'linear', + stepAfter: 'stepAfter', + stepBefore: 'stepBefore', + spline: 'spline', +}; diff --git a/frontend/src/lib/uPlotLib/utils/getAxes.ts b/frontend/src/lib/uPlotLib/utils/getAxes.ts index 2066f125da..8613ef9cbf 100644 --- a/frontend/src/lib/uPlotLib/utils/getAxes.ts +++ b/frontend/src/lib/uPlotLib/utils/getAxes.ts @@ -61,5 +61,4 @@ const getAxes = (isDarkMode: boolean, yAxisUnit?: string): any => [ }, }, ]; - export default getAxes; diff --git a/frontend/src/lib/uPlotLib/utils/getRenderer.ts b/frontend/src/lib/uPlotLib/utils/getRenderer.ts index 564a4532b0..805644b4cc 100644 --- a/frontend/src/lib/uPlotLib/utils/getRenderer.ts +++ b/frontend/src/lib/uPlotLib/utils/getRenderer.ts @@ -1,30 +1,21 @@ import uPlot from 'uplot'; -// Define type annotations for style and interp -export const drawStyles = { - line: 'line', - bars: 'bars', - barsLeft: 'barsLeft', - barsRight: 'barsRight', - points: 'points', -}; +import { drawStyles, lineInterpolations } from './constants'; -export const lineInterpolations = { - linear: 'linear', - stepAfter: 'stepAfter', - stepBefore: 'stepBefore', - spline: 'spline', -}; - -const { spline: 
splinePath } = uPlot.paths; +const { spline: splinePath, bars: barsPath } = uPlot.paths; const spline = splinePath && splinePath(); +const bars = barsPath && barsPath(); const getRenderer = (style: any, interp: any): any => { if (style === drawStyles.line && interp === lineInterpolations.spline) { return spline; } + if (style === drawStyles.bars) { + return bars; + } + return null; }; diff --git a/frontend/src/lib/uPlotLib/utils/getSeriesData.ts b/frontend/src/lib/uPlotLib/utils/getSeriesData.ts index 16be3aa04e..cf60a632cb 100644 --- a/frontend/src/lib/uPlotLib/utils/getSeriesData.ts +++ b/frontend/src/lib/uPlotLib/utils/getSeriesData.ts @@ -1,10 +1,13 @@ +/* eslint-disable sonarjs/cognitive-complexity */ +import { PANEL_TYPES } from 'constants/queryBuilder'; import { themeColors } from 'constants/theme'; import getLabelName from 'lib/getLabelName'; import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange'; import { QueryData } from 'types/api/widgets/getQuery'; +import { drawStyles, lineInterpolations } from './constants'; import { generateColor } from './generateColor'; -import getRenderer, { drawStyles, lineInterpolations } from './getRenderer'; +import getRenderer from './getRenderer'; const paths = ( u: any, @@ -23,17 +26,17 @@ const paths = ( return renderer(u, seriesIdx, idx0, idx1, extendGap, buildClip); }; -const getSeries = ( - apiResponse?: MetricRangePayloadProps, - widgetMetaData: QueryData[] = [], - graphsVisibilityStates?: boolean[], -): uPlot.Options['series'] => { +const getSeries = ({ + apiResponse, + widgetMetaData, + graphsVisibilityStates, + panelType, +}: GetSeriesProps): uPlot.Options['series'] => { const configurations: uPlot.Series[] = [ { label: 'Timestamp', stroke: 'purple' }, ]; const seriesList = apiResponse?.data.result || []; - const newGraphVisibilityStates = graphsVisibilityStates?.slice(1); for (let i = 0; i < seriesList?.length; i += 1) { @@ -52,10 +55,17 @@ const getSeries = ( const seriesObj: any = { paths, - 
drawStyle: drawStyles.line, - lineInterpolation: lineInterpolations.spline, + drawStyle: + panelType && panelType === PANEL_TYPES.BAR + ? drawStyles.bars + : drawStyles.line, + lineInterpolation: + panelType && panelType === PANEL_TYPES.BAR + ? null + : lineInterpolations.spline, show: newGraphVisibilityStates ? newGraphVisibilityStates[i] : true, label, + fill: panelType && panelType === PANEL_TYPES.BAR ? `${color}40` : undefined, stroke: color, width: 2, spanGaps: true, @@ -72,4 +82,11 @@ const getSeries = ( return configurations; }; +export type GetSeriesProps = { + apiResponse?: MetricRangePayloadProps; + widgetMetaData: QueryData[]; + graphsVisibilityStates?: boolean[]; + panelType?: PANEL_TYPES; +}; + export default getSeries; diff --git a/frontend/src/lib/uPlotLib/utils/tests/__mocks__/seriesData.ts b/frontend/src/lib/uPlotLib/utils/tests/__mocks__/seriesData.ts new file mode 100644 index 0000000000..6986625bf8 --- /dev/null +++ b/frontend/src/lib/uPlotLib/utils/tests/__mocks__/seriesData.ts @@ -0,0 +1,889 @@ +import { PANEL_TYPES } from 'constants/queryBuilder'; + +import { GetSeriesProps } from '../../getSeriesData'; + +export const seriesBarChartData = { + apiResponse: { + data: { + result: [ + { + metric: {}, + values: [ + [1708683840, '6260'], + [1708683240, '6251'], + [1708683780, '6237'], + [1708683660, '6188'], + [1708683720, '6176'], + [1708683360, '6169'], + [1708683480, '6068'], + [1708683540, '6025'], + [1708683300, '6042'], + [1708683420, '6001'], + [1708683600, '5969'], + [1708683900, '5955'], + [1708683180, '4301'], + ], + queryName: 'F1', + legend: 'firstLegend', + }, + { + metric: {}, + values: [ + [1708683240, '3378'], + [1708683300, '3269'], + [1708683360, '3341'], + [1708683420, '3269'], + [1708683480, '3296'], + [1708683540, '3280'], + [1708683600, '3260'], + [1708683660, '3351'], + [1708683720, '3345'], + [1708683780, '3370'], + [1708683840, '3382'], + [1708683900, '3249'], + [1708683960, '212'], + ], + queryName: 'A', + legend: 
'secondLegend', + }, + { + metric: {}, + values: [ + [1708683840, '2878'], + [1708683240, '2873'], + [1708683780, '2867'], + [1708683660, '2837'], + [1708683720, '2831'], + [1708683360, '2828'], + [1708683300, '2773'], + [1708683480, '2772'], + [1708683540, '2745'], + [1708683420, '2732'], + [1708683180, '2729'], + [1708683600, '2709'], + [1708683900, '2706'], + ], + queryName: 'B', + legend: 'thirdLegend', + }, + { + metric: { + F2: 'F2', + }, + values: [ + [1708683840, '504'], + [1708683240, '505'], + [1708683780, '503'], + [1708683660, '514'], + [1708683720, '514'], + [1708683360, '513'], + [1708683480, '524'], + [1708683540, '535'], + [1708683300, '496'], + [1708683420, '537'], + [1708683600, '551'], + [1708683900, '543'], + [1708683180, '-1157'], + ], + queryName: 'F2', + legend: 'forthLength', + }, + ], + resultType: '', + newResult: { + status: 'success', + data: { + resultType: '', + result: [ + { + queryName: 'A', + series: [ + { + labels: {}, + labelsArray: null, + values: [ + { + timestamp: 1708683240000, + value: '3378', + }, + { + timestamp: 1708683300000, + value: '3269', + }, + { + timestamp: 1708683360000, + value: '3341', + }, + { + timestamp: 1708683420000, + value: '3269', + }, + { + timestamp: 1708683480000, + value: '3296', + }, + { + timestamp: 1708683540000, + value: '3280', + }, + { + timestamp: 1708683600000, + value: '3260', + }, + { + timestamp: 1708683660000, + value: '3351', + }, + { + timestamp: 1708683720000, + value: '3345', + }, + { + timestamp: 1708683780000, + value: '3370', + }, + { + timestamp: 1708683840000, + value: '3382', + }, + { + timestamp: 1708683900000, + value: '3249', + }, + { + timestamp: 1708683960000, + value: '212', + }, + ], + }, + ], + list: null, + }, + { + queryName: 'B', + series: [ + { + labels: {}, + labelsArray: null, + values: [ + { + timestamp: 1708683840000, + value: '2878', + }, + { + timestamp: 1708683240000, + value: '2873', + }, + { + timestamp: 1708683780000, + value: '2867', + }, + { + timestamp: 
1708683660000, + value: '2837', + }, + { + timestamp: 1708683720000, + value: '2831', + }, + { + timestamp: 1708683360000, + value: '2828', + }, + { + timestamp: 1708683300000, + value: '2773', + }, + { + timestamp: 1708683480000, + value: '2772', + }, + { + timestamp: 1708683540000, + value: '2745', + }, + { + timestamp: 1708683420000, + value: '2732', + }, + { + timestamp: 1708683180000, + value: '2729', + }, + { + timestamp: 1708683600000, + value: '2709', + }, + { + timestamp: 1708683900000, + value: '2706', + }, + ], + }, + ], + list: null, + }, + { + queryName: 'F2', + series: [ + { + labels: { + F2: 'F2', + }, + values: [ + { + timestamp: 1708683840000, + value: '504', + }, + { + timestamp: 1708683240000, + value: '505', + }, + { + timestamp: 1708683780000, + value: '503', + }, + { + timestamp: 1708683660000, + value: '514', + }, + { + timestamp: 1708683720000, + value: '514', + }, + { + timestamp: 1708683360000, + value: '513', + }, + { + timestamp: 1708683480000, + value: '524', + }, + { + timestamp: 1708683540000, + value: '535', + }, + { + timestamp: 1708683300000, + value: '496', + }, + { + timestamp: 1708683420000, + value: '537', + }, + { + timestamp: 1708683600000, + value: '551', + }, + { + timestamp: 1708683900000, + value: '543', + }, + { + timestamp: 1708683180000, + value: '-1157', + }, + ], + }, + ], + list: null, + }, + { + queryName: 'F1', + series: [ + { + labels: {}, + labelsArray: null, + values: [ + { + timestamp: 1708683840000, + value: '6260', + }, + { + timestamp: 1708683240000, + value: '6251', + }, + { + timestamp: 1708683780000, + value: '6237', + }, + { + timestamp: 1708683660000, + value: '6188', + }, + { + timestamp: 1708683720000, + value: '6176', + }, + { + timestamp: 1708683360000, + value: '6169', + }, + { + timestamp: 1708683480000, + value: '6068', + }, + { + timestamp: 1708683540000, + value: '6025', + }, + { + timestamp: 1708683300000, + value: '6042', + }, + { + timestamp: 1708683420000, + value: '6001', + }, + { + 
timestamp: 1708683600000, + value: '5969', + }, + { + timestamp: 1708683900000, + value: '5955', + }, + { + timestamp: 1708683180000, + value: '4301', + }, + ], + }, + ], + list: null, + }, + ], + }, + }, + }, + }, + widgetMetaData: [ + { + metric: {}, + values: [ + [1708683840, '6260'], + [1708683240, '6251'], + [1708683780, '6237'], + [1708683660, '6188'], + [1708683720, '6176'], + [1708683360, '6169'], + [1708683480, '6068'], + [1708683540, '6025'], + [1708683300, '6042'], + [1708683420, '6001'], + [1708683600, '5969'], + [1708683900, '5955'], + [1708683180, '4301'], + ], + queryName: 'F1', + legend: 'firstLegend', + }, + { + metric: {}, + values: [ + [1708683240, '3378'], + [1708683300, '3269'], + [1708683360, '3341'], + [1708683420, '3269'], + [1708683480, '3296'], + [1708683540, '3280'], + [1708683600, '3260'], + [1708683660, '3351'], + [1708683720, '3345'], + [1708683780, '3370'], + [1708683840, '3382'], + [1708683900, '3249'], + [1708683960, '212'], + ], + queryName: 'A', + legend: 'A-A', + }, + { + metric: {}, + values: [ + [1708683840, '2878'], + [1708683240, '2873'], + [1708683780, '2867'], + [1708683660, '2837'], + [1708683720, '2831'], + [1708683360, '2828'], + [1708683300, '2773'], + [1708683480, '2772'], + [1708683540, '2745'], + [1708683420, '2732'], + [1708683180, '2729'], + [1708683600, '2709'], + [1708683900, '2706'], + ], + queryName: 'B', + legend: 'B-B', + }, + { + metric: { + F2: 'F2', + }, + values: [ + [1708683840, '504'], + [1708683240, '505'], + [1708683780, '503'], + [1708683660, '514'], + [1708683720, '514'], + [1708683360, '513'], + [1708683480, '524'], + [1708683540, '535'], + [1708683300, '496'], + [1708683420, '537'], + [1708683600, '551'], + [1708683900, '543'], + [1708683180, '-1157'], + ], + queryName: 'F2', + legend: 'F2', + }, + ], + graphsVisibilityStates: [true, true, true, true, true], + panelType: PANEL_TYPES.BAR, +} as GetSeriesProps; + +export const seriesLineChartData = { + apiResponse: { + data: { + result: [ + { + 
metric: {}, + values: [ + [1708683840, '6260'], + [1708683240, '6251'], + [1708683780, '6237'], + [1708683660, '6188'], + [1708683720, '6176'], + [1708683360, '6169'], + [1708683480, '6068'], + [1708683540, '6025'], + [1708683300, '6042'], + [1708683420, '6001'], + [1708683600, '5969'], + [1708683900, '5955'], + [1708683180, '4301'], + ], + queryName: 'F1', + legend: 'firstLegend', + }, + { + metric: {}, + values: [ + [1708683240, '3378'], + [1708683300, '3269'], + [1708683360, '3341'], + [1708683420, '3269'], + [1708683480, '3296'], + [1708683540, '3280'], + [1708683600, '3260'], + [1708683660, '3351'], + [1708683720, '3345'], + [1708683780, '3370'], + [1708683840, '3382'], + [1708683900, '3249'], + [1708683960, '212'], + ], + queryName: 'A', + legend: 'secondLegend', + }, + { + metric: {}, + values: [ + [1708683840, '2878'], + [1708683240, '2873'], + [1708683780, '2867'], + [1708683660, '2837'], + [1708683720, '2831'], + [1708683360, '2828'], + [1708683300, '2773'], + [1708683480, '2772'], + [1708683540, '2745'], + [1708683420, '2732'], + [1708683180, '2729'], + [1708683600, '2709'], + [1708683900, '2706'], + ], + queryName: 'B', + legend: 'thirdLegend', + }, + { + metric: { + F2: 'F2', + }, + values: [ + [1708683840, '504'], + [1708683240, '505'], + [1708683780, '503'], + [1708683660, '514'], + [1708683720, '514'], + [1708683360, '513'], + [1708683480, '524'], + [1708683540, '535'], + [1708683300, '496'], + [1708683420, '537'], + [1708683600, '551'], + [1708683900, '543'], + [1708683180, '-1157'], + ], + queryName: 'F2', + legend: 'forthLength', + }, + ], + resultType: '', + newResult: { + status: 'success', + data: { + resultType: '', + result: [ + { + queryName: 'A', + series: [ + { + labels: {}, + labelsArray: null, + values: [ + { + timestamp: 1708683240000, + value: '3378', + }, + { + timestamp: 1708683300000, + value: '3269', + }, + { + timestamp: 1708683360000, + value: '3341', + }, + { + timestamp: 1708683420000, + value: '3269', + }, + { + timestamp: 
1708683480000, + value: '3296', + }, + { + timestamp: 1708683540000, + value: '3280', + }, + { + timestamp: 1708683600000, + value: '3260', + }, + { + timestamp: 1708683660000, + value: '3351', + }, + { + timestamp: 1708683720000, + value: '3345', + }, + { + timestamp: 1708683780000, + value: '3370', + }, + { + timestamp: 1708683840000, + value: '3382', + }, + { + timestamp: 1708683900000, + value: '3249', + }, + { + timestamp: 1708683960000, + value: '212', + }, + ], + }, + ], + list: null, + }, + { + queryName: 'B', + series: [ + { + labels: {}, + labelsArray: null, + values: [ + { + timestamp: 1708683840000, + value: '2878', + }, + { + timestamp: 1708683240000, + value: '2873', + }, + { + timestamp: 1708683780000, + value: '2867', + }, + { + timestamp: 1708683660000, + value: '2837', + }, + { + timestamp: 1708683720000, + value: '2831', + }, + { + timestamp: 1708683360000, + value: '2828', + }, + { + timestamp: 1708683300000, + value: '2773', + }, + { + timestamp: 1708683480000, + value: '2772', + }, + { + timestamp: 1708683540000, + value: '2745', + }, + { + timestamp: 1708683420000, + value: '2732', + }, + { + timestamp: 1708683180000, + value: '2729', + }, + { + timestamp: 1708683600000, + value: '2709', + }, + { + timestamp: 1708683900000, + value: '2706', + }, + ], + }, + ], + list: null, + }, + { + queryName: 'F2', + series: [ + { + labels: { + F2: 'F2', + }, + values: [ + { + timestamp: 1708683840000, + value: '504', + }, + { + timestamp: 1708683240000, + value: '505', + }, + { + timestamp: 1708683780000, + value: '503', + }, + { + timestamp: 1708683660000, + value: '514', + }, + { + timestamp: 1708683720000, + value: '514', + }, + { + timestamp: 1708683360000, + value: '513', + }, + { + timestamp: 1708683480000, + value: '524', + }, + { + timestamp: 1708683540000, + value: '535', + }, + { + timestamp: 1708683300000, + value: '496', + }, + { + timestamp: 1708683420000, + value: '537', + }, + { + timestamp: 1708683600000, + value: '551', + }, + { + 
timestamp: 1708683900000, + value: '543', + }, + { + timestamp: 1708683180000, + value: '-1157', + }, + ], + }, + ], + list: null, + }, + { + queryName: 'F1', + series: [ + { + labels: {}, + labelsArray: null, + values: [ + { + timestamp: 1708683840000, + value: '6260', + }, + { + timestamp: 1708683240000, + value: '6251', + }, + { + timestamp: 1708683780000, + value: '6237', + }, + { + timestamp: 1708683660000, + value: '6188', + }, + { + timestamp: 1708683720000, + value: '6176', + }, + { + timestamp: 1708683360000, + value: '6169', + }, + { + timestamp: 1708683480000, + value: '6068', + }, + { + timestamp: 1708683540000, + value: '6025', + }, + { + timestamp: 1708683300000, + value: '6042', + }, + { + timestamp: 1708683420000, + value: '6001', + }, + { + timestamp: 1708683600000, + value: '5969', + }, + { + timestamp: 1708683900000, + value: '5955', + }, + { + timestamp: 1708683180000, + value: '4301', + }, + ], + }, + ], + list: null, + }, + ], + }, + }, + }, + }, + widgetMetaData: [ + { + metric: {}, + values: [ + [1708683840, '6260'], + [1708683240, '6251'], + [1708683780, '6237'], + [1708683660, '6188'], + [1708683720, '6176'], + [1708683360, '6169'], + [1708683480, '6068'], + [1708683540, '6025'], + [1708683300, '6042'], + [1708683420, '6001'], + [1708683600, '5969'], + [1708683900, '5955'], + [1708683180, '4301'], + ], + queryName: 'F1', + legend: 'firstLegend', + }, + { + metric: {}, + values: [ + [1708683240, '3378'], + [1708683300, '3269'], + [1708683360, '3341'], + [1708683420, '3269'], + [1708683480, '3296'], + [1708683540, '3280'], + [1708683600, '3260'], + [1708683660, '3351'], + [1708683720, '3345'], + [1708683780, '3370'], + [1708683840, '3382'], + [1708683900, '3249'], + [1708683960, '212'], + ], + queryName: 'A', + legend: 'A-A', + }, + { + metric: {}, + values: [ + [1708683840, '2878'], + [1708683240, '2873'], + [1708683780, '2867'], + [1708683660, '2837'], + [1708683720, '2831'], + [1708683360, '2828'], + [1708683300, '2773'], + [1708683480, 
'2772'], + [1708683540, '2745'], + [1708683420, '2732'], + [1708683180, '2729'], + [1708683600, '2709'], + [1708683900, '2706'], + ], + queryName: 'B', + legend: 'B-B', + }, + { + metric: { + F2: 'F2', + }, + values: [ + [1708683840, '504'], + [1708683240, '505'], + [1708683780, '503'], + [1708683660, '514'], + [1708683720, '514'], + [1708683360, '513'], + [1708683480, '524'], + [1708683540, '535'], + [1708683300, '496'], + [1708683420, '537'], + [1708683600, '551'], + [1708683900, '543'], + [1708683180, '-1157'], + ], + queryName: 'F2', + legend: 'F2', + }, + ], + graphsVisibilityStates: [true, true, true, true, true], + panelType: PANEL_TYPES.TIME_SERIES, +} as GetSeriesProps; diff --git a/frontend/src/lib/uPlotLib/utils/tests/__mocks__/uplotChartOptionsData.ts b/frontend/src/lib/uPlotLib/utils/tests/__mocks__/uplotChartOptionsData.ts new file mode 100644 index 0000000000..d3682939a4 --- /dev/null +++ b/frontend/src/lib/uPlotLib/utils/tests/__mocks__/uplotChartOptionsData.ts @@ -0,0 +1,453 @@ +import { PANEL_TYPES } from 'constants/queryBuilder'; +import { GetUPlotChartOptions } from 'lib/uPlotLib/getUplotChartOptions'; + +export const inputPropsTimeSeries = { + id: '', + dimensions: { + width: 400, + height: 288, + }, + isDarkMode: true, + apiResponse: { + data: { + result: [ + { + metric: { + A: 'A', + }, + values: [ + [1708623120, '122'], + [1708623180, '112'], + [1708623240, '106'], + [1708623300, '106'], + [1708623360, '116'], + [1708623420, '110'], + [1708623480, '110'], + [1708623540, '114'], + [1708623600, '114'], + [1708623660, '118'], + [1708623720, '110'], + [1708623780, '112'], + [1708623840, '116'], + [1708623900, '104'], + [1708623960, '106'], + [1708624020, '120'], + [1708624080, '110'], + [1708624140, '112'], + [1708624200, '110'], + [1708624260, '112'], + [1708624320, '110'], + [1708624380, '112'], + [1708624440, '108'], + [1708624500, '110'], + [1708624560, '114'], + [1708624620, '104'], + [1708624680, '122'], + [1708624740, '112'], + 
[1708624800, '104'], + [1708624860, '90'], + ], + queryName: 'A', + legend: 'A', + }, + ], + resultType: '', + newResult: { + status: 'success', + data: { + resultType: '', + result: [ + { + queryName: 'A', + series: [ + { + labels: { + A: 'A', + }, + values: [ + { + timestamp: 1708623120000, + value: '122', + }, + { + timestamp: 1708623180000, + value: '112', + }, + { + timestamp: 1708623240000, + value: '106', + }, + { + timestamp: 1708623300000, + value: '106', + }, + { + timestamp: 1708623360000, + value: '116', + }, + { + timestamp: 1708623420000, + value: '110', + }, + { + timestamp: 1708623480000, + value: '110', + }, + { + timestamp: 1708623540000, + value: '114', + }, + { + timestamp: 1708623600000, + value: '114', + }, + { + timestamp: 1708623660000, + value: '118', + }, + { + timestamp: 1708623720000, + value: '110', + }, + { + timestamp: 1708623780000, + value: '112', + }, + { + timestamp: 1708623840000, + value: '116', + }, + { + timestamp: 1708623900000, + value: '104', + }, + { + timestamp: 1708623960000, + value: '106', + }, + { + timestamp: 1708624020000, + value: '120', + }, + { + timestamp: 1708624080000, + value: '110', + }, + { + timestamp: 1708624140000, + value: '112', + }, + { + timestamp: 1708624200000, + value: '110', + }, + { + timestamp: 1708624260000, + value: '112', + }, + { + timestamp: 1708624320000, + value: '110', + }, + { + timestamp: 1708624380000, + value: '112', + }, + { + timestamp: 1708624440000, + value: '108', + }, + { + timestamp: 1708624500000, + value: '110', + }, + { + timestamp: 1708624560000, + value: '114', + }, + { + timestamp: 1708624620000, + value: '104', + }, + { + timestamp: 1708624680000, + value: '122', + }, + { + timestamp: 1708624740000, + value: '112', + }, + { + timestamp: 1708624800000, + value: '104', + }, + { + timestamp: 1708624860000, + value: '90', + }, + ], + }, + ], + list: null, + }, + ], + }, + }, + }, + }, + yAxisUnit: 'none', + minTimeScale: 1708623105, + maxTimeScale: 1708624905, + 
graphsVisibilityStates: [true, true], + thresholds: [], + softMax: null, + softMin: null, + panelType: PANEL_TYPES.TIME_SERIES, +} as GetUPlotChartOptions; + +export const inputPropsBar = { + id: '', + dimensions: { + width: 400, + height: 288, + }, + isDarkMode: true, + apiResponse: { + data: { + result: [ + { + metric: { + A: 'A', + }, + values: [ + [1708623120, '122'], + [1708623180, '112'], + [1708623240, '106'], + [1708623300, '106'], + [1708623360, '116'], + [1708623420, '110'], + [1708623480, '110'], + [1708623540, '114'], + [1708623600, '114'], + [1708623660, '118'], + [1708623720, '110'], + [1708623780, '112'], + [1708623840, '116'], + [1708623900, '104'], + [1708623960, '106'], + [1708624020, '120'], + [1708624080, '110'], + [1708624140, '112'], + [1708624200, '110'], + [1708624260, '112'], + [1708624320, '110'], + [1708624380, '112'], + [1708624440, '108'], + [1708624500, '110'], + [1708624560, '114'], + [1708624620, '104'], + [1708624680, '122'], + [1708624740, '112'], + [1708624800, '104'], + [1708624860, '90'], + ], + queryName: 'A', + legend: 'A', + }, + ], + resultType: '', + newResult: { + status: 'success', + data: { + resultType: '', + result: [ + { + queryName: 'A', + series: [ + { + labels: { + A: 'A', + }, + values: [ + { + timestamp: 1708623120000, + value: '122', + }, + { + timestamp: 1708623180000, + value: '112', + }, + { + timestamp: 1708623240000, + value: '106', + }, + { + timestamp: 1708623300000, + value: '106', + }, + { + timestamp: 1708623360000, + value: '116', + }, + { + timestamp: 1708623420000, + value: '110', + }, + { + timestamp: 1708623480000, + value: '110', + }, + { + timestamp: 1708623540000, + value: '114', + }, + { + timestamp: 1708623600000, + value: '114', + }, + { + timestamp: 1708623660000, + value: '118', + }, + { + timestamp: 1708623720000, + value: '110', + }, + { + timestamp: 1708623780000, + value: '112', + }, + { + timestamp: 1708623840000, + value: '116', + }, + { + timestamp: 1708623900000, + value: '104', + 
}, + { + timestamp: 1708623960000, + value: '106', + }, + { + timestamp: 1708624020000, + value: '120', + }, + { + timestamp: 1708624080000, + value: '110', + }, + { + timestamp: 1708624140000, + value: '112', + }, + { + timestamp: 1708624200000, + value: '110', + }, + { + timestamp: 1708624260000, + value: '112', + }, + { + timestamp: 1708624320000, + value: '110', + }, + { + timestamp: 1708624380000, + value: '112', + }, + { + timestamp: 1708624440000, + value: '108', + }, + { + timestamp: 1708624500000, + value: '110', + }, + { + timestamp: 1708624560000, + value: '114', + }, + { + timestamp: 1708624620000, + value: '104', + }, + { + timestamp: 1708624680000, + value: '122', + }, + { + timestamp: 1708624740000, + value: '112', + }, + { + timestamp: 1708624800000, + value: '104', + }, + { + timestamp: 1708624860000, + value: '90', + }, + ], + }, + ], + list: null, + }, + ], + }, + }, + }, + }, + yAxisUnit: 'none', + minTimeScale: 1708623105, + maxTimeScale: 1708624905, + graphsVisibilityStates: [true, true], + thresholds: [], + softMax: null, + softMin: null, + panelType: PANEL_TYPES.BAR, +} as GetUPlotChartOptions; + +export const seriesDataTimeSeries = [ + { + label: 'Timestamp', + stroke: 'purple', + }, + { + drawStyle: 'line', + lineInterpolation: 'spline', + show: true, + label: 'A', + stroke: '#6495ED', + width: 2, + spanGaps: true, + points: { + size: 5, + show: false, + stroke: '#6495ED', + }, + }, +]; + +export const seriesDataBarChart = [ + { + label: 'Timestamp', + stroke: 'purple', + }, + { + drawStyle: 'bars', + lineInterpolation: null, + show: true, + label: 'A', + fill: '#6495ED40', + stroke: '#6495ED', + width: 2, + spanGaps: true, + points: { + size: 5, + show: false, + stroke: '#6495ED', + }, + }, +]; diff --git a/frontend/src/lib/uPlotLib/utils/tests/getSeriesData.test.ts b/frontend/src/lib/uPlotLib/utils/tests/getSeriesData.test.ts new file mode 100644 index 0000000000..75ee3c7f58 --- /dev/null +++ 
b/frontend/src/lib/uPlotLib/utils/tests/getSeriesData.test.ts @@ -0,0 +1,32 @@ +/* eslint-disable @typescript-eslint/ban-ts-comment */ +/* eslint-disable @typescript-eslint/explicit-function-return-type */ +import getSeries from '../getSeriesData'; +import { + seriesBarChartData, + seriesLineChartData, +} from './__mocks__/seriesData'; + +jest.mock('../getRenderer', () => jest.fn().mockImplementation(() => () => {})); + +describe('Get Series Data', () => { + test('Should return series data for uplot chart', () => { + const seriesData = getSeries(seriesBarChartData); + expect(seriesData.length).toBe(5); + expect(seriesData[1].label).toBe('firstLegend'); + expect(seriesData[1].show).toBe(true); + expect(seriesData[1].fill).toBe('#C7158540'); + expect(seriesData[1].width).toBe(2); + }); + + test('Should return series drawline bar chart for panel type barchart', () => { + const seriesData = getSeries(seriesBarChartData); + // @ts-ignore + expect(seriesData[1].drawStyle).toBe('bars'); + }); + + test('Should return seris drawline line chart for panel type time series', () => { + const seriesData = getSeries(seriesLineChartData); + // @ts-ignore + expect(seriesData[1].drawStyle).toBe('line'); + }); +}); diff --git a/frontend/src/lib/uPlotLib/utils/tests/getUplotChartOptions.test.ts b/frontend/src/lib/uPlotLib/utils/tests/getUplotChartOptions.test.ts new file mode 100644 index 0000000000..a955d787ac --- /dev/null +++ b/frontend/src/lib/uPlotLib/utils/tests/getUplotChartOptions.test.ts @@ -0,0 +1,68 @@ +/* eslint-disable @typescript-eslint/ban-ts-comment */ +import { PANEL_TYPES } from 'constants/queryBuilder'; +import { getUPlotChartOptions } from 'lib/uPlotLib/getUplotChartOptions'; + +import { + inputPropsTimeSeries, + seriesDataBarChart, + seriesDataTimeSeries, +} from './__mocks__/uplotChartOptionsData'; + +jest.mock('../../plugins/tooltipPlugin', () => jest.fn().mockReturnValue({})); +jest.mock('../getSeriesData', () => + jest.fn().mockImplementation((props) => { + 
const { panelType } = props; + + if (panelType === PANEL_TYPES.TIME_SERIES) { + return seriesDataTimeSeries; + } + return seriesDataBarChart; + }), +); + +describe('getUPlotChartOptions', () => { + test('should return uPlot options', () => { + const options = getUPlotChartOptions(inputPropsTimeSeries); + expect(options.legend?.isolate).toBe(true); + expect(options.width).toBe(inputPropsTimeSeries.dimensions.width); + expect(options.height).toBe(inputPropsTimeSeries.dimensions.height - 30); + expect(options.axes?.length).toBe(2); + expect(options.series[1].label).toBe('A'); + }); + + test('Should return line chart as drawStyle for time series', () => { + const options = getUPlotChartOptions(inputPropsTimeSeries); + // @ts-ignore + expect(options.series[1].drawStyle).toBe('line'); + // @ts-ignore + expect(options.series[1].lineInterpolation).toBe('spline'); + // @ts-ignore + expect(options.series[1].show).toBe(true); + expect(options.series[1].label).toBe('A'); + expect(options.series[1].stroke).toBe('#6495ED'); + expect(options.series[1].width).toBe(2); + expect(options.series[1].spanGaps).toBe(true); + // @ts-ignore + expect(options.series[1].points.size).toBe(5); + }); + + test('should return bar chart as drawStyle for panel type bar', () => { + const options = getUPlotChartOptions({ + ...inputPropsTimeSeries, + panelType: PANEL_TYPES.BAR, + }); + // @ts-ignore + expect(options.series[1].drawStyle).toBe('bars'); + // @ts-ignore + expect(options.series[1].lineInterpolation).toBe(null); + // @ts-ignore + expect(options.series[1].show).toBe(true); + expect(options.series[1].label).toBe('A'); + expect(options.series[1].fill).toBe('#6495ED40'); + expect(options.series[1].stroke).toBe('#6495ED'); + expect(options.series[1].width).toBe(2); + expect(options.series[1].spanGaps).toBe(true); + // @ts-ignore + expect(options.series[1].points.size).toBe(5); + }); +}); diff --git a/frontend/src/providers/QueryBuilder.tsx b/frontend/src/providers/QueryBuilder.tsx index 
e7177a0536..80bc673a83 100644 --- a/frontend/src/providers/QueryBuilder.tsx +++ b/frontend/src/providers/QueryBuilder.tsx @@ -100,7 +100,13 @@ export function QueryBuilderProvider({ null, ); - const [panelType, setPanelType] = useState(null); + const panelTypeQueryParams = urlQuery.get( + QueryParams.panelTypes, + ) as PANEL_TYPES | null; + + const [panelType, setPanelType] = useState( + panelTypeQueryParams, + ); const [currentQuery, setCurrentQuery] = useState(queryState); const [stagedQuery, setStagedQuery] = useState(null); diff --git a/frontend/src/utils/getGraphType.ts b/frontend/src/utils/getGraphType.ts new file mode 100644 index 0000000000..fa37bf8f4f --- /dev/null +++ b/frontend/src/utils/getGraphType.ts @@ -0,0 +1,9 @@ +import { PANEL_TYPES } from 'constants/queryBuilder'; + +export const getGraphType = (panelType: PANEL_TYPES): PANEL_TYPES => { + // backend don't support graphType as bar, as we consume time series data, sending graphType as time_series whenever we use bar as panel_type + if (panelType === PANEL_TYPES.BAR) { + return PANEL_TYPES.TIME_SERIES; + } + return panelType; +}; diff --git a/frontend/src/utils/getSortedSeriesData.ts b/frontend/src/utils/getSortedSeriesData.ts new file mode 100644 index 0000000000..98336e8b1f --- /dev/null +++ b/frontend/src/utils/getSortedSeriesData.ts @@ -0,0 +1,20 @@ +import { QueryData } from 'types/api/widgets/getQuery'; + +// Sorting the series data in desending matter for plotting cummulative bar chart. 
+export const getSortedSeriesData = ( + result: QueryData[] | undefined, +): QueryData[] => { + const seriesList = result || []; + + return seriesList.sort((a, b) => { + if (a.values.length === 0) return 1; + if (b.values.length === 0) return -1; + const avgA = + a.values.reduce((acc, curr) => acc + parseFloat(curr[1]), 0) / + a.values.length; + const avgB = + b.values.reduce((acc, curr) => acc + parseFloat(curr[1]), 0) / + b.values.length; + return avgB - avgA; + }); +}; From 8add13743aaf2ab24ba3622db60f82ba8deaf742 Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Wed, 28 Feb 2024 15:06:47 +0530 Subject: [PATCH 13/16] fix: remove unknown setting from connection string (#4612) --- deploy/docker-swarm/clickhouse-setup/docker-compose.yaml | 2 +- deploy/docker/clickhouse-setup/docker-compose.yaml | 2 +- pkg/query-service/tests/test-deploy/docker-compose.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml index edc5ead22c..7345f5b338 100644 --- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml @@ -160,7 +160,7 @@ services: - ../dashboards:/root/config/dashboards - ./data/signoz/:/var/lib/signoz/ environment: - - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces + - ClickHouseUrl=tcp://clickhouse:9000 - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/ - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db - DASHBOARDS_PATH=/root/config/dashboards diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index cb77c4c024..91474969e4 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -179,7 +179,7 @@ services: - ../dashboards:/root/config/dashboards - ./data/signoz/:/var/lib/signoz/ environment: - - 
ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces + - ClickHouseUrl=tcp://clickhouse:9000 - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/ - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db - DASHBOARDS_PATH=/root/config/dashboards diff --git a/pkg/query-service/tests/test-deploy/docker-compose.yaml b/pkg/query-service/tests/test-deploy/docker-compose.yaml index 7c9b50199f..3c64daad7b 100644 --- a/pkg/query-service/tests/test-deploy/docker-compose.yaml +++ b/pkg/query-service/tests/test-deploy/docker-compose.yaml @@ -168,7 +168,7 @@ services: - ../dashboards:/root/config/dashboards - ./data/signoz/:/var/lib/signoz/ environment: - - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces + - ClickHouseUrl=tcp://clickhouse:9000 - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/ - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db - DASHBOARDS_PATH=/root/config/dashboards From fe0ba5e3ba09112de6dbb2a9a2e208a455d53f73 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Wed, 28 Feb 2024 17:37:30 +0530 Subject: [PATCH 14/16] fix: create PAT not null error (#4613) * fix: create PAT not null error allow all admins to view all pats * fix: allow revoking of token by all admin users --- ee/query-service/app/api/pat.go | 15 ++------------- ee/query-service/dao/interface.go | 2 +- ee/query-service/dao/sqlite/pat.go | 10 ++++++---- 3 files changed, 9 insertions(+), 18 deletions(-) diff --git a/ee/query-service/app/api/pat.go b/ee/query-service/app/api/pat.go index 49ed36f092..ea43f47fb0 100644 --- a/ee/query-service/app/api/pat.go +++ b/ee/query-service/app/api/pat.go @@ -136,7 +136,7 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) { return } zap.S().Infof("Get PATs for user: %+v", user.Id) - pats, apierr := ah.AppDao().ListPATs(ctx, user.Id) + pats, apierr := ah.AppDao().ListPATs(ctx) if apierr != nil { RespondError(w, apierr, nil) return @@ -155,18 +155,7 @@ func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) { }, 
nil) return } - pat, apierr := ah.AppDao().GetPATByID(ctx, id) - if apierr != nil { - RespondError(w, apierr, nil) - return - } - if pat.UserID != user.Id { - RespondError(w, &model.ApiError{ - Typ: model.ErrorUnauthorized, - Err: fmt.Errorf("unauthorized PAT revoke request"), - }, nil) - return - } + zap.S().Debugf("Revoke PAT with id: %+v", id) if apierr := ah.AppDao().RevokePAT(ctx, id, user.Id); apierr != nil { RespondError(w, apierr, nil) diff --git a/ee/query-service/dao/interface.go b/ee/query-service/dao/interface.go index 78155bc23a..695ff860a0 100644 --- a/ee/query-service/dao/interface.go +++ b/ee/query-service/dao/interface.go @@ -39,6 +39,6 @@ type ModelDao interface { UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError) GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError) - ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) + ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError } diff --git a/ee/query-service/dao/sqlite/pat.go b/ee/query-service/dao/sqlite/pat.go index a1752ea238..b2af1640c3 100644 --- a/ee/query-service/dao/sqlite/pat.go +++ b/ee/query-service/dao/sqlite/pat.go @@ -13,7 +13,7 @@ import ( func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) { result, err := m.DB().ExecContext(ctx, - "INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)", + "INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", p.UserID, p.Token, p.Role, @@ -22,6 +22,8 @@ func (m *modelDao) 
CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem p.ExpiresAt, p.UpdatedAt, p.UpdatedByUserID, + p.LastUsed, + p.Revoked, ) if err != nil { zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err)) @@ -78,11 +80,11 @@ func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed return nil } -func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) { +func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) { pats := []model.PAT{} - if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE user_id=? and revoked=false ORDER by updated_at DESC;`, userID); err != nil { - zap.S().Errorf("Failed to fetch PATs for user: %s, err: %v", userID, zap.Error(err)) + if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil { + zap.S().Errorf("Failed to fetch PATs err: %v", zap.Error(err)) return nil, model.InternalError(fmt.Errorf("failed to fetch PATs")) } for i := range pats { From 4eb1948e4cdeb91cd8775643e6a2f204dfab0530 Mon Sep 17 00:00:00 2001 From: CheetoDa <31571545+Calm-Rock@users.noreply.github.com> Date: Wed, 28 Feb 2024 18:20:15 +0530 Subject: [PATCH 15/16] feat: added aws monitoring section (#4614) * feat: added aws monitoring section * chore: fix lint issues * chore: fix lint issues * feat: handle redirect for aws monitoring --------- Co-authored-by: Yunus M --- frontend/public/Logos/ec2.svg | 18 +++ frontend/public/Logos/ecs.svg | 18 +++ frontend/public/Logos/eks.svg | 2 + ...cationLogs-linuxamd64-configureReceiver.md | 31 ++++ ...ionLogs-linuxamd64-installOtelCollector.md | 113 ++++++++++++++ ...icationLogs-linuxamd64-runOtelCollector.md | 15 ++ ...cationLogs-linuxarm64-configureReceiver.md | 31 ++++ ...ionLogs-linuxarm64-installOtelCollector.md | 114 ++++++++++++++ ...icationLogs-linuxarm64-runOtelCollector.md | 15 ++ 
...cationLogs-macosamd64-configureReceiver.md | 31 ++++ ...ionLogs-macosamd64-installOtelCollector.md | 113 ++++++++++++++ ...icationLogs-macosamd64-runOtelCollector.md | 15 ++ ...cationLogs-macosarm64-configureReceiver.md | 31 ++++ ...ionLogs-macosarm64-installOtelCollector.md | 113 ++++++++++++++ ...icationLogs-macosarm64-runOtelCollector.md | 15 ++ .../hostmetrics-configureHostmetricsJson.md | 15 ++ .../hostmetrics-setupOtelCollector.md | 113 ++++++++++++++ .../hostmetrics-configureHostmetricsJson.md | 15 ++ .../hostmetrics-setupOtelCollector.md | 114 ++++++++++++++ .../hostmetrics-configureHostmetricsJson.md | 15 ++ .../hostmetrics-setupOtelCollector.md | 113 ++++++++++++++ .../hostmetrics-configureHostmetricsJson.md | 15 ++ .../hostmetrics-setupOtelCollector.md | 112 ++++++++++++++ .../md-docs/ecsEc2-createDaemonService.md | 68 +++++++++ .../ECSEc2/md-docs/ecsEc2-createOtelConfig.md | 28 ++++ .../ECSEc2/md-docs/ecsEc2-sendData.md | 90 +++++++++++ .../md-docs/ecsEc2-setupDeamonService.md | 21 +++ .../ecsExternal-createDaemonService.md | 68 +++++++++ .../md-docs/ecsExternal-createOtelConfig.md | 28 ++++ .../md-docs/ecsExternal-sendData.md | 106 +++++++++++++ .../md-docs/ecsExternal-setupDeamonService.md | 21 +++ .../md-docs/ecsFargate-createOtelConfig.md | 30 ++++ ...Fargate-createSidecarCollectorContainer.md | 143 ++++++++++++++++++ .../ecsFargate-deployTaskDefinition.md | 10 ++ .../ECSFargate/md-docs/ecsFargate-sendData.md | 84 ++++++++++ .../ECSFargate/md-docs/ecsFargate-sendLogs.md | 133 ++++++++++++++++ .../EKS/eks-installOtelCollector.md | 24 +++ .../EKS/eks-monitorUsingDashboard.md | 16 ++ ...cationLogs-linuxamd64-configureReceiver.md | 2 +- ...cationLogs-linuxarm64-configureReceiver.md | 2 +- ...cationLogs-macosamd64-configureReceiver.md | 2 +- ...cationLogs-macosarm64-configureReceiver.md | 2 +- .../OnboardingContainer.tsx | 16 ++ .../Steps/MarkdownStep/MarkdownStep.tsx | 3 + .../ModuleStepsContainer.tsx | 67 ++++++++ 
.../constants/awsMonitoringDocFilePaths.ts | 127 ++++++++++++++++ .../constants/stepsConfig.tsx | 48 ++++++ .../utils/dataSourceUtils.ts | 46 +++++- .../OnboardingContainer/utils/getSteps.ts | 34 +++++ 49 files changed, 2401 insertions(+), 5 deletions(-) create mode 100644 frontend/public/Logos/ec2.svg create mode 100644 frontend/public/Logos/ecs.svg create mode 100644 frontend/public/Logos/eks.svg create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-configureReceiver.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-installOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-runOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-configureReceiver.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-installOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-runOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-configureReceiver.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-installOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-runOtelCollector.md create mode 100644 
frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-configureReceiver.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-installOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-runOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-configureHostmetricsJson.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-setupOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-configureHostmetricsJson.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-setupOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-configureHostmetricsJson.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-setupOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-configureHostmetricsJson.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-setupOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createDaemonService.md create mode 100644 
frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createOtelConfig.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-sendData.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-setupDeamonService.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createDaemonService.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createOtelConfig.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-sendData.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-setupDeamonService.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createOtelConfig.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createSidecarCollectorContainer.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-deployTaskDefinition.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendData.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendLogs.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EKS/eks-installOtelCollector.md create mode 100644 frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EKS/eks-monitorUsingDashboard.md create mode 100644 frontend/src/container/OnboardingContainer/constants/awsMonitoringDocFilePaths.ts diff --git a/frontend/public/Logos/ec2.svg b/frontend/public/Logos/ec2.svg new file mode 100644 
index 0000000000..14f083fd6d --- /dev/null +++ b/frontend/public/Logos/ec2.svg @@ -0,0 +1,18 @@ + + + + Icon-Architecture/64/Arch_Amazon-EC2_64 + Created with Sketch. + + + + + + + + + + + + + \ No newline at end of file diff --git a/frontend/public/Logos/ecs.svg b/frontend/public/Logos/ecs.svg new file mode 100644 index 0000000000..c2ef4c212f --- /dev/null +++ b/frontend/public/Logos/ecs.svg @@ -0,0 +1,18 @@ + + + + Icon-Architecture/64/Arch_Amazon-Elastic-Container-Service_64 + Created with Sketch. + + + + + + + + + + + + + \ No newline at end of file diff --git a/frontend/public/Logos/eks.svg b/frontend/public/Logos/eks.svg new file mode 100644 index 0000000000..b4a9336b1e --- /dev/null +++ b/frontend/public/Logos/eks.svg @@ -0,0 +1,2 @@ + + \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-configureReceiver.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-configureReceiver.md new file mode 100644 index 0000000000..435465fb71 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-configureReceiver.md @@ -0,0 +1,31 @@ +### Step 1: Add filelog receiver to `config.yaml` file of otel collector + +Add the filelog receiver in the receivers section of `config.yaml` file of the **`otecol-contrib`** directory that you created in the previous step + +```bash +receivers: + ... + filelog/app: + include: [ /tmp/app.log ] + start_at: end +... +``` +Replace `/tmp/app.log` with the path to your log file. + +Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. 
+ +For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). + +  +  + +### Step 2: Include filelog receiver in the Pipeline +We will modify our pipeline inside `config.yaml` to include the receiver we have created above. +```bash +service: + .... + logs: + receivers: [otlp, filelog/app] + processors: [batch] + exporters: [otlp] +``` \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-installOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-installOtelCollector.md new file mode 100644 index 0000000000..cc45b65165 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-installOtelCollector.md @@ -0,0 +1,113 @@ +### Prerequisite +- A Linux based EC2 Instance + +  + +### Setup OpenTelemetry Binary as an agent + +### Step 1: Download otel-collector tar.gz +```bash +wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_amd64.tar.gz +``` + +### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder +```bash +mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_amd64.tar.gz -C otelcol-contrib +``` + +### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it +```bash +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + hostmetrics: + collection_interval: 60s + scrapers: + cpu: {} + disk: {} + load: {} + filesystem: {} + memory: {} + network: {} + paging: {} + process: + mute_process_name_error: true + mute_process_exe_error: true + mute_process_io_error: true + 
processes: {} + prometheus: + config: + global: + scrape_interval: 60s + scrape_configs: + - job_name: otel-collector-binary + static_configs: + - targets: + # - localhost:8888 +processors: + batch: + send_batch_size: 1000 + timeout: 10s + # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md + resourcedetection: + detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure. + # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. + timeout: 2s + system: + hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback +extensions: + health_check: {} + zpages: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" + logging: + verbosity: normal +service: + telemetry: + metrics: + address: 0.0.0.0:8888 + extensions: [health_check, zpages] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics/internal: + receivers: [prometheus, hostmetrics] + processors: [resourcedetection, batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` +### Step 4: Run OTel Collector + Run this command inside the `otelcol-contrib` directory + +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" 
> otel-pid +``` + +### (Optional Step): View last 50 lines of `otelcol` logs +```bash +tail -f -n 50 otelcol-output.log +``` + +### (Optional Step): Stop `otelcol` +```bash +kill "$(< otel-pid)" +``` + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-runOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-runOtelCollector.md new file mode 100644 index 0000000000..8f0ae7fdc1 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-runOtelCollector.md @@ -0,0 +1,15 @@ +### Restart the Otel Collector + +Restart the otel collector so that new changes are saved and you should see the logs in the dashboard. + +Kill the process if it was already running using the below command +```bash +kill "$(< otel-pid)" +``` + +Restart the OTel collector when you’re in the `otelcol-contrib` folder +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!"
> otel-pid +``` + +Now you should be able to see the Logs on your SigNoz Cloud UI diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-configureReceiver.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-configureReceiver.md new file mode 100644 index 0000000000..435465fb71 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-configureReceiver.md @@ -0,0 +1,31 @@ +### Step 1: Add filelog receiver to `config.yaml` file of otel collector + +Add the filelog receiver in the receivers section of `config.yaml` file of the **`otelcol-contrib`** directory that you created in the previous step + +```bash +receivers: + ... + filelog/app: + include: [ /tmp/app.log ] + start_at: end +... +``` +Replace `/tmp/app.log` with the path to your log file. + +Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. + +For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). + +  +  + +### Step 2: Include filelog receiver in the Pipeline +We will modify our pipeline inside `config.yaml` to include the receiver we have created above. +```bash +service: + ....
+ logs: + receivers: [otlp, filelog/app] + processors: [batch] + exporters: [otlp] +``` \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-installOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-installOtelCollector.md new file mode 100644 index 0000000000..1685719272 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-installOtelCollector.md @@ -0,0 +1,114 @@ +### Prerequisite +- A Linux based EC2 Instance + +  + +### Setup OpenTelemetry Binary as an agent + +### Step 1: Download otel-collector tar.gz +```bash +wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_arm64.tar.gz +``` + +### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder +```bash +mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_arm64.tar.gz -C otelcol-contrib +``` + +### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it +```bash +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + hostmetrics: + collection_interval: 60s + scrapers: + cpu: {} + disk: {} + load: {} + filesystem: {} + memory: {} + network: {} + paging: {} + process: + mute_process_name_error: true + mute_process_exe_error: true + mute_process_io_error: true + processes: {} + prometheus: + config: + global: + scrape_interval: 60s + scrape_configs: + - job_name: otel-collector-binary + static_configs: + - targets: + # - localhost:8888 +processors: + batch: + send_batch_size: 1000 + timeout: 10s + # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md + 
resourcedetection: + detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure. + # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. + timeout: 2s + system: + hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback +extensions: + health_check: {} + zpages: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" + logging: + verbosity: normal +service: + telemetry: + metrics: + address: 0.0.0.0:8888 + extensions: [health_check, zpages] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics/internal: + receivers: [prometheus, hostmetrics] + processors: [resourcedetection, batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` + +### Step 4: Run OTel Collector + Run this command inside the `otelcol-contrib` directory + +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" 
> otel-pid +``` + +### (Optional Step): View last 50 lines of `otelcol` logs +```bash +tail -f -n 50 otelcol-output.log +``` + +### (Optional Step): Stop `otelcol` +```bash +kill "$(< otel-pid)" +``` + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-runOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-runOtelCollector.md new file mode 100644 index 0000000000..8f0ae7fdc1 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-runOtelCollector.md @@ -0,0 +1,15 @@ +### Restart the Otel Collector + +Restart the otel collector so that new changes are saved and you should see the logs in the dashboard. + +Kill the process if it was already running using the below command +```bash +kill "$(< otel-pid)" +``` + +Restart the OTel collector when you’re in the `otelcol-contrib` folder +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!"
> otel-pid +``` + +Now you should be able to see the Logs on your SigNoz Cloud UI diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-configureReceiver.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-configureReceiver.md new file mode 100644 index 0000000000..435465fb71 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-configureReceiver.md @@ -0,0 +1,31 @@ +### Step 1: Add filelog receiver to `config.yaml` file of otel collector + +Add the filelog receiver in the receivers section of `config.yaml` file of the **`otelcol-contrib`** directory that you created in the previous step + +```bash +receivers: + ... + filelog/app: + include: [ /tmp/app.log ] + start_at: end +... +``` +Replace `/tmp/app.log` with the path to your log file. + +Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. + +For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). + +  +  + +### Step 2: Include filelog receiver in the Pipeline +We will modify our pipeline inside `config.yaml` to include the receiver we have created above. +```bash +service: + ....
+ logs: + receivers: [otlp, filelog/app] + processors: [batch] + exporters: [otlp] +``` \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-installOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-installOtelCollector.md new file mode 100644 index 0000000000..4d59ad1b2c --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-installOtelCollector.md @@ -0,0 +1,113 @@ +### Prerequisite +- A Linux based EC2 Instance + +  + +### Setup OpenTelemetry Binary as an agent + +### Step 1: Download otel-collector tar.gz +```bash +wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_amd64.tar.gz +``` + +### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder +```bash +mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_amd64.tar.gz -C otelcol-contrib +``` + +### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it +```bash +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + hostmetrics: + collection_interval: 60s + scrapers: + cpu: {} + disk: {} + load: {} + filesystem: {} + memory: {} + network: {} + paging: {} + process: + mute_process_name_error: true + mute_process_exe_error: true + mute_process_io_error: true + processes: {} + prometheus: + config: + global: + scrape_interval: 60s + scrape_configs: + - job_name: otel-collector-binary + static_configs: + - targets: + # - localhost:8888 +processors: + batch: + send_batch_size: 1000 + timeout: 10s + # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md + 
resourcedetection: + detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure. + # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. + timeout: 2s + system: + hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback +extensions: + health_check: {} + zpages: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" + logging: + verbosity: normal +service: + telemetry: + metrics: + address: 0.0.0.0:8888 + extensions: [health_check, zpages] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics/internal: + receivers: [prometheus, hostmetrics] + processors: [resourcedetection, batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` +### Step 4: Run OTel Collector + Run this command inside the `otelcol-contrib` directory + +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" 
> otel-pid +``` + +### (Optional Step): View last 50 lines of `otelcol` logs +```bash +tail -f -n 50 otelcol-output.log +``` + +### (Optional Step): Stop `otelcol` +```bash +kill "$(< otel-pid)" +``` + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-runOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-runOtelCollector.md new file mode 100644 index 0000000000..8f0ae7fdc1 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-runOtelCollector.md @@ -0,0 +1,15 @@ +### Restart the Otel Collector + +Restart the otel collector so that new changes are saved and you should see the logs in the dashboard. + +Kill the process if it was already running using the below command +```bash +kill "$(< otel-pid)" +``` + +Restart the OTel collector when you’re in the `otelcol-contrib` folder +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!"
> otel-pid +``` + +Now you should be able to see the Logs on your SigNoz Cloud UI diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-configureReceiver.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-configureReceiver.md new file mode 100644 index 0000000000..435465fb71 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-configureReceiver.md @@ -0,0 +1,31 @@ +### Step 1: Add filelog receiver to `config.yaml` file of otel collector + +Add the filelog receiver in the receivers section of `config.yaml` file of the **`otelcol-contrib`** directory that you created in the previous step + +```bash +receivers: + ... + filelog/app: + include: [ /tmp/app.log ] + start_at: end +... +``` +Replace `/tmp/app.log` with the path to your log file. + +Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. + +For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). + +  +  + +### Step 2: Include filelog receiver in the Pipeline
We will modify our pipeline inside `config.yaml` to include the receiver we have created above. +```bash +service: + ....
+ logs: + receivers: [otlp, filelog/app] + processors: [batch] + exporters: [otlp] +``` \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-installOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-installOtelCollector.md new file mode 100644 index 0000000000..b4b36cb3c6 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-installOtelCollector.md @@ -0,0 +1,113 @@ +### Prerequisite +- A Linux based EC2 Instance + +  + +### Setup OpenTelemetry Binary as an agent + +### Step 1: Download otel-collector tar.gz +```bash +wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_arm64.tar.gz +``` + +### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder +```bash +mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_arm64.tar.gz -C otelcol-contrib +``` + +### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it +```bash +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + hostmetrics: + collection_interval: 60s + scrapers: + cpu: {} + disk: {} + load: {} + filesystem: {} + memory: {} + network: {} + paging: {} + process: + mute_process_name_error: true + mute_process_exe_error: true + mute_process_io_error: true + processes: {} + prometheus: + config: + global: + scrape_interval: 60s + scrape_configs: + - job_name: otel-collector-binary + static_configs: + - targets: + # - localhost:8888 +processors: + batch: + send_batch_size: 1000 + timeout: 10s + # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md + 
resourcedetection: + detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure. + # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. + timeout: 2s + system: + hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback +extensions: + health_check: {} + zpages: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" + logging: + verbosity: normal +service: + telemetry: + metrics: + address: 0.0.0.0:8888 + extensions: [health_check, zpages] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics/internal: + receivers: [prometheus, hostmetrics] + processors: [resourcedetection, batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` +### Step 4: Run OTel Collector + Run this command inside the `otelcol-contrib` directory + +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" 
> otel-pid +``` + +### (Optional Step): View last 50 lines of `otelcol` logs +```bash +tail -f -n 50 otelcol-output.log +``` + +### (Optional Step): Stop `otelcol` +```bash +kill "$(< otel-pid)" +``` + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-runOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-runOtelCollector.md new file mode 100644 index 0000000000..8f0ae7fdc1 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-runOtelCollector.md @@ -0,0 +1,15 @@ +### Restart the Otel Collector + +Restart the otel collector so that new changes are saved and you should see the logs in the dashboard. + +Kill the process if it was already running using the below command +```bash +kill "$(< otel-pid)" +``` + +Restart the OTel collector when you’re in the `otelcol-contrib` folder +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!"
> otel-pid +``` + +Now you should be able to see the Logs on your SigNoz Cloud UI diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-configureHostmetricsJson.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-configureHostmetricsJson.md new file mode 100644 index 0000000000..5be4c4a528 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-configureHostmetricsJson.md @@ -0,0 +1,15 @@ +### Step 1: Download/Copy this hostmetrics JSON file + +Download/Copy the `hostmetrics-with-variable.json` from [here](https://github.com/SigNoz/dashboards/blob/main/hostmetrics/hostmetrics-with-variable.json) + +  +  + +### Step 2: Import hostmetrics JSON file to SigNoz Cloud + +Once you click on the **`Done`** button at the bottom right corner, you'll be taken to the SigNoz UI. + +In SigNoz UI, go to dashboards section and click New Dashboard button and select the import JSON option and upload/paste the `hostmetrics-with-variable.json` file data that you downloaded/copied in last step.
+ + +To know more detailed instructions, checkout the link [here](https://signoz.io/docs/userguide/hostmetrics/) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-setupOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-setupOtelCollector.md new file mode 100644 index 0000000000..eaba01bfd8 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-setupOtelCollector.md @@ -0,0 +1,113 @@ +### Prerequisite +- A running EC2 instance + +  + +### Setup OpenTelemetry Binary as an agent + +### Step 1: Download otel-collector tar.gz +```bash +wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_amd64.tar.gz +``` + +### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder +```bash +mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_amd64.tar.gz -C otelcol-contrib +``` + +### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it +```bash +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + hostmetrics: + collection_interval: 60s + scrapers: + cpu: {} + disk: {} + load: {} + filesystem: {} + memory: {} + network: {} + paging: {} + process: + mute_process_name_error: true + mute_process_exe_error: true + mute_process_io_error: true + processes: {} + prometheus: + config: + global: + scrape_interval: 60s + scrape_configs: + - job_name: otel-collector-binary + static_configs: + - targets: + # - localhost:8888 +processors: + batch: + send_batch_size: 1000 + timeout: 10s + # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md + resourcedetection: + 
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure. + # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. + timeout: 2s + system: + hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback +extensions: + health_check: {} + zpages: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" + logging: + verbosity: normal +service: + telemetry: + metrics: + address: 0.0.0.0:8888 + extensions: [health_check, zpages] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics/internal: + receivers: [prometheus, hostmetrics] + processors: [resourcedetection, batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` +### Step 4: Run OTel Collector + Run this command inside the `otelcol-contrib` directory + +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" 
> otel-pid +``` + +### (Optional Step): View last 50 lines of `otelcol` logs +```bash +tail -f -n 50 otelcol-output.log +``` + +### (Optional Step): Stop `otelcol` +```bash +kill "$(< otel-pid)" +``` + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-configureHostmetricsJson.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-configureHostmetricsJson.md new file mode 100644 index 0000000000..5be4c4a528 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-configureHostmetricsJson.md @@ -0,0 +1,15 @@ +### Step 1: Download/Copy this hostmetrics JSON file + +Download/Copy the `hostmetrics-with-variable.json` from [here](https://github.com/SigNoz/dashboards/blob/main/hostmetrics/hostmetrics-with-variable.json) + +  +  + +### Step 2: Import hostmetrics JSON file to SigNoz Cloud + +Once you click on the **`Done`** button at the bottom right corner, you'll be taken to the SigNoz UI. + +In SigNoz UI, go to dashboards section and click New Dashboard button and select the import JSON option and upload/paste the `hostmetrics-with-variable.json` file data that you downloaded/copied in last step.
+ + +To know more detailed instructions, checkout the link [here](https://signoz.io/docs/userguide/hostmetrics/) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-setupOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-setupOtelCollector.md new file mode 100644 index 0000000000..9d23e61295 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-setupOtelCollector.md @@ -0,0 +1,114 @@ +### Prerequisite +- A running EC2 instance + +  + +### Setup OpenTelemetry Binary as an agent + +### Step 1: Download otel-collector tar.gz +```bash +wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_arm64.tar.gz +``` + +### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder +```bash +mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_arm64.tar.gz -C otelcol-contrib +``` + +### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it +```bash +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + hostmetrics: + collection_interval: 60s + scrapers: + cpu: {} + disk: {} + load: {} + filesystem: {} + memory: {} + network: {} + paging: {} + process: + mute_process_name_error: true + mute_process_exe_error: true + mute_process_io_error: true + processes: {} + prometheus: + config: + global: + scrape_interval: 60s + scrape_configs: + - job_name: otel-collector-binary + static_configs: + - targets: + # - localhost:8888 +processors: + batch: + send_batch_size: 1000 + timeout: 10s + # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md + resourcedetection: + 
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure. + # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. + timeout: 2s + system: + hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback +extensions: + health_check: {} + zpages: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" + logging: + verbosity: normal +service: + telemetry: + metrics: + address: 0.0.0.0:8888 + extensions: [health_check, zpages] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics/internal: + receivers: [prometheus, hostmetrics] + processors: [resourcedetection, batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` + +### Step 4: Run OTel Collector + Run this command inside the `otelcol-contrib` directory + +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" 
> otel-pid +``` + +### (Optional Step): View last 50 lines of `otelcol` logs +```bash +tail -f -n 50 otelcol-output.log +``` + +### (Optional Step): Stop `otelcol` +```bash +kill "$(< otel-pid)" +``` + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-configureHostmetricsJson.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-configureHostmetricsJson.md new file mode 100644 index 0000000000..5be4c4a528 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-configureHostmetricsJson.md @@ -0,0 +1,15 @@ +### Step 1: Download/Copy this hostmetrics JSON file + +Download/Copy the `hostmetrics-with-variable.json` from [here](https://github.com/SigNoz/dashboards/blob/main/hostmetrics/hostmetrics-with-variable.json) + +  +  + +### Step 2: Import hostmetrics JSON file to SigNoz Cloud + +Once you click on the **`Done`** button at the bottom right corner, you'll be taken to the SigNoz UI. + +In SigNoz UI, go to dashboards section and click New Dashboard button and select the import JSON option and upload/paste the `hostmetrics-with-variable.json` file data that you downloaded/copied in last step.
+ + +To know more detailed instructions, checkout the link [here](https://signoz.io/docs/userguide/hostmetrics/) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-setupOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-setupOtelCollector.md new file mode 100644 index 0000000000..26e95c43a7 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-setupOtelCollector.md @@ -0,0 +1,113 @@ +### Prerequisite +- A running EC2 instance + +  + +### Setup OpenTelemetry Binary as an agent + +### Step 1: Download otel-collector tar.gz +```bash +wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_amd64.tar.gz +``` + +### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder +```bash +mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_amd64.tar.gz -C otelcol-contrib +``` + +### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it +```bash +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + hostmetrics: + collection_interval: 60s + scrapers: + cpu: {} + disk: {} + load: {} + filesystem: {} + memory: {} + network: {} + paging: {} + process: + mute_process_name_error: true + mute_process_exe_error: true + mute_process_io_error: true + processes: {} + prometheus: + config: + global: + scrape_interval: 60s + scrape_configs: + - job_name: otel-collector-binary + static_configs: + - targets: + # - localhost:8888 +processors: + batch: + send_batch_size: 1000 + timeout: 10s + # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md + resourcedetection: + 
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure. + # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. + timeout: 2s + system: + hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback +extensions: + health_check: {} + zpages: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" + logging: + verbosity: normal +service: + telemetry: + metrics: + address: 0.0.0.0:8888 + extensions: [health_check, zpages] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics/internal: + receivers: [prometheus, hostmetrics] + processors: [resourcedetection, batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` +### Step 4: Run OTel Collector + Run this command inside the `otelcol-contrib` directory + +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" 
> otel-pid +``` + +### (Optional Step): View last 50 lines of `otelcol` logs +```bash +tail -f -n 50 otelcol-output.log +``` + +### (Optional Step): Stop `otelcol` +```bash +kill "$(< otel-pid)" +``` + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-configureHostmetricsJson.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-configureHostmetricsJson.md new file mode 100644 index 0000000000..5be4c4a528 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-configureHostmetricsJson.md @@ -0,0 +1,15 @@ +### Step 1: Download/Copy this hostmetrics JSON file + +Download/Copy the `hostmetrics-with-variable.json` from [here](https://github.com/SigNoz/dashboards/blob/main/hostmetrics/hostmetrics-with-variable.json) + +&nbsp; +&nbsp; + +### Step 2: Import hostmetrics JSON file to SigNoz Cloud + +Once you click on the **`Done`** button at the bottom right corner, you'll be taken to the SigNoz UI. + +In the SigNoz UI, go to the dashboards section, click the New Dashboard button, select the import JSON option, and upload/paste the `hostmetrics-with-variable.json` file data that you downloaded/copied in the last step. 
+ + +To know more detailed instructions, checkout the link [here](https://signoz.io/docs/userguide/hostmetrics/) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-setupOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-setupOtelCollector.md new file mode 100644 index 0000000000..e35173ba08 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-setupOtelCollector.md @@ -0,0 +1,112 @@ +### Prerequisite +- A running EC2 instance + +  + +### Setup OpenTelemetry Binary as an agent + +### Step 1: Download otel-collector tar.gz +```bash +wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_arm64.tar.gz +``` +### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder +```bash +mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_arm64.tar.gz -C otelcol-contrib +``` + +### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it +```bash +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + hostmetrics: + collection_interval: 60s + scrapers: + cpu: {} + disk: {} + load: {} + filesystem: {} + memory: {} + network: {} + paging: {} + process: + mute_process_name_error: true + mute_process_exe_error: true + mute_process_io_error: true + processes: {} + prometheus: + config: + global: + scrape_interval: 60s + scrape_configs: + - job_name: otel-collector-binary + static_configs: + - targets: + # - localhost:8888 +processors: + batch: + send_batch_size: 1000 + timeout: 10s + # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md + resourcedetection: + 
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure. + # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. + timeout: 2s + system: + hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback +extensions: + health_check: {} + zpages: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" + logging: + verbosity: normal +service: + telemetry: + metrics: + address: 0.0.0.0:8888 + extensions: [health_check, zpages] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics/internal: + receivers: [prometheus, hostmetrics] + processors: [resourcedetection, batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` +### Step 4: Run OTel Collector + Run this command inside the `otelcol-contrib` directory + +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid +``` + +### (Optional Step): View last 50 lines of `otelcol` logs +```bash +tail -f -n 50 otelcol-output.log +``` + +### (Optional Step): Stop `otelcol` +```bash +kill "$(< otel-pid)" +``` + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createDaemonService.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createDaemonService.md new file mode 100644 index 0000000000..83bb67039b --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createDaemonService.md @@ -0,0 +1,68 @@ +## Create Daemon Service + +Using the template we downloaded and the SigNoz OpenTelemetry Collector configuration we created, we will create the Daemon Service. 
This can be done following these steps: + +### Step 1: Set the environment variable + +Set the environment variable by running the below command using your AWS CLI: + +```bash +export CLUSTER_NAME= +export REGION= +export COMMAND=--config=env:SIGNOZ_CONFIG_CONTENT +export SIGNOZ_CONFIG_PATH=/ecs/signoz/otelcol-daemon.yaml +``` + +`` - Name of your ECS cluster. For example, **my-test-cluster** + +`` - Region in which your ECS cluster is running. For example, **us-east-1** + +&nbsp; + +### Step 2: Create stack for Daemon Service + +With the environment variables set, you can proceed to create the Daemon service using `cloudformation create-stack` by running the below command using your AWS CLI: + +```bash +aws cloudformation create-stack --stack-name AOCECS-daemon-${CLUSTER_NAME}-${REGION} \ + --template-body file://daemon-template.yaml \ + --parameters ParameterKey=ClusterName,ParameterValue=${CLUSTER_NAME} \ + ParameterKey=CreateIAMRoles,ParameterValue=True \ + ParameterKey=command,ParameterValue=${COMMAND} \ + ParameterKey=SigNozConfigPath,ParameterValue=${SIGNOZ_CONFIG_PATH} \ + --capabilities CAPABILITY_NAMED_IAM \ + --region ${REGION} +``` + +&nbsp; + +### Step 3: Verify Daemon Service + +To verify that the daemon service is running, you can run the following command, which should output the task ARN of the Daemon service as the output. 
+ +```bash +aws ecs list-tasks --cluster ${CLUSTER_NAME} --region ${REGION} + +``` +  + +### Step 4: Verify Data in SigNoz + +To verify that the data is being sent to SigNoz Cloud, you can go to the dashboard section of SigNoz and import one of the following dashboards below: + +- [instancemetrics.json](https://raw.githubusercontent.com/SigNoz/dashboards/chore/ecs-dashboards/ecs-infra-metrics/instance-metrics.json) +- [hostmetrics-with-variable.json](https://raw.githubusercontent.com/SigNoz/dashboards/main/hostmetrics/hostmetrics-with-variable.json) + +  + +### Optional Step: Clean Up + +In a cloud environment where resources are billed based on usage, cleaning up resources is crucial. This step involves removing the daemon service and any associated resources that were created during the setup process to collect and forward metrics and logs from your ECS infrastructure to SigNoz. To clean up the daemon service, you can run the following command: + +```bash +aws cloudformation delete-stack --stack-name AOCECS-daemon-${CLUSTER_NAME}-${REGION} --region ${REGION} +``` + +  + +Once you follow these steps, you should be able to see your logs and metrics data coming in SigNoz Cloud. To see data for your traces, click on Continue to next step below. 
\ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createOtelConfig.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createOtelConfig.md new file mode 100644 index 0000000000..1171bc6ba7 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createOtelConfig.md @@ -0,0 +1,28 @@ +## Create SigNoz OTel Collector Configuration file + +To setup the SigNoz OTel Collector config, follow these two steps: + +### Step 1: +Navigate to the AWS Parameter store and create a new parameter named **/ecs/signoz/otelcol-daemon.yaml** + +### Step 2: +Download the otelcol-daemon YAML configuration file: +```bash +wget https://github.com/SigNoz/benchmark/raw/main/ecs/otelcol-daemon.yaml +``` +  + +Update `{region}` and `SIGNOZ_INGESTION_KEY` values in your YAML configuration file with your SigNoz cloud values mentioned below: + +{region} : `{{REGION}}` + +SIGNOZ_INGESTION_KEY : `{{SIGNOZ_INGESTION_KEY}}` + +  + +Once you update these values, copy the updated content of the `otelcol-daemon.yaml` file and paste it in the value field of the **/ecs/signoz/otelcol-daemon.yaml** parameter that you created in Step 1. + +  + +**NOTE:** +- After successful set up, feel free to remove `logging` exporter if it gets too noisy. To do so, simply remove the logging exporter from the **exporters** list in the following pipelines: `traces`, `metrics`, and `logs` from the `otelcol-daemon.yaml` file. 
\ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-sendData.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-sendData.md new file mode 100644 index 0000000000..c532102a2d --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-sendData.md @@ -0,0 +1,90 @@ +**NOTE** : If you don't want to send traces data of your application, you can skip this step. + +  + +## Send Traces Data + +To send traces data from applications deployed in ECS to SigNoz Cloud using Daemon Service we created in the previous section, follow these steps: + +### Step 1: Instrument your application +To add OpenTelemetry instrumentation to your application, check out the Application Monitoring section in onboarding you can follow the docs [here](https://signoz.io/docs/instrumentation/). + +  + +### Step 2: Add Entrypoint to your Application Container + +Add an entrypoint to the application container to set the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable to the endpoint of the daemon service. + +Obtain the endpoint or IP address of the instance on which the task is running. This can be done by querying the metadata service of the instance. For **EC2**, the metadata service is available at **169.254.169.254**. + +The `entryPoint` will look like: + +```yaml +{ + ..., + "entryPoint": [ + "sh", + "-c", + "export OTEL_EXPORTER_OTLP_ENDPOINT=\"http://$(curl http://169.254.169.254/latest/meta-data/local-ipv4):4317\"; " + ], + "command": [], + ... +} +``` + +Replace `` with the commands to start your application. + +  + + +### Step 3: Add Service Name of your Application + +To add the service name of your application, you need to set the `OTEL_RESOURCE_ATTRIBUTES` environment variable of the application container to `service.name=`. + +In your task definition, add the following lines: + +```bash + +... 
+ ContainerDefinitions: + - Name: + ... + Environment: + - Name: OTEL_RESOURCE_ATTRIBUTES + Value: service.name= + ... +... +``` + +If you are using JSON for task definition, then add the following lines: + +```bash +... + "containerDefinitions": [ + { + "name": "", + ... + "environment": [ + { + "name": "OTEL_RESOURCE_ATTRIBUTES", + "value": "service.name=" + } + ], + ... + } + ], +... + +``` + +  + +### Step 4: Rebuild and Deploy Application Container + +Once you follow the above steps, you need to rebuild the application container and deploy it to ECS cluster. + +  + +### Step 5: Verify Data in SigNoz + +Generate some traffic to your application and go to your SigNoz cloud **Services** page to see your application name in the service list. diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-setupDeamonService.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-setupDeamonService.md new file mode 100644 index 0000000000..b96761cf86 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-setupDeamonService.md @@ -0,0 +1,21 @@ + +These steps will help you to collect **metrics, logs and traces** from your ECS infrastructure. + +## Setup Daemon Service + +  + +### Daemon Service Template + +This step guides in downloading a template which will be used to create a new service within your Amazon ECS (Elastic Container Service) cluster. The purpose of this service is to deploy a container that functions as a daemon. This service will run a container that will send data such as ECS infrastructure metrics and logs from docker containers and send it to SigNoz. + +We will use CloudFormation template which includes parameters and configurations that define how the daemon service should be set up. For example, it specifies the container image to use for the daemon, the necessary environment variables, and network settings. 
+ +&nbsp; + +Download the `daemon-template.yaml` using the command below: + +```bash +wget https://github.com/SigNoz/benchmark/raw/main/ecs/ec2/daemon-template.yaml +``` + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createDaemonService.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createDaemonService.md new file mode 100644 index 0000000000..83bb67039b --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createDaemonService.md @@ -0,0 +1,68 @@ +## Create Daemon Service + +Using the template we downloaded and the SigNoz OpenTelemetry Collector configuration we created, we will create the Daemon Service. This can be done following these steps: + +### Step 1: Set the environment variable + +Set the environment variable by running the below command using your AWS CLI: + +```bash +export CLUSTER_NAME= +export REGION= +export COMMAND=--config=env:SIGNOZ_CONFIG_CONTENT +export SIGNOZ_CONFIG_PATH=/ecs/signoz/otelcol-daemon.yaml +``` + +`` - Name of your ECS cluster. For example, **my-test-cluster** + +`` - Region in which your ECS cluster is running. 
For example, **us-east-1** + +  + +### Step 2: Create stack for Daemon Service + +With the environment variables set, you can proceed to create the Daemon service using `cloudformation create-stack` by running the below command using your AWS CLI: + +```bash +aws cloudformation create-stack --stack-name AOCECS-daemon-${CLUSTER_NAME}-${REGION} \ + --template-body file://daemon-template.yaml \ + --parameters ParameterKey=ClusterName,ParameterValue=${CLUSTER_NAME} \ + ParameterKey=CreateIAMRoles,ParameterValue=True \ + ParameterKey=command,ParameterValue=${COMMAND} \ + ParameterKey=SigNozConfigPath,ParameterValue=${SIGNOZ_CONFIG_PATH} \ + --capabilities CAPABILITY_NAMED_IAM \ + --region ${REGION} +``` + +  + +### Step 3: Verify Daemon Service + +To verify that the daemon service is running, you can run the following command, which should output the task ARN of the Daemon service as the output. + +```bash +aws ecs list-tasks --cluster ${CLUSTER_NAME} --region ${REGION} + +``` +  + +### Step 4: Verify Data in SigNoz + +To verify that the data is being sent to SigNoz Cloud, you can go to the dashboard section of SigNoz and import one of the following dashboards below: + +- [instancemetrics.json](https://raw.githubusercontent.com/SigNoz/dashboards/chore/ecs-dashboards/ecs-infra-metrics/instance-metrics.json) +- [hostmetrics-with-variable.json](https://raw.githubusercontent.com/SigNoz/dashboards/main/hostmetrics/hostmetrics-with-variable.json) + +  + +### Optional Step: Clean Up + +In a cloud environment where resources are billed based on usage, cleaning up resources is crucial. This step involves removing the daemon service and any associated resources that were created during the setup process to collect and forward metrics and logs from your ECS infrastructure to SigNoz. 
To clean up the daemon service, you can run the following command: + +```bash +aws cloudformation delete-stack --stack-name AOCECS-daemon-${CLUSTER_NAME}-${REGION} --region ${REGION} +``` + +  + +Once you follow these steps, you should be able to see your logs and metrics data coming in SigNoz Cloud. To see data for your traces, click on Continue to next step below. \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createOtelConfig.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createOtelConfig.md new file mode 100644 index 0000000000..46f5c3f453 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createOtelConfig.md @@ -0,0 +1,28 @@ +## Create SigNoz OTel Collector Configuration file + +To setup the SigNoz OTel Collector config, follow these two steps: + +### Step 1: +Navigate to the AWS Parameter store and create a new parameter named **/ecs/signoz/otelcol-daemon.yaml** + +### Step 2: +Download the otelcol-daemon YAML configuration file: +```bash +wget https://github.com/SigNoz/benchmark/raw/main/ecs/otelcol-daemon.yaml +``` +  + +Update `{region}` and `SIGNOZ_INGESTION_KEY` values in your YAML configuration file with your SigNoz cloud values mentioned below: + +{region} : `{{REGION}}` + +SIGNOZ_INGESTION_KEY : `{{SIGNOZ_INGESTION_KEY}}` + +  + +Once you update these values, copy the updated content of the `otelcol-sidecar.yaml` file and paste it in the value field of the **/ecs/signoz/otelcol-daemon.yaml** parameter that you created in Step 1. + +  + +**NOTE:** +- After successful set up, feel free to remove `logging` exporter if it gets too noisy. To do so, simply remove the logging exporter from the **exporters** list in the following pipelines: `traces`, `metrics`, and `logs` from the `otelcol-daemon.yaml` file. 
\ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-sendData.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-sendData.md new file mode 100644 index 0000000000..8db1606870 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-sendData.md @@ -0,0 +1,106 @@ +**NOTE** : If you don't want to send traces data of your application, you can skip this step. + +  + +## Send Traces Data + +To send traces data from applications deployed in ECS to SigNoz Cloud using Daemon Service we created in the previous section, follow these steps: + +### Step 1: Instrument your application +To add OpenTelemetry instrumentation to your application, check out the Application Monitoring section in onboarding you can follow the docs [here](https://signoz.io/docs/instrumentation/). + +  + +### Step 2: Add Entrypoint to your Application Container + +Add an entrypoint to the application container to set the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable to the endpoint of the daemon service. + +Obtain the endpoint or IP address of the instance on which the task is running. This can be done using the default network mode, i.e., **Bridge** + +The `entryPoint` will look like: + +```yaml +{ + ..., + "entryPoint": [ + "sh", + "-c", + "export OTEL_EXPORTER_OTLP_ENDPOINT=\"http://$(curl http://169.254.169.254/latest/meta-data/local-ipv4):4317\"; " + ], + "command": [], + ... +} +``` + +  + +In case you are using custom docker networking, you would have to use `ExtraHosts` in your task definition: + +```yaml +{ + ... + "extraHosts": [ + { + "hostname": "signoz-collector", + "ipAddress": "host-gateway" + } + ... + ] +} +``` + +Replace `` with the commands to start your application. 
+ +&nbsp; + + +### Step 3: Add Service Name of your Application + +To add the service name of your application, you need to set the `OTEL_RESOURCE_ATTRIBUTES` environment variable of the application container to `service.name=`. + +In your task definition, add the following lines: + +```bash +... + ContainerDefinitions: + - Name: + ... + Environment: + - Name: OTEL_RESOURCE_ATTRIBUTES + Value: service.name= + ... +... +``` + +If you are using JSON for task definition, then add the following lines: + +```bash +... + "containerDefinitions": [ + { + "name": "", + ... + "environment": [ + { + "name": "OTEL_RESOURCE_ATTRIBUTES", + "value": "service.name=" + } + ], + ... + } + ], +... + +``` + +&nbsp; + +### Step 4: Rebuild and Deploy Application Container + +Once you follow the above steps, you need to rebuild the application container and deploy it to ECS cluster. + +&nbsp; + +### Step 5: Verify Data in SigNoz + +Generate some traffic to your application and go to your SigNoz cloud services page to see your application name in the service list. diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-setupDeamonService.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-setupDeamonService.md new file mode 100644 index 0000000000..fc1a28fc82 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-setupDeamonService.md @ -0,0 +1,21 @@ + +These steps will help you to collect **metrics, logs and traces** from your ECS infrastructure. + +## Setup Daemon Service + +&nbsp; + +### Daemon Service Template + +This step guides in downloading a template which will be used to create a new service within your Amazon ECS (Elastic Container Service) cluster. The purpose of this service is to deploy a container that functions as a daemon. 
This service will run a container that will send data such as ECS infrastructure metrics and logs from docker containers and send it to SigNoz. + +We will use CloudFormation template which includes parameters and configurations that define how the daemon service should be set up. For example, it specifies the container image to use for the daemon, the necessary environment variables, and network settings. + +  + +Download the `daemon-template.yaml` using the command below: + +```bash +wget https://github.com/SigNoz/benchmark/raw/main/ecs/external/daemon-template.yaml +``` + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createOtelConfig.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createOtelConfig.md new file mode 100644 index 0000000000..9397301031 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createOtelConfig.md @@ -0,0 +1,30 @@ +These steps will help you to collect **metrics, logs and traces** from your ECS infrastructure. + +## Create SigNoz OTel Collector Configuration file + +To setup the SigNoz OTel Collector config, follow these two steps: + +### Step 1: +Navigate to the AWS Parameter store and create a new parameter named **/ecs/signoz/otelcol-sidecar.yaml** + +### Step 2: +Download the otelcol-sidecar YAML configuration file: +```bash +wget https://github.com/SigNoz/benchmark/raw/main/ecs/otelcol-sidecar.yaml +``` +  + +Update `{region}` and `SIGNOZ_INGESTION_KEY` values in your YAML configuration file with your SigNoz cloud values mentioned below: + +{region} : `{{REGION}}` + +SIGNOZ_INGESTION_KEY : `{{SIGNOZ_INGESTION_KEY}}` + +  + +Once you update these values, copy the updated content of the `otelcol-sidecar.yaml` file and paste it in the value field of the **/ecs/signoz/otelcol-sidecar.yaml** parameter that you created in Step 1. 
+ +  + +**NOTE:** +- After successful set up, feel free to remove `logging` exporter if it gets too noisy. To do so, simply remove the logging exporter from the **exporters** list in the following pipelines: `traces`, `metrics`, `metrics/aws` and `logs` from the `otelcol-sidecar.yaml` file. \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createSidecarCollectorContainer.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createSidecarCollectorContainer.md new file mode 100644 index 0000000000..72e75e85f7 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createSidecarCollectorContainer.md @@ -0,0 +1,143 @@ +## Create Sidecar Collector Container + +This step involves integrating the SigNoz collector into your ECS task definitions as a sidecar container. The sidecar collector container will run alongside your application container(s) within the same ECS task and will collect ECS container metrics and send them to SigNoz Cloud. Follow these steps to create the Sidecar collector container: + +### Step 1: Update task definition of your application + +In your ECS task definition, include a new container definition specifically for the sidecar container. This container will operate alongside your main application container(s) within the same task definition. The JSON configuration for that will look like this: + +```json +{ + ... 
+ "containerDefinitions": [ + ..., + { + "name": "signoz-collector", + "image": "signoz/signoz-otel-collector:0.88.13", + "user": "root", + "command": [ + "--config=env:SIGNOZ_CONFIG_CONTENT" + ], + "secrets": [ + { + "name": "SIGNOZ_CONFIG_CONTENT", + "valueFrom": "/ecs/signoz/otelcol-sidecar.yaml" + } + ], + "memory": 1024, + "cpu": 512, + "essential": true, + "portMappings": [ + { + "protocol": "tcp", + "containerPort": 4317 + }, + { + "protocol": "tcp", + "containerPort": 4318 + }, + { + "containerPort": 8006, + "protocol": "tcp" + } + ], + "healthCheck": { + "command": [ + "CMD-SHELL", + "wget -qO- http://localhost:13133/ || exit 1" + ], + "interval": 5, + "timeout": 6, + "retries": 5, + "startPeriod": 1 + }, + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-group": "/ecs/signoz-otel-EC2-sidcar", + "awslogs-region": "", + "awslogs-stream-prefix": "ecs", + "awslogs-create-group": "True" + } + } + } + ] +... +} +``` + +Replace `` with the Region for your ECS cluster. For example, **us-east-1** + +  + +### Step 2: Update ECS Task Execution Role + +To update the Task Execution role, follow these steps: + +1. **Identify the Role:** Identify the IAM role used by your ECS tasks for execution. It's often named something like **ecsTaskExecutionRole**. + +2. **Edit the Role:** Navigate to the IAM console in the AWS Management Console, find the role by name, and open its details page. + +3. **Attach Policy or add inline Policy:** + +There are two ways to grant access to the Parameter store: + +- **Attach AWS Managed Policies:** If the role doesn't already have the following policy, attach it: + + - `AmazonSSMReadOnlyAccess` + +- **Add Inline Policy:** Alternatively, for more granular control, you can create an inline policy that specifically grants access to only the necessary resources in the Parameter Store. 
The JSON for the inline policy will be: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "ssm:GetParameter" + ], + "Resource": [ + "arn:aws:ssm:::parameter/ecs/signoz/otelcol-sidecar.yaml" + ], + "Effect": "Allow" + } + ] +} +``` + +  + +### Step 3: Update ECS Task Role + +To update the ECS Task Role, follow these steps: + +1. **Identify the Role:** Determine the IAM role your ECS tasks are currently using to interact with AWS services. This role is specified in the ECS task definition under the "taskRoleArn" field. + +2. **Edit the Role:** Go to the IAM section of the AWS Management Console, locate the role by its name, and open its configuration. + +3. **Attach Policies or Add Inline Policy:** + +There are two ways to grant access to the Parameter store: + +- Attach AWS Managed Policies: If the role doesn't already have the following policies, attach it: + + - AmazonSSMReadOnlyAccess + +- **Add Inline Policy for Granular Access:** For tighter security, you might opt to create an inline policy that specifies exactly which resources the tasks can access and what actions they can perform on those resources. This is particularly important for accessing specific resources like the Parameter Store parameters used by the SigNoz sidecar. 
The JSON for the inline policy will be: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "ssm:GetParameter" + ], + "Resource": [ + "arn:aws:ssm:::parameter/ecs/signoz/otelcol-sidecar.yaml" + ], + "Effect": "Allow" + } + ] +} +``` diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-deployTaskDefinition.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-deployTaskDefinition.md new file mode 100644 index 0000000000..357dee0e9f --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-deployTaskDefinition.md @@ -0,0 +1,10 @@ +## Deploy the task definition + +If your application runs as an ECS service, you update the service to use the new revision of your task definition. This tells ECS to start new tasks based on this updated definition and gracefully replace the old tasks with the new ones, ensuring minimal disruption to your application. + +**NOTE:** Once the task is running, you should be able to see SigNoz sidecar container logs in CloudWatch Logs because we have set the logDriver parameter to be `awslogs` in our task definition. + + +## Verify data in SigNoz + +To verify that your sidecar container is running, go to the Dashboard section of SigNoz Cloud and import the dashboard **ECS - Container Metrics** Dashboard from [here](https://raw.githubusercontent.com/SigNoz/dashboards/main/ecs-infra-metrics/container-metrics.json). 
\ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendData.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendData.md new file mode 100644 index 0000000000..0e5b55a028 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendData.md @@ -0,0 +1,84 @@ +**NOTE** : If you don't want to send traces data of your application, you can skip this step. + +  + +## Send Traces Data + +To send traces data from applications deployed in ECS to SigNoz Cloud using sidecar container we created in the previous steps, follow these steps: + +### Step 1: Instrument your application +To add OpenTelemetry instrumentation to your application, check out the Application Monitoring section in onboarding you can follow the docs [here](https://signoz.io/docs/instrumentation/). + +  + +### Step 2: Configure OTLP Endpoint + +In your application task definition, you need to set the OTLP endpoint to the endpoint of the sidecar container. This can be done by setting the environment variable `OTEL_EXPORTER_OTLP_ENDPOINT` to the endpoint of the sidecar container. + +Depending on the Network Mode, the ECS task definition will change: + +For **Bridge** network mode, ECS task definition will be: + +```json +{ + ... + "containerDefinitions": [ + { + "name": "", + "environment": [ + { + "name": "OTEL_EXPORTER_OTLP_ENDPOINT", + "value": "http://signoz-collector:4317" + }, + { + "name": "OTEL_RESOURCE_ATTRIBUTES", + "value": "service.name=" + } + ], + "links": [ + "signoz-collector" + ], + ... + } + ] +} +``` +  + +For **AWS VCP** network mode, ECS task definition will be: + +```json +{ + ... 
+ "containerDefinitions": [ + { + "name": "", + "environment": [ + { + "name": "OTEL_EXPORTER_OTLP_ENDPOINT", + "value": "http://localhost:4317" + }, + { + "name": "OTEL_RESOURCE_ATTRIBUTES", + "value": "service.name=" + } + ], + ... + } + ] +} +``` + +Replace `` with the name of your container. + +  + +### Step 3: Rebuild and Deploy Application Container + +After instrumenting your application and configuring the OTLP endpoint, you'll need to rebuild your application container with these changes and deploy it to ECS cluster using the same task definition that we used in the previous section. + +  + +### Step 4: Verify Data in SigNoz + +Generate some traffic to your application and go to your SigNoz cloud **Services** page to see your application name in the service list. diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendLogs.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendLogs.md new file mode 100644 index 0000000000..1b71737d91 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendLogs.md @@ -0,0 +1,133 @@ +**NOTE** : If you don't want to send logs data of your applications deployed on ECS, you can skip this step. + +## Send Logs Data + +To send logs data from applications deployed in ECS to SigNoz Cloud using sidecar container we created in the previous steps, follow these steps: + +### Step 1: Configure Log Router + +Add a new container definition in your ECS task definition for the Fluent Bit log router: + +```json +{ + ... 
+ { + "name": "signoz-log-router", + "image": "906394416424.dkr.ecr.us-west-2.amazonaws.com/aws-for-fluent-bit:stable", + "cpu": 250, + "memory": 512, + "essential": true, + "dependsOn": [ + { + "containerName": "signoz-collector", + "condition": "HEALTHY" + } + ], + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-create-group": "True", + "awslogs-group": "/ecs/ecs-signoz-log-router", + "awslogs-region": "us-east-1", + "awslogs-stream-prefix": "ecs" + } + }, + "firelensConfiguration": { + "type": "fluentbit", + "options": { + "enable-ecs-log-metadata": "true" + } + } + } +} +``` + +**NOTE:** When collecting logs from multiple applications, it is recommended to use `-log-router` pattern instead of `signoz-log-router` for container name and `awslogs-group`. It helps to separate log routers of different applications. + +  + +### Step 2: Send logs to Sidecar Container + +In your application task definition, you need to use `awsfirelens` log driver to send logs to the sidecar otel-collector container via Fluent Bit log router. + +Depending on the Network Mode, update the ECS task definition: + +For **Bridge** network mode: + +```json +{ + ... + "containerDefinitions": [ + { + "name": "", + "dependsOn": [ + { + "containerName": "signoz-log-router", + "condition": "START" + } + ], + "logConfiguration": { + "logDriver": "awsfirelens", + "options": { + "Name": "forward", + "Match": "*", + "Host": "signoz-collector", + "Port": "8006", + "tls": "off", + "tls.verify": "off" + } + }, + "links": [ + "signoz-collector" + ], + ... + } + ] +} +``` + +  + +For **AWS VPC** network mode: + +```json +{ + ... + "containerDefinitions": [ + { + "name": "", + "dependsOn": [ + { + "containerName": "signoz-log-router", + "condition": "START" + } + ], + "logConfiguration": { + "logDriver": "awsfirelens", + "options": { + "Name": "forward", + "Match": "*", + "Host": "localhost", + "Port": "8006", + "tls": "off", + "tls.verify": "off" + } + } + ... 
+ } + ] +} +``` + +### Step 3: Rebuild and Deploy Application Container + +Rebuild your application container and deploy it to ECS cluster using the same task definition that we updated in the previous section. + +  + +### Step 4: Verify Data in SigNoz + +Generate some logs from your application and go to your SigNoz cloud **Logs** page to see your application logs. + + + diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EKS/eks-installOtelCollector.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EKS/eks-installOtelCollector.md new file mode 100644 index 0000000000..946b7fbdbf --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EKS/eks-installOtelCollector.md @@ -0,0 +1,24 @@ +## Install otel-collector in your Kubernetes infra +  + +Add the SigNoz Helm Chart repository +```bash +helm repo add signoz https://charts.signoz.io +``` +  + +If the chart is already present, update the chart to the latest using: +```bash +helm repo update +``` +  + +Install the Kubernetes Infrastructure chart provided by SigNoz +```bash +helm install my-release signoz/k8s-infra \ +--set otelCollectorEndpoint=ingest.{{REGION}}.signoz.cloud:443 \ +--set otelInsecure=false \ +--set signozApiKey={{SIGNOZ_INGESTION_KEY}} \ +--set global.clusterName= +``` +- Replace `` with the name of the Kubernetes cluster or a unique identifier of the cluster. 
diff --git a/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EKS/eks-monitorUsingDashboard.md b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EKS/eks-monitorUsingDashboard.md new file mode 100644 index 0000000000..77bd5cb87c --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AwsMonitoring/EKS/eks-monitorUsingDashboard.md @@ -0,0 +1,16 @@ +## Monitor using Dashboards + +To visualize the Kubernetes Metrics, you can use one of the following pre-built Dashboards: + +- [K8s Node-Level Metrics](https://github.com/SigNoz/dashboards/blob/main/k8s-node-%26-pod-metrics/k8s-node-level-metrics.json) +- [K8s Pod_level Metrics](https://github.com/SigNoz/dashboards/blob/main/k8s-node-%26-pod-metrics/k8s-pod-level-metrics.json) + +You should copy the JSON data in these files and create a New Dashboard in the Dashboard Tab of SigNoz. + +  + +By following the previous step, you should also be able to see Kubernetes Pod logs in the logs Section of SigNoz. + +  + +To send traces for your application deployed on your Kubernetes cluster, checkout the Application monitoring section of onboarding. \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-configureReceiver.md b/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-configureReceiver.md index 2b2d346f3a..d2785da486 100644 --- a/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-configureReceiver.md +++ b/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-configureReceiver.md @@ -14,7 +14,7 @@ Replace `/tmp/app.log` with the path to your log file. 
Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. -For more configurations that are available for syslog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). +For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). ### Step 2: Include filelog receiver in the Pipeline We will modify our pipeline inside `config.yaml` to include the receiver we have created above. diff --git a/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-configureReceiver.md b/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-configureReceiver.md index 2b2d346f3a..d2785da486 100644 --- a/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-configureReceiver.md +++ b/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-configureReceiver.md @@ -14,7 +14,7 @@ Replace `/tmp/app.log` with the path to your log file. Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. -For more configurations that are available for syslog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). 
+For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). ### Step 2: Include filelog receiver in the Pipeline We will modify our pipeline inside `config.yaml` to include the receiver we have created above. diff --git a/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-configureReceiver.md b/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-configureReceiver.md index 2b2d346f3a..d2785da486 100644 --- a/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-configureReceiver.md +++ b/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-configureReceiver.md @@ -14,7 +14,7 @@ Replace `/tmp/app.log` with the path to your log file. Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. -For more configurations that are available for syslog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). +For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). ### Step 2: Include filelog receiver in the Pipeline We will modify our pipeline inside `config.yaml` to include the receiver we have created above. 
diff --git a/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-configureReceiver.md b/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-configureReceiver.md index 2b2d346f3a..d2785da486 100644 --- a/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-configureReceiver.md +++ b/frontend/src/container/OnboardingContainer/Modules/LogsManagement/ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-configureReceiver.md @@ -14,7 +14,7 @@ Replace `/tmp/app.log` with the path to your log file. Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. -For more configurations that are available for syslog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). +For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). ### Step 2: Include filelog receiver in the Pipeline We will modify our pipeline inside `config.yaml` to include the receiver we have created above. 
diff --git a/frontend/src/container/OnboardingContainer/OnboardingContainer.tsx b/frontend/src/container/OnboardingContainer/OnboardingContainer.tsx index 797e3b396b..ae294e2fb8 100644 --- a/frontend/src/container/OnboardingContainer/OnboardingContainer.tsx +++ b/frontend/src/container/OnboardingContainer/OnboardingContainer.tsx @@ -21,11 +21,13 @@ import { } from './context/OnboardingContext'; import { DataSourceType } from './Steps/DataSource/DataSource'; import { + defaultAwsServices, defaultInfraMetricsType, defaultLogsType, } from './utils/dataSourceUtils'; import { APM_STEPS, + AWS_MONITORING_STEPS, getSteps, INFRASTRUCTURE_MONITORING_STEPS, LOGS_MANAGEMENT_STEPS, @@ -35,6 +37,7 @@ export enum ModulesMap { APM = 'APM', LogsManagement = 'LogsManagement', InfrastructureMonitoring = 'InfrastructureMonitoring', + AwsMonitoring = 'AwsMonitoring', } export interface ModuleProps { @@ -68,6 +71,12 @@ export const useCases = { desc: 'Monitor Kubernetes infrastructure metrics, hostmetrics, or metrics of any third-party integration', }, + AwsMonitoring: { + id: ModulesMap.AwsMonitoring, + title: 'AWS Monitoring', + desc: + 'Monitor your traces, logs and metrics for AWS services like EC2, ECS, EKS etc.', + }, }; export default function Onboarding(): JSX.Element { @@ -173,6 +182,13 @@ export default function Onboarding(): JSX.Element { setSelectedModuleSteps(LOGS_MANAGEMENT_STEPS); updateSelectedDataSource(defaultLogsType); } + } else if (selectedModule?.id === ModulesMap.AwsMonitoring) { + if (selectedDataSource) { + setModuleStepsBasedOnSelectedDataSource(selectedDataSource); + } else { + setSelectedModuleSteps(AWS_MONITORING_STEPS); + updateSelectedDataSource(defaultAwsServices); + } } else if (selectedModule?.id === ModulesMap.APM) { handleAPMSteps(); } diff --git a/frontend/src/container/OnboardingContainer/Steps/MarkdownStep/MarkdownStep.tsx b/frontend/src/container/OnboardingContainer/Steps/MarkdownStep/MarkdownStep.tsx index d39e83ed53..36b07cd191 100644 --- 
a/frontend/src/container/OnboardingContainer/Steps/MarkdownStep/MarkdownStep.tsx +++ b/frontend/src/container/OnboardingContainer/Steps/MarkdownStep/MarkdownStep.tsx @@ -1,6 +1,7 @@ /* eslint-disable @typescript-eslint/ban-ts-comment */ import { MarkdownRenderer } from 'components/MarkdownRenderer/MarkdownRenderer'; import { ApmDocFilePaths } from 'container/OnboardingContainer/constants/apmDocFilePaths'; +import { AwsMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/awsMonitoringDocFilePaths'; import { InfraMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/infraMonitoringDocFilePaths'; import { LogsManagementDocFilePaths } from 'container/OnboardingContainer/constants/logsManagementDocFilePaths'; import { @@ -66,6 +67,8 @@ export default function MarkdownStep(): JSX.Element { docFilePaths = LogsManagementDocFilePaths; } else if (selectedModule?.id === ModulesMap.InfrastructureMonitoring) { docFilePaths = InfraMonitoringDocFilePaths; + } else if (selectedModule?.id === ModulesMap.AwsMonitoring) { + docFilePaths = AwsMonitoringDocFilePaths; } // @ts-ignore if (docFilePaths && docFilePaths[path]) { diff --git a/frontend/src/container/OnboardingContainer/common/ModuleStepsContainer/ModuleStepsContainer.tsx b/frontend/src/container/OnboardingContainer/common/ModuleStepsContainer/ModuleStepsContainer.tsx index 7362e9a47d..272d2b5083 100644 --- a/frontend/src/container/OnboardingContainer/common/ModuleStepsContainer/ModuleStepsContainer.tsx +++ b/frontend/src/container/OnboardingContainer/common/ModuleStepsContainer/ModuleStepsContainer.tsx @@ -151,6 +151,10 @@ export default function ModuleStepsContainer({ history.push(ROUTES.LOGS_EXPLORER); } else if (selectedModule.id === ModulesMap.InfrastructureMonitoring) { history.push(ROUTES.APPLICATION); + } else if (selectedModule.id === ModulesMap.AwsMonitoring) { + history.push(ROUTES.APPLICATION); + } else { + history.push(ROUTES.APPLICATION); } }; @@ -264,6 +268,69 @@ export default 
function ModuleStepsContainer({ module: activeStep?.module?.id, }); break; + case stepsMap.sendLogsCloudwatch: + trackEvent('Onboarding V2: Send Logs Cloudwatch', { + dataSource: selectedDataSource?.id, + environment: selectedEnvironment, + module: activeStep?.module?.id, + }); + break; + case stepsMap.setupDaemonService: + trackEvent('Onboarding V2: Setup ECS Daemon Service', { + dataSource: selectedDataSource?.id, + environment: selectedEnvironment, + module: activeStep?.module?.id, + }); + break; + case stepsMap.createOtelConfig: + trackEvent('Onboarding V2: Create ECS OTel Config', { + dataSource: selectedDataSource?.id, + environment: selectedEnvironment, + module: activeStep?.module?.id, + }); + break; + case stepsMap.createDaemonService: + trackEvent('Onboarding V2: Create ECS Daemon Service', { + dataSource: selectedDataSource?.id, + environment: selectedEnvironment, + module: activeStep?.module?.id, + }); + break; + case stepsMap.ecsSendData: + trackEvent('Onboarding V2: ECS send traces data', { + dataSource: selectedDataSource?.id, + environment: selectedEnvironment, + module: activeStep?.module?.id, + }); + break; + case stepsMap.createSidecarCollectorContainer: + trackEvent('Onboarding V2: ECS create Sidecar Container', { + dataSource: selectedDataSource?.id, + environment: selectedEnvironment, + module: activeStep?.module?.id, + }); + break; + case stepsMap.deployTaskDefinition: + trackEvent('Onboarding V2: ECS deploy task definition', { + dataSource: selectedDataSource?.id, + environment: selectedEnvironment, + module: activeStep?.module?.id, + }); + break; + case stepsMap.ecsSendLogsData: + trackEvent('Onboarding V2: ECS Fargate send logs data', { + dataSource: selectedDataSource?.id, + environment: selectedEnvironment, + module: activeStep?.module?.id, + }); + break; + case stepsMap.monitorDashboard: + trackEvent('Onboarding V2: EKS monitor dashboard', { + dataSource: selectedDataSource?.id, + environment: selectedEnvironment, + module: 
activeStep?.module?.id, + }); + break; default: break; } diff --git a/frontend/src/container/OnboardingContainer/constants/awsMonitoringDocFilePaths.ts b/frontend/src/container/OnboardingContainer/constants/awsMonitoringDocFilePaths.ts new file mode 100644 index 0000000000..76ba77eebf --- /dev/null +++ b/frontend/src/container/OnboardingContainer/constants/awsMonitoringDocFilePaths.ts @@ -0,0 +1,127 @@ +/* eslint-disable simple-import-sort/imports */ + +// Application Logs Start + +// LINUX AMD 64 +import AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_setupOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-installOtelCollector.md'; +import AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_configureReceiver from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-configureReceiver.md'; +import AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_restartOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-runOtelCollector.md'; + +// LINUX ARM 64 +import AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_setupOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-installOtelCollector.md'; +import AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_configureReceiver from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-configureReceiver.md'; +import AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_restartOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-runOtelCollector.md'; + +// MacOS AMD 64 +import AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_setupOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-installOtelCollector.md'; +import AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_configureReceiver from 
'../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-configureReceiver.md'; +import AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_restartOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-runOtelCollector.md'; + +// MacOS ARM 64 +import AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_setupOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-installOtelCollector.md'; +import AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_configureReceiver from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-configureReceiver.md'; +import AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_restartOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-runOtelCollector.md'; +// Application Logs End + +// Hostmetrics Start +// LINUX AMD 64 +import AwsMonitoring_awsEc2InfrastructureMetrics_linuxAMD64_setupOtelCollector from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-setupOtelCollector.md'; +import AwsMonitoring_awsEc2InfrastructureMetrics_linuxAMD64_configureHostmetricsJson from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-configureHostmetricsJson.md'; + +// LINUX ARM 64 +import AwsMonitoring_awsEc2InfrastructureMetrics_linuxARM64_setupOtelCollector from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-setupOtelCollector.md'; +import AwsMonitoring_awsEc2InfrastructureMetrics_linuxARM64_configureHostmetricsJson from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-configureHostmetricsJson.md'; + +// MacOS AMD 64 +import AwsMonitoring_awsEc2InfrastructureMetrics_macOsAMD64_setupOtelCollector from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-setupOtelCollector.md'; 
+import AwsMonitoring_awsEc2InfrastructureMetrics_macOsAMD64_configureHostmetricsJson from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-configureHostmetricsJson.md'; + +// MacOS ARM 64 +import AwsMonitoring_awsEc2InfrastructureMetrics_macOsARM64_setupOtelCollector from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-setupOtelCollector.md'; +import AwsMonitoring_awsEc2InfrastructureMetrics_macOsARM64_configureHostmetricsJson from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-configureHostmetricsJson.md'; + +// Hostmetrics End +// AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_restartOtelCollector, + +import AwsMonitoring_awsEcsEc2_setupDaemonService from '../Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-setupDeamonService.md'; +import AwsMonitoring_awsEcsEc2_createOtelConfig from '../Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createOtelConfig.md'; +import AwsMonitoring_awsEcsEc2_createDaemonService from '../Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createDaemonService.md'; +import AwsMonitoring_awsEcsEc2_ecsSendData from '../Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-sendData.md'; + +import AwsMonitoring_awsEcsExternal_setupDaemonService from '../Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-setupDeamonService.md'; +import AwsMonitoring_awsEcsExternal_createOtelConfig from '../Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createOtelConfig.md'; +import AwsMonitoring_awsEcsExternal_createDaemonService from '../Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createDaemonService.md'; +import AwsMonitoring_awsEcsExternal_ecsSendData from '../Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-sendData.md'; + +import AwsMonitoring_awsEcsFargate_createOtelConfig from '../Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createOtelConfig.md'; +import AwsMonitoring_awsEcsFargate_createSidecarCollectorContainer from 
'../Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createSidecarCollectorContainer.md'; +import AwsMonitoring_awsEcsFargate_deployTaskDefinition from '../Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-deployTaskDefinition.md'; +import AwsMonitoring_awsEcsFargate_ecsSendData from '../Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendData.md'; +import AwsMonitoring_awsEcsFargate_ecsSendLogsData from '../Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendLogs.md'; + +// AWS EKS + +import AwsMonitoring_awsEks_setupOtelCollector from '../Modules/AwsMonitoring/EKS/eks-installOtelCollector.md'; +import AwsMonitoring_awsEks_monitorDashboard from '../Modules/AwsMonitoring/EKS/eks-monitorUsingDashboard.md'; + +export const AwsMonitoringDocFilePaths = { + /// /// AWS EC2 Application Logs + + // Linux AMD64 + AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_setupOtelCollector, + AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_configureReceiver, + AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_restartOtelCollector, + // LINUX ARM 64 + AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_setupOtelCollector, + AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_configureReceiver, + AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_restartOtelCollector, + // MacOS AMD 64 + AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_setupOtelCollector, + AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_configureReceiver, + AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_restartOtelCollector, + // MacOS ARM 64 + AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_setupOtelCollector, + AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_configureReceiver, + AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_restartOtelCollector, + + /// /// AWS EC2 Infrastructure Metrics + + // Linux AMD64 + AwsMonitoring_awsEc2InfrastructureMetrics_linuxAMD64_setupOtelCollector, + AwsMonitoring_awsEc2InfrastructureMetrics_linuxAMD64_configureHostmetricsJson, + + // Linux ARM64 + 
AwsMonitoring_awsEc2InfrastructureMetrics_linuxARM64_setupOtelCollector, + AwsMonitoring_awsEc2InfrastructureMetrics_linuxARM64_configureHostmetricsJson, + + // MacOS AMD64 + AwsMonitoring_awsEc2InfrastructureMetrics_macOsAMD64_setupOtelCollector, + AwsMonitoring_awsEc2InfrastructureMetrics_macOsAMD64_configureHostmetricsJson, + + // MacOS ARM64 + AwsMonitoring_awsEc2InfrastructureMetrics_macOsARM64_setupOtelCollector, + AwsMonitoring_awsEc2InfrastructureMetrics_macOsARM64_configureHostmetricsJson, + + /// //// AWS ECS EC2 + AwsMonitoring_awsEcsEc2_setupDaemonService, + AwsMonitoring_awsEcsEc2_createOtelConfig, + AwsMonitoring_awsEcsEc2_createDaemonService, + AwsMonitoring_awsEcsEc2_ecsSendData, + + /// //// AWS ECS External + AwsMonitoring_awsEcsExternal_setupDaemonService, + AwsMonitoring_awsEcsExternal_createOtelConfig, + AwsMonitoring_awsEcsExternal_createDaemonService, + AwsMonitoring_awsEcsExternal_ecsSendData, + + /// //// AWS ECS Fargate + AwsMonitoring_awsEcsFargate_createOtelConfig, + AwsMonitoring_awsEcsFargate_createSidecarCollectorContainer, + AwsMonitoring_awsEcsFargate_deployTaskDefinition, + AwsMonitoring_awsEcsFargate_ecsSendData, + AwsMonitoring_awsEcsFargate_ecsSendLogsData, + + /// /// AWS EKS + AwsMonitoring_awsEks_setupOtelCollector, + AwsMonitoring_awsEks_monitorDashboard, +}; diff --git a/frontend/src/container/OnboardingContainer/constants/stepsConfig.tsx b/frontend/src/container/OnboardingContainer/constants/stepsConfig.tsx index 95039b9d18..57b3a597d8 100644 --- a/frontend/src/container/OnboardingContainer/constants/stepsConfig.tsx +++ b/frontend/src/container/OnboardingContainer/constants/stepsConfig.tsx @@ -27,6 +27,14 @@ export const stepsMap = { createHttpPayload: `createHttpPayload`, configureAws: `configureAws`, sendLogsCloudwatch: `sendLogsCloudwatch`, + setupDaemonService: `setupDaemonService`, + createOtelConfig: `createOtelConfig`, + createDaemonService: `createDaemonService`, + ecsSendData: `ecsSendData`, + 
createSidecarCollectorContainer: `createSidecarCollectorContainer`, + deployTaskDefinition: `deployTaskDefinition`, + ecsSendLogsData: `ecsSendLogsData`, + monitorDashboard: `monitorDashboard`, }; export const DataSourceStep: SelectedModuleStepProps = { @@ -153,3 +161,43 @@ export const SendLogsCloudwatch: SelectedModuleStepProps = { title: 'Send Logs', component: , }; +export const SetupDaemonService: SelectedModuleStepProps = { + id: stepsMap.setupDaemonService, + title: 'Setup Daemon Service', + component: , +}; +export const CreateOtelConfig: SelectedModuleStepProps = { + id: stepsMap.createOtelConfig, + title: 'Create OTel Config', + component: , +}; +export const CreateDaemonService: SelectedModuleStepProps = { + id: stepsMap.createDaemonService, + title: 'Create Daemon Service', + component: , +}; +export const EcsSendData: SelectedModuleStepProps = { + id: stepsMap.ecsSendData, + title: 'Send Traces Data', + component: , +}; +export const CreateSidecarCollectorContainer: SelectedModuleStepProps = { + id: stepsMap.createSidecarCollectorContainer, + title: 'Create Sidecar Collector', + component: , +}; +export const DeployTaskDefinition: SelectedModuleStepProps = { + id: stepsMap.deployTaskDefinition, + title: 'Deploy Task Definition', + component: , +}; +export const EcsSendLogsData: SelectedModuleStepProps = { + id: stepsMap.ecsSendLogsData, + title: 'Send Logs Data', + component: , +}; +export const MonitorDashboard: SelectedModuleStepProps = { + id: stepsMap.monitorDashboard, + title: 'Monitor using Dashboard ', + component: , +}; diff --git a/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts b/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts index 5b46b02727..7b851feac9 100644 --- a/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts +++ b/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts @@ -68,6 +68,7 @@ export const frameworksMap = { }, LogsManagement: {}, InfrastructureMonitoring: 
{}, + AwsMonitoring: {}, }; export const defaultApplicationDataSource = { @@ -212,6 +213,45 @@ const supportedInfraMetrics = [ }, ]; +export const defaultAwsServices = { + name: 'EC2 - Application Logs', + id: 'awsEc2ApplicationLogs', + imgURL: `Logos/ec2.svg`, +}; + +const supportedAwsServices = [ + { + name: 'EC2 - App/Server Logs', + id: 'awsEc2ApplicationLogs', + imgURL: `Logos/ec2.svg`, + }, + { + name: 'EC2 - Infra Metrics', + id: 'awsEc2InfrastructureMetrics', + imgURL: `Logos/ec2.svg`, + }, + { + name: 'ECS - EC2', + id: 'awsEcsEc2', + imgURL: `Logos/ecs.svg`, + }, + { + name: 'ECS - Fargate', + id: 'awsEcsFargate', + imgURL: `Logos/ecs.svg`, + }, + { + name: 'ECS - External', + id: 'awsEcsExternal', + imgURL: `Logos/ecs.svg`, + }, + { + name: 'EKS', + id: 'awsEks', + imgURL: `Logos/eks.svg`, + }, +]; + export const getDataSources = (module: ModuleProps): DataSourceType[] => { if (module.id === ModulesMap.APM) { return supportedLanguages; @@ -221,7 +261,11 @@ export const getDataSources = (module: ModuleProps): DataSourceType[] => { return supportedInfraMetrics; } - return supportedLogsTypes; + if (module.id === ModulesMap.LogsManagement) { + return supportedLogsTypes; + } + + return supportedAwsServices; }; export const getSupportedFrameworks = ({ diff --git a/frontend/src/container/OnboardingContainer/utils/getSteps.ts b/frontend/src/container/OnboardingContainer/utils/getSteps.ts index 5d3baa08b8..4ad252fa89 100644 --- a/frontend/src/container/OnboardingContainer/utils/getSteps.ts +++ b/frontend/src/container/OnboardingContainer/utils/getSteps.ts @@ -6,16 +6,24 @@ import { ConfigureHostmetricsJSON, ConfigureMetricsReceiver, ConfigureReceiver, + CreateDaemonService, CreateHttpPayload, + CreateOtelConfig, + CreateSidecarCollectorContainer, DataSourceStep, + DeployTaskDefinition, + EcsSendData, + EcsSendLogsData, EnvDetailsStep, InstallOpenTelemetryStep, LogsTestConnectionStep, + MonitorDashboard, PlotMetrics, RestartOtelCollector, RunApplicationStep, 
SelectMethodStep, SendLogsCloudwatch, + SetupDaemonService, SetupLogDrains, SetupOtelCollectorStep, StartContainer, @@ -47,6 +55,8 @@ export const INFRASTRUCTURE_MONITORING_STEPS: SelectedModuleStepProps[] = [ DataSourceStep, ]; +export const AWS_MONITORING_STEPS: SelectedModuleStepProps[] = [DataSourceStep]; + export const getSteps = ({ selectedDataSource, }: GetStepsProps): SelectedModuleStepProps[] => { @@ -72,6 +82,7 @@ export const getSteps = ({ case 'fluentD': case 'fluentBit': case 'logStash': + case 'awsEc2ApplicationLogs': return [ DataSourceStep, EnvDetailsStep, @@ -98,6 +109,7 @@ export const getSteps = ({ case 'kubernetesInfraMetrics': return [DataSourceStep, SetupOtelCollectorStep, PlotMetrics]; case 'hostMetrics': + case 'awsEc2InfrastructureMetrics': return [ DataSourceStep, EnvDetailsStep, @@ -111,6 +123,28 @@ export const getSteps = ({ SetupOtelCollectorStep, ConfigureMetricsReceiver, ]; + case 'awsEcsExternal': + case 'awsEcsEc2': + return [ + DataSourceStep, + SetupDaemonService, + CreateOtelConfig, + CreateDaemonService, + EcsSendData, + ]; + + case 'awsEcsFargate': + return [ + DataSourceStep, + CreateOtelConfig, + CreateSidecarCollectorContainer, + DeployTaskDefinition, + EcsSendData, + EcsSendLogsData, + ]; + case 'awsEks': + return [DataSourceStep, SetupOtelCollectorStep, MonitorDashboard]; + default: return [DataSourceStep]; } From 9af1c2320bd378a828a64670f9240d3d88c64cf0 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Wed, 28 Feb 2024 19:13:23 +0545 Subject: [PATCH 16/16] =?UTF-8?q?chore(signoz):=20=F0=9F=93=8C=20pin=20ver?= =?UTF-8?q?sions:=20SigNoz=200.40.0,=20SigNoz=20OtelCollector=200.88.14?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- deploy/docker-swarm/clickhouse-setup/docker-compose.yaml | 8 ++++---- deploy/docker/clickhouse-setup/docker-compose-core.yaml | 4 ++-- deploy/docker/clickhouse-setup/docker-compose.yaml | 8 ++++---- go.mod | 2 +- go.sum | 
4 ++-- pkg/query-service/tests/test-deploy/docker-compose.yaml | 4 ++-- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml index 2a2ae9faf1..ae0fbbd357 100644 --- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml @@ -146,7 +146,7 @@ services: condition: on-failure query-service: - image: signoz/query-service:0.39.1 + image: signoz/query-service:0.40.0 command: [ "-config=/root/config/prometheus.yml", @@ -186,7 +186,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:0.39.1 + image: signoz/frontend:0.40.0 deploy: restart_policy: condition: on-failure @@ -199,7 +199,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector: - image: signoz/signoz-otel-collector:0.88.13 + image: signoz/signoz-otel-collector:0.88.14 command: [ "--config=/etc/otel-collector-config.yaml", @@ -237,7 +237,7 @@ services: - query-service otel-collector-migrator: - image: signoz/signoz-schema-migrator:0.88.13 + image: signoz/signoz-schema-migrator:0.88.14 deploy: restart_policy: condition: on-failure diff --git a/deploy/docker/clickhouse-setup/docker-compose-core.yaml b/deploy/docker/clickhouse-setup/docker-compose-core.yaml index 525fa5175d..303016b38a 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-core.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-core.yaml @@ -66,7 +66,7 @@ services: - --storage.path=/data otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.13} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.14} container_name: otel-migrator command: - "--dsn=tcp://clickhouse:9000" @@ -81,7 +81,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` otel-collector: container_name: signoz-otel-collector - image: signoz/signoz-otel-collector:0.88.13 + image: signoz/signoz-otel-collector:0.88.14 command: [ "--config=/etc/otel-collector-config.yaml", diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index ec3fb0e6ba..a0cc5c4f6b 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -164,7 +164,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.39.1} + image: signoz/query-service:${DOCKER_TAG:-0.40.0} container_name: signoz-query-service command: [ @@ -203,7 +203,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.39.1} + image: signoz/frontend:${DOCKER_TAG:-0.40.0} container_name: signoz-frontend restart: on-failure depends_on: @@ -215,7 +215,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.13} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.14} container_name: otel-migrator command: - "--dsn=tcp://clickhouse:9000" @@ -229,7 +229,7 @@ services: otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.13} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.14} container_name: signoz-otel-collector command: [ diff --git a/go.mod b/go.mod index 4b24c57239..0ca9965546 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21.3 require ( github.com/ClickHouse/clickhouse-go/v2 v2.15.0 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd - github.com/SigNoz/signoz-otel-collector v0.88.13 + 
github.com/SigNoz/signoz-otel-collector v0.88.14 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974 github.com/antonmedv/expr v1.15.3 diff --git a/go.sum b/go.sum index ced65a3169..8ba2afd692 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,8 @@ github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkb github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc= github.com/SigNoz/prometheus v1.9.78 h1:bB3yuDrRzi/Mv00kWayR9DZbyjTuGfendSqISyDcXiY= github.com/SigNoz/prometheus v1.9.78/go.mod h1:MffmFu2qFILQrOHehx3D0XjYtaZMVfI+Ppeiv98x4Ww= -github.com/SigNoz/signoz-otel-collector v0.88.13 h1:VAVXokL28Hqxo6xyzlCrFS1na/bd1cgqFAVOe1lJjUE= -github.com/SigNoz/signoz-otel-collector v0.88.13/go.mod h1:RH9OEjni6tkh9RgN/meSPxv3kykjcFscqMwJgbUAXmo= +github.com/SigNoz/signoz-otel-collector v0.88.14 h1:/40pH8au6M8PhUhdCXd4c+7nJ9h0VgoDaV9ERKbUtf4= +github.com/SigNoz/signoz-otel-collector v0.88.14/go.mod h1:RH9OEjni6tkh9RgN/meSPxv3kykjcFscqMwJgbUAXmo= github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc= github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo= github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY= diff --git a/pkg/query-service/tests/test-deploy/docker-compose.yaml b/pkg/query-service/tests/test-deploy/docker-compose.yaml index 4144f81d4e..dcbd059364 100644 --- a/pkg/query-service/tests/test-deploy/docker-compose.yaml +++ b/pkg/query-service/tests/test-deploy/docker-compose.yaml @@ -192,7 +192,7 @@ services: <<: *db-depend otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.13} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.14} container_name: otel-migrator command: - "--dsn=tcp://clickhouse:9000" @@ -205,7 +205,7 
@@ services: # condition: service_healthy otel-collector: - image: signoz/signoz-otel-collector:0.88.13 + image: signoz/signoz-otel-collector:0.88.14 container_name: signoz-otel-collector command: [