Merge pull request #3641 from SigNoz/release/v0.30.0

Release/v0.30.0
This commit is contained in:
Srikanth Chekuri 2023-09-27 23:19:15 +05:30 committed by GitHub
commit 4bbe1ea614
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
138 changed files with 3249 additions and 1435 deletions

View File

@ -36,6 +36,7 @@ jobs:
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
- name: Install dependencies
run: cd frontend && yarn install
- name: Run ESLint

View File

@ -133,6 +133,7 @@ jobs:
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
- name: Install dependencies
working-directory: frontend
run: yarn install

View File

@ -151,4 +151,5 @@ test:
go test ./pkg/query-service/app/querier/...
go test ./pkg/query-service/converter/...
go test ./pkg/query-service/formatter/...
go test ./pkg/query-service/tests/integration/...
go test ./pkg/query-service/tests/integration/...
go test ./pkg/query-service/rules/...

View File

@ -144,7 +144,7 @@ services:
condition: on-failure
query-service:
image: signoz/query-service:0.29.3
image: signoz/query-service:0.30.0
command:
[
"-config=/root/config/prometheus.yml",
@ -184,7 +184,7 @@ services:
<<: *clickhouse-depend
frontend:
image: signoz/frontend:0.29.3
image: signoz/frontend:0.30.0
deploy:
restart_policy:
condition: on-failure

View File

@ -162,7 +162,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.29.3}
image: signoz/query-service:${DOCKER_TAG:-0.30.0}
container_name: signoz-query-service
command:
[
@ -201,7 +201,7 @@ services:
<<: *clickhouse-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.29.3}
image: signoz/frontend:${DOCKER_TAG:-0.30.0}
container_name: signoz-frontend
restart: on-failure
depends_on:

6
frontend/.prettierignore Normal file
View File

@ -0,0 +1,6 @@
# Ignore artifacts:
build
coverage
# Ignore all MD files:
**/*.md

View File

@ -7,7 +7,7 @@ const config: Config.InitialOptions = {
moduleFileExtensions: ['ts', 'tsx', 'js', 'json'],
modulePathIgnorePatterns: ['dist'],
moduleNameMapper: {
'\\.(css|less)$': '<rootDir>/__mocks__/cssMock.ts',
'\\.(css|less|scss)$': '<rootDir>/__mocks__/cssMock.ts',
},
globals: {
extensionsToTreatAsEsm: ['.ts'],

View File

@ -84,9 +84,11 @@
"react-helmet-async": "1.3.0",
"react-i18next": "^11.16.1",
"react-intersection-observer": "9.4.1",
"react-markdown": "8.0.7",
"react-query": "^3.34.19",
"react-redux": "^7.2.2",
"react-router-dom": "^5.2.0",
"react-syntax-highlighter": "15.5.0",
"react-use": "^17.3.2",
"react-virtuoso": "4.0.3",
"redux": "^4.0.5",
@ -150,6 +152,7 @@
"@types/react-redux": "^7.1.11",
"@types/react-resizable": "3.0.3",
"@types/react-router-dom": "^5.1.6",
"@types/react-syntax-highlighter": "15.5.7",
"@types/styled-components": "^5.1.4",
"@types/uuid": "^8.3.1",
"@types/webpack": "^5.28.0",
@ -183,6 +186,7 @@
"lint-staged": "^12.5.0",
"portfinder-sync": "^0.0.2",
"prettier": "2.2.1",
"raw-loader": "4.0.2",
"react-hooks-testing-library": "0.6.0",
"react-hot-loader": "^4.13.0",
"react-resizable": "3.0.4",

View File

@ -25,6 +25,7 @@
"delete_processor_description": "Logs are processed sequentially in processors. Deleting a processor may change content of data processed by other processors",
"search_pipeline_placeholder": "Filter Pipelines",
"pipeline_name_placeholder": "Name",
"pipeline_filter_placeholder": "Filter for selecting logs to be processed by this pipeline. Example: service_name = billing",
"pipeline_tags_placeholder": "Tags",
"pipeline_description_placeholder": "Enter description for your pipeline",
"processor_name_placeholder": "Name",

View File

@ -1,7 +1,10 @@
import { ConfigProvider } from 'antd';
import getLocalStorageApi from 'api/browser/localstorage/get';
import setLocalStorageApi from 'api/browser/localstorage/set';
import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import { FeatureKeys } from 'constants/features';
import { LOCALSTORAGE } from 'constants/localStorage';
import ROUTES from 'constants/routes';
import AppLayout from 'container/AppLayout';
import { useThemeConfig } from 'hooks/useDarkMode';
@ -75,14 +78,26 @@ function App(): JSX.Element {
});
useEffect(() => {
if (isLoggedInState && user && user.userId && user.email) {
const isIdentifiedUser = getLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER);
if (
isLoggedInState &&
user &&
user.userId &&
user.email &&
!isIdentifiedUser
) {
setLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER, 'true');
window.analytics.identify(user?.email, {
email: user?.email,
name: user?.name,
});
window.clarity('identify', user.email, user.name);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isLoggedInState]);
}, [isLoggedInState, user]);
useEffect(() => {
trackPageView(pathname);

View File

@ -14,7 +14,11 @@ import {
export const Logout = (): void => {
deleteLocalStorageKey(LOCALSTORAGE.AUTH_TOKEN);
deleteLocalStorageKey(LOCALSTORAGE.IS_LOGGED_IN);
deleteLocalStorageKey(LOCALSTORAGE.IS_IDENTIFIED_USER);
deleteLocalStorageKey(LOCALSTORAGE.REFRESH_AUTH_TOKEN);
deleteLocalStorageKey(LOCALSTORAGE.LOGGED_IN_USER_EMAIL);
deleteLocalStorageKey(LOCALSTORAGE.LOGGED_IN_USER_NAME);
deleteLocalStorageKey(LOCALSTORAGE.CHAT_SUPPORT);
store.dispatch({
type: LOGGED_IN,

View File

@ -30,6 +30,7 @@ import { useNotifications } from 'hooks/useNotifications';
import { mapCompositeQueryFromQuery } from 'lib/newQueryBuilder/queryBuilderMappers/mapCompositeQueryFromQuery';
import { useState } from 'react';
import { useCopyToClipboard } from 'react-use';
import { popupContainer } from 'utils/selectPopupContainer';
import { ExploreHeaderToolTip, SaveButtonText } from './constants';
import MenuItemGenerator from './MenuItemGenerator';
@ -170,6 +171,7 @@ function ExplorerCard({
{viewsData?.data.data && viewsData?.data.data.length && (
<Space>
<Select
getPopupContainer={popupContainer}
loading={isLoading || isRefetching}
showSearch
placeholder="Select a view"
@ -204,6 +206,7 @@ function ExplorerCard({
</Button>
)}
<Popover
getPopupContainer={popupContainer}
placement="bottomLeft"
trigger="click"
content={

View File

@ -1,5 +1,5 @@
import { DeleteOutlined } from '@ant-design/icons';
import { Col, Row, Typography } from 'antd';
import { Col, Row, Tooltip, Typography } from 'antd';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { useDeleteView } from 'hooks/saveViews/useDeleteView';
import { useHandleExplorerTabChange } from 'hooks/useHandleExplorerTabChange';
@ -8,7 +8,11 @@ import { MouseEvent, useCallback } from 'react';
import { MenuItemContainer } from './styles';
import { MenuItemLabelGeneratorProps } from './types';
import { deleteViewHandler, getViewDetailsUsingViewKey } from './utils';
import {
deleteViewHandler,
getViewDetailsUsingViewKey,
trimViewName,
} from './utils';
function MenuItemGenerator({
viewName,
@ -71,12 +75,16 @@ function MenuItemGenerator({
});
};
const newViewName = trimViewName(viewName);
return (
<MenuItemContainer onClick={onLabelClickHandler}>
<Row justify="space-between">
<Col span={22}>
<Row>
<Typography.Text strong>{viewName}</Typography.Text>
<Tooltip title={viewName}>
<Typography.Text strong>{newViewName}</Typography.Text>
</Tooltip>
</Row>
<Row>
<Typography.Text type="secondary">Created by {createdBy}</Typography.Text>

View File

@ -174,3 +174,10 @@ export const deleteViewHandler = ({
},
});
};
// Truncates long view names for display purposes, appending an ellipsis.
// Names of 20 characters or fewer are returned unchanged.
export const trimViewName = (viewName: string): string => {
	const MAX_DISPLAY_LENGTH = 20;
	return viewName.length > MAX_DISPLAY_LENGTH
		? `${viewName.slice(0, MAX_DISPLAY_LENGTH)}...`
		: viewName;
};

View File

@ -60,12 +60,14 @@ function RawLogView({
const isDarkMode = useIsDarkMode();
const isReadOnlyLog = !isLogsExplorerPage || isReadOnly;
const severityText = data.severity_text ? `${data.severity_text} |` : '';
const text = useMemo(
() =>
typeof data.timestamp === 'string'
? `${dayjs(data.timestamp).format()} | ${data.body}`
: `${dayjs(data.timestamp / 1e6).format()} | ${data.body}`,
[data.timestamp, data.body],
? `${dayjs(data.timestamp).format()} | ${severityText} ${data.body}`
: `${dayjs(data.timestamp / 1e6).format()} | ${severityText} ${data.body}`,
[data.timestamp, data.body, severityText],
);
const handleClickExpand = useCallback(() => {
@ -114,11 +116,6 @@ function RawLogView({
[text],
);
const mouseActions = useMemo(
() => ({ onMouseEnter: handleMouseEnter, onMouseLeave: handleMouseLeave }),
[handleMouseEnter, handleMouseLeave],
);
return (
<RawLogViewContainer
onClick={handleClickExpand}
@ -127,8 +124,8 @@ function RawLogView({
$isDarkMode={isDarkMode}
$isReadOnly={isReadOnly}
$isActiveLog={isHighlighted}
// eslint-disable-next-line react/jsx-props-no-spreading
{...mouseActions}
onMouseEnter={handleMouseEnter}
onMouseLeave={handleMouseLeave}
>
{!isReadOnly && (
<ExpandIconWrapper flex="30px">

View File

@ -0,0 +1,34 @@
// Container for a rendered markdown code snippet. Positioned relatively so
// the copy button (.code-copy-btn) can be absolutely anchored to its
// top-right corner.
.code-snippet-container {
	position: relative;
	background-color: rgb(43, 43, 43);
}

// Copy-to-clipboard button overlaid on the snippet.
.code-copy-btn {
	position: absolute;
	top: 8px;
	right: 8px;
	display: flex;
	justify-content: flex-end;
	align-items: center;

	button {
		cursor: pointer;
		// Semi-transparent at rest; fully opaque on hover (see &:hover below).
		background-color: rgba($color: #1d1d1d, $alpha: 0.7);
		color: white;
		border: none;
		padding: 8px;
		border-radius: 3px;
		transition: all 0.1s;

		&:hover {
			background-color: rgba($color: #1d1d1d, $alpha: 1);
		}
	}

	// Applied briefly after a successful copy (green confirmation state);
	// the component toggles the "copied" class.
	&.copied {
		button {
			background-color: rgba($color: #52c41a, $alpha: 1);
		}
	}
}

View File

@ -0,0 +1,32 @@
import './CodeCopyBtn.scss';
import { CheckOutlined, CopyOutlined } from '@ant-design/icons';
import cx from 'classnames';
import { useState } from 'react';
// Copy-to-clipboard button overlaid on markdown code snippets (see
// CodeCopyBtn.scss). Shows a check icon for 1 second after a copy attempt.
export default function CodeCopyBtn({
	children,
}: {
	children: React.ReactNode;
}): JSX.Element {
	// Drives the icon swap and the "copied" CSS class.
	const [isSnippetCopied, setIsSnippetCopied] = useState(false);

	const handleClick = (): void => {
		// NOTE(review): assumes `children` is the array react-markdown passes to
		// a <pre> override, where children[0] is a code element whose first
		// child is the snippet text — TODO confirm against the renderer.
		if (children && Array.isArray(children)) {
			setIsSnippetCopied(true);
			// Reset the icon after 1s whether the clipboard write succeeded or
			// failed (finally covers both outcomes).
			navigator.clipboard.writeText(children[0].props.children[0]).finally(() => {
				setTimeout(() => {
					setIsSnippetCopied(false);
				}, 1000);
			});
		}
	};

	return (
		<div className={cx('code-copy-btn', isSnippetCopied ? 'copied' : '')}>
			<button type="button" onClick={handleClick}>
				{!isSnippetCopied ? <CopyOutlined /> : <CheckOutlined />}
			</button>
		</div>
	);
}

View File

@ -0,0 +1,43 @@
/* eslint-disable react/jsx-props-no-spreading */
import { CodeProps } from 'react-markdown/lib/ast-to-react';
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter';
import { a11yDark } from 'react-syntax-highlighter/dist/cjs/styles/prism';
import CodeCopyBtn from './CodeCopyBtn/CodeCopyBtn';
// <pre> override for react-markdown fenced code blocks: wraps the block
// content and overlays a copy-to-clipboard button on top of it.
function Pre({ children }: { children: React.ReactNode }): JSX.Element {
	const blockContent = children;

	return (
		<pre className="code-snippet-container">
			<CodeCopyBtn>{blockContent}</CodeCopyBtn>
			{blockContent}
		</pre>
	);
}
// <code> override for react-markdown: fenced blocks with a recognized
// `language-*` class are syntax-highlighted; inline or untagged code falls
// back to a plain <code> element.
function Code({
	node,
	inline,
	className = 'blog-code',
	children,
	...props
}: CodeProps): JSX.Element {
	const languageMatch = /language-(\w+)/.exec(className || '');

	// Inline code, or a block without a language tag: render as-is.
	if (inline || !languageMatch) {
		return (
			<code className={className} {...props}>
				{children}
			</code>
		);
	}

	// Strip the trailing newline so the highlighter does not render an
	// empty final line.
	const snippet = String(children).replace(/\n$/, '');

	return (
		<SyntaxHighlighter
			// eslint-disable-next-line @typescript-eslint/ban-ts-comment
			// @ts-ignore
			style={a11yDark}
			language={languageMatch[1]}
			PreTag="div"
			{...props}
		>
			{snippet}
		</SyntaxHighlighter>
	);
}

export { Code, Pre };

View File

@ -7,6 +7,7 @@ import { Tooltip } from 'antd';
import { themeColors } from 'constants/theme';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useMemo } from 'react';
import { popupContainer } from 'utils/selectPopupContainer';
import { style } from './styles';
@ -61,7 +62,7 @@ function TextToolTip({
);
return (
<Tooltip overlay={overlay}>
<Tooltip getTooltipContainer={popupContainer} overlay={overlay}>
{useFilledIcon ? (
<QuestionCircleFilled style={iconStyle} />
) : (

View File

@ -14,4 +14,5 @@ export enum LOCALSTORAGE {
LOGGED_IN_USER_NAME = 'LOGGED_IN_USER_NAME',
LOGGED_IN_USER_EMAIL = 'LOGGED_IN_USER_EMAIL',
CHAT_SUPPORT = 'CHAT_SUPPORT',
IS_IDENTIFIED_USER = 'IS_IDENTIFIED_USER',
}

View File

@ -74,7 +74,7 @@ export const mapOfOperators = {
traces: tracesAggregateOperatorOptions,
};
export const mapOfFilters: Record<DataSource, QueryAdditionalFilter[]> = {
export const mapOfQueryFilters: Record<DataSource, QueryAdditionalFilter[]> = {
metrics: [
// eslint-disable-next-line sonarjs/no-duplicate-string
{ text: 'Aggregation interval', field: 'stepInterval' },
@ -94,6 +94,24 @@ export const mapOfFilters: Record<DataSource, QueryAdditionalFilter[]> = {
],
};
const commonFormulaFilters: QueryAdditionalFilter[] = [
{
text: 'Having',
field: 'having',
},
{ text: 'Order by', field: 'orderBy' },
{ text: 'Limit', field: 'limit' },
];
export const mapOfFormulaToFilters: Record<
DataSource,
QueryAdditionalFilter[]
> = {
metrics: commonFormulaFilters,
logs: commonFormulaFilters,
traces: commonFormulaFilters,
};
export const REDUCE_TO_VALUES: SelectOption<ReduceOperators, string>[] = [
{ value: 'last', label: 'Latest of values in timeframe' },
{ value: 'sum', label: 'Sum of values in timeframe' },

View File

@ -2,6 +2,7 @@ import { LeftOutlined, RightOutlined } from '@ant-design/icons';
import { Button, Select } from 'antd';
import { DEFAULT_PER_PAGE_OPTIONS, Pagination } from 'hooks/queryPagination';
import { memo, useMemo } from 'react';
import { popupContainer } from 'utils/selectPopupContainer';
import { defaultSelectStyle } from './config';
import { Container } from './styles';
@ -51,6 +52,7 @@ function Controls({
loading={isLoading}
value={countPerPage}
onChange={handleCountItemsPerPageChange}
getPopupContainer={popupContainer}
>
{perPageOptions.map((count) => (
<Select.Option

View File

@ -1,3 +1,5 @@
import './styles.scss';
import { Button, Divider, Space, Typography } from 'antd';
import getNextPrevId from 'api/errors/getNextPrevId';
import Editor from 'components/Editor';
@ -161,7 +163,9 @@ function ErrorDetails(props: ErrorDetailsProps): JSX.Element {
</DashedContainer>
<Typography.Title level={4}>{t('stack_trace')}</Typography.Title>
<Editor onChange={(): void => {}} value={stackTraceValue} readOnly />
<div className="error-container">
<Editor value={stackTraceValue} readOnly />
</div>
<EditorContainer>
<Space direction="vertical">

View File

@ -0,0 +1,3 @@
// Fixed-height container for the read-only stack-trace editor on the
// error-details page, so long traces scroll instead of growing the page.
.error-container {
	height: 50vh;
}

View File

@ -20,6 +20,7 @@ import { ErrorResponse, SuccessResponse } from 'types/api';
import { Widgets } from 'types/api/dashboard/getAll';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import AppReducer from 'types/reducer/app';
import { popupContainer } from 'utils/selectPopupContainer';
import {
errorTooltipPosition,
@ -177,6 +178,7 @@ function WidgetHeader({
return (
<WidgetHeaderContainer>
<Dropdown
getPopupContainer={popupContainer}
destroyPopupOnHide
open={isOpen}
onOpenChange={setIsOpen}

View File

@ -34,6 +34,7 @@ import AppActions from 'types/actions';
import { GET_ALL_DASHBOARD_SUCCESS } from 'types/actions/dashboard';
import { Dashboard } from 'types/api/dashboard/getAll';
import AppReducer from 'types/reducer/app';
import { popupContainer } from 'utils/selectPopupContainer';
import ImportJSON from './ImportJSON';
import { ButtonContainer, NewDashboardButton, TableContainer } from './styles';
@ -280,6 +281,7 @@ function ListOfAllDashboard(): JSX.Element {
/>
{newDashboard && (
<Dropdown
getPopupContainer={popupContainer}
disabled={isDashboardListLoading}
trigger={['click']}
menu={menu}

View File

@ -11,6 +11,7 @@ import PopoverContent from 'pages/Logs/PopoverContent';
import { useEventSource } from 'providers/EventSource';
import { useCallback } from 'react';
import { DataSource, StringOperators } from 'types/common/queryBuilder';
import { popupContainer } from 'utils/selectPopupContainer';
import { SpinnerWrapper, Wrapper } from './styles';
@ -43,6 +44,7 @@ function ListViewPanel(): JSX.Element {
return (
<Wrapper>
<Select
getPopupContainer={popupContainer}
style={defaultSelectStyle}
value={config.format?.value}
onChange={config.format?.onChange}
@ -53,7 +55,11 @@ function ListViewPanel(): JSX.Element {
</Select>
{isFormatButtonVisible && (
<Popover placement="right" content={renderPopoverContent}>
<Popover
getPopupContainer={popupContainer}
placement="right"
content={renderPopoverContent}
>
<Button>Format</Button>
</Popover>
)}

View File

@ -2,7 +2,7 @@ import {
initialQueriesMap,
initialQueryBuilderFormValuesMap,
} from 'constants/queryBuilder';
import { FILTERS } from 'container/QueryBuilder/filters/OrderByFilter/config';
import { ORDERBY_FILTERS } from 'container/QueryBuilder/filters/OrderByFilter/config';
import {
BaseAutocompleteData,
DataTypes,
@ -14,7 +14,7 @@ export const defaultLiveQueryDataConfig: Partial<IBuilderQuery> = {
aggregateOperator: LogsAggregatorOperator.NOOP,
disabled: true,
pageSize: 10,
orderBy: [{ columnName: 'timestamp', order: FILTERS.DESC }],
orderBy: [{ columnName: 'timestamp', order: ORDERBY_FILTERS.DESC }],
};
type GetDefaultCompositeQueryParams = {

View File

@ -198,6 +198,7 @@ export const aggregateAttributesResourcesToString = (logData: ILog): string => {
traceId: logData.traceId,
attributes: {},
resources: {},
severity_text: logData.severity_text,
};
Object.keys(logData).forEach((key) => {

View File

@ -30,6 +30,7 @@ import { TLogsLiveTailState } from 'types/api/logs/liveTail';
import { ILog } from 'types/api/logs/log';
import { GlobalReducer } from 'types/reducer/globalTime';
import { ILogsReducer } from 'types/reducer/logs';
import { popupContainer } from 'utils/selectPopupContainer';
import { TIME_PICKER_OPTIONS } from './config';
import { StopContainer, TimePickerCard, TimePickerSelect } from './styles';
@ -165,6 +166,7 @@ function LogLiveTail({ getLogsAggregate }: Props): JSX.Element {
const OptionsPopOverContent = useMemo(
() => (
<TimePickerSelect
getPopupContainer={popupContainer}
disabled={liveTail === 'PLAYING'}
value={liveTailStartRange}
onChange={(value): void => {
@ -236,6 +238,7 @@ function LogLiveTail({ getLogsAggregate }: Props): JSX.Element {
)}
<Popover
getPopupContainer={popupContainer}
placement="bottomRight"
title="Select Live Tail Timing"
trigger="click"

View File

@ -1,13 +1,9 @@
import { Button, Form, Input, Space, Tooltip, Typography } from 'antd';
import setLocalStorageApi from 'api/browser/localstorage/set';
import getUserVersion from 'api/user/getVersion';
import loginApi from 'api/user/login';
import loginPrecheckApi from 'api/user/loginPrecheck';
import afterLogin from 'AppRoutes/utils';
import { FeatureKeys } from 'constants/features';
import { LOCALSTORAGE } from 'constants/localStorage';
import ROUTES from 'constants/routes';
import useFeatureFlag from 'hooks/useFeatureFlag';
import { useNotifications } from 'hooks/useNotifications';
import history from 'lib/history';
import { useEffect, useState } from 'react';
@ -42,9 +38,6 @@ function Login({
const { t } = useTranslation(['login']);
const [isLoading, setIsLoading] = useState<boolean>(false);
const { user } = useSelector<AppState, AppReducer>((state) => state.app);
const isChatSupportEnabled: boolean | undefined = useFeatureFlag(
FeatureKeys.CHAT_SUPPORT,
)?.active;
const [precheckResult, setPrecheckResult] = useState<PrecheckResultType>({
sso: false,
@ -165,21 +158,12 @@ function Login({
password,
});
if (response.statusCode === 200) {
const user = await afterLogin(
await afterLogin(
response.payload.userId,
response.payload.accessJwt,
response.payload.refreshJwt,
);
if (user) {
setLocalStorageApi(LOCALSTORAGE.LOGGED_IN_USER_NAME, user.payload?.name);
setLocalStorageApi(LOCALSTORAGE.LOGGED_IN_USER_EMAIL, user.payload?.email);
setLocalStorageApi(
LOCALSTORAGE.CHAT_SUPPORT,
(isChatSupportEnabled || '').toString(),
);
}
history.push(ROUTES.APPLICATION);
} else {
notifications.error({

View File

@ -1,5 +1,5 @@
import { Button, Typography } from 'antd';
import { FILTERS } from 'container/QueryBuilder/filters/OrderByFilter/config';
import { ORDERBY_FILTERS } from 'container/QueryBuilder/filters/OrderByFilter/config';
import { ShowButtonWrapper } from './styles';
@ -19,7 +19,7 @@ function ShowButton({
return (
<ShowButtonWrapper>
<Typography>
Showing 10 lines {order === FILTERS.ASC ? 'after' : 'before'} match
Showing 10 lines {order === ORDERBY_FILTERS.ASC ? 'after' : 'before'} match
</Typography>
<Button
size="small"

View File

@ -1,7 +1,7 @@
import RawLogView from 'components/Logs/RawLogView';
import Spinner from 'components/Spinner';
import { PANEL_TYPES } from 'constants/queryBuilder';
import { FILTERS } from 'container/QueryBuilder/filters/OrderByFilter/config';
import { ORDERBY_FILTERS } from 'container/QueryBuilder/filters/OrderByFilter/config';
import { useGetExplorerQueryRange } from 'hooks/queryBuilder/useGetExplorerQueryRange';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { memo, useCallback, useEffect, useMemo, useState } from 'react';
@ -87,7 +87,7 @@ function LogsContextList({
timestamp: item.timestamp,
}));
if (order === FILTERS.ASC) {
if (order === ORDERBY_FILTERS.ASC) {
const reversedCurrentLogs = currentLogs.reverse();
setLogs((prevLogs) => [...reversedCurrentLogs, ...prevLogs]);
} else {
@ -111,7 +111,7 @@ function LogsContextList({
const handleShowNextLines = useCallback(() => {
if (isDisabledFetch) return;
const log = order === FILTERS.ASC ? firstLog : lastLog;
const log = order === ORDERBY_FILTERS.ASC ? firstLog : lastLog;
const newRequestData = getRequestData({
stagedQueryData: currentStagedQueryData,
@ -167,7 +167,7 @@ function LogsContextList({
return (
<>
{order === FILTERS.ASC && (
{order === ORDERBY_FILTERS.ASC && (
<ShowButton
isLoading={isFetching}
isDisabled={isDisabledFetch}
@ -186,11 +186,11 @@ function LogsContextList({
initialTopMostItemIndex={0}
data={logs}
itemContent={getItemContent}
followOutput={order === FILTERS.DESC}
followOutput={order === ORDERBY_FILTERS.DESC}
/>
</ListContainer>
{order === FILTERS.DESC && (
{order === ORDERBY_FILTERS.DESC && (
<ShowButton
isLoading={isFetching}
isDisabled={isDisabledFetch}

View File

@ -3,7 +3,7 @@ import { Typography } from 'antd';
import Modal from 'antd/es/modal/Modal';
import RawLogView from 'components/Logs/RawLogView';
import LogsContextList from 'container/LogsContextList';
import { FILTERS } from 'container/QueryBuilder/filters/OrderByFilter/config';
import { ORDERBY_FILTERS } from 'container/QueryBuilder/filters/OrderByFilter/config';
import QueryBuilderSearch from 'container/QueryBuilder/filters/QueryBuilderSearch';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { memo, useCallback, useState } from 'react';
@ -87,7 +87,7 @@ function LogsExplorerContext({
/>
)}
<LogsContextList
order={FILTERS.ASC}
order={ORDERBY_FILTERS.ASC}
filters={filters}
isEdit={isEdit}
log={log}
@ -103,7 +103,7 @@ function LogsExplorerContext({
/>
</LogContainer>
<LogsContextList
order={FILTERS.DESC}
order={ORDERBY_FILTERS.DESC}
filters={filters}
isEdit={isEdit}
log={log}

View File

@ -20,6 +20,7 @@ import {
} from 'types/actions/logs';
import { GlobalReducer } from 'types/reducer/globalTime';
import { ILogsReducer } from 'types/reducer/logs';
import { popupContainer } from 'utils/selectPopupContainer';
import SearchFields from './SearchFields';
import { Container, DropDownContainer } from './styles';
@ -174,6 +175,7 @@ function SearchFilter({
return (
<Container>
<Popover
getPopupContainer={popupContainer}
placement="bottom"
content={
<DropDownContainer>

View File

@ -1,8 +1,9 @@
import './GoLang.styles.scss';
import { MDXProvider } from '@mdx-js/react';
import { Form, Input } from 'antd';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import Header from 'container/OnboardingContainer/common/Header/Header';
import ReactMarkdown from 'react-markdown';
import ConnectionStatus from '../common/ConnectionStatus/ConnectionStatus';
import GoLangDocs from './goLang.md';
@ -44,9 +45,14 @@ export default function GoLang({
</div>
<div className="content-container">
<MDXProvider>
<GoLangDocs />
</MDXProvider>
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{GoLangDocs}
</ReactMarkdown>
</div>
</div>
)}

View File

@ -4,14 +4,14 @@
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
1. **Install Dependencies**<br></br>
1. **Install Dependencies**
Dependencies related to OpenTelemetry exporter and SDK have to be installed first. Note that we are assuming you are using `gin` request router. If you are using other request routers, check out the [corresponding package](#request-routers).
Dependencies related to OpenTelemetry exporter and SDK have to be installed first. Note that we are assuming you are using `gin` request router. If you are using other request routers, check out the [corresponding package](https://signoz.io/docs/instrumentation/golang/#request-routers).
Run the below commands after navigating to the application source folder:
@ -24,7 +24,7 @@ From VMs, there are two ways to send data to SigNoz Cloud.
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
```
2. **Declare environment variables for configuring OpenTelemetry**<br></br>
2. **Declare environment variables for configuring OpenTelemetry**
Declare the following global variables in `main.go` which we will use to configure OpenTelemetry:
@ -36,7 +36,7 @@ From VMs, there are two ways to send data to SigNoz Cloud.
)
```
3. **Instrument your Go application with OpenTelemetry**<br></br>
3. **Instrument your Go application with OpenTelemetry**
To configure your application to send data we will need a function to initialize OpenTelemetry. Add the following snippet of code in your `main.go` file.
@ -98,7 +98,7 @@ From VMs, there are two ways to send data to SigNoz Cloud.
}
```
4. **Initialize the tracer in main.go**<br></br>
4. **Initialize the tracer in main.go**
Modify the main function to initialise the tracer in `main.go`. Initiate the tracer at the very beginning of our main function.
@ -111,7 +111,7 @@ From VMs, there are two ways to send data to SigNoz Cloud.
}
```
5. **Add the OpenTelemetry Gin middleware**<br></br>
5. **Add the OpenTelemetry Gin middleware**
Configure Gin to use the middleware by adding the following lines in `main.go`.
@ -129,7 +129,7 @@ From VMs, there are two ways to send data to SigNoz Cloud.
}
```
6. **Set environment variables and run your Go Gin application**<br></br>
6. **Set environment variables and run your Go Gin application**
The run command must have some environment variables to send data to SigNoz cloud. The run command:
@ -143,14 +143,14 @@ From VMs, there are two ways to send data to SigNoz Cloud.
`OTEL_EXPORTER_OTLP_HEADERS`: `signoz-access-token=<SIGNOZ-INGESTION-TOKEN>`. Update `<SIGNOZ-INGESTION-TOKEN>` with the ingestion token provided by SigNoz
`OTEL_EXPORTER_OTLP_ENDPOINT`: ingest.{region}.signoz.cloud:443. Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
`OTEL_EXPORTER_OTLP_ENDPOINT`: ingest.{region}.signoz.cloud:443. Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary accordingly.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
---
@ -161,9 +161,9 @@ OTel Collector binary helps to collect logs, hostmetrics, resource and infra att
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Golang application.
1. **Install Dependencies**<br></br>
1. **Install Dependencies**
Dependencies related to OpenTelemetry exporter and SDK have to be installed first. Note that we are assuming you are using `gin` request router. If you are using other request routers, check out the [corresponding package](#request-routers).
Dependencies related to OpenTelemetry exporter and SDK have to be installed first. Note that we are assuming you are using `gin` request router. If you are using other request routers, check out the [corresponding package](https://signoz.io/docs/instrumentation/golang/#request-routers).
Run the below commands after navigating to the application source folder:
@ -176,7 +176,7 @@ You can find instructions to install OTel Collector binary [here](https://signoz
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
```
2. **Declare environment variables for configuring OpenTelemetry**<br></br>
2. **Declare environment variables for configuring OpenTelemetry**
Declare the following global variables in `main.go` which we will use to configure OpenTelemetry:
@ -188,7 +188,7 @@ You can find instructions to install OTel Collector binary [here](https://signoz
)
```
3. **Instrument your Go application with OpenTelemetry**<br></br>
3. **Instrument your Go application with OpenTelemetry**
To configure your application to send data we will need a function to initialize OpenTelemetry. Add the following snippet of code in your `main.go` file.
@ -249,7 +249,7 @@ You can find instructions to install OTel Collector binary [here](https://signoz
return exporter.Shutdown
}
4. **Initialize the tracer in main.go**<br></br>
4. **Initialize the tracer in main.go**
Modify the main function to initialise the tracer in `main.go`. Initiate the tracer at the very beginning of our main function.
@ -262,7 +262,7 @@ You can find instructions to install OTel Collector binary [here](https://signoz
}
```
5. **Add the OpenTelemetry Gin middleware**<br></br>
5. **Add the OpenTelemetry Gin middleware**
Configure Gin to use the middleware by adding the following lines in `main.go`.
@ -280,7 +280,7 @@ You can find instructions to install OTel Collector binary [here](https://signoz
}
```
6. **Set environment variables and run your Go Gin application**<br></br>
6. **Set environment variables and run your Go Gin application**
The run command must have some environment variables to send data to SigNoz. The run command:
@ -288,20 +288,20 @@ You can find instructions to install OTel Collector binary [here](https://signoz
SERVICE_NAME=goGinApp INSECURE_MODE=true OTEL_EXPORTER_OTLP_ENDPOINT=localhost:4317 go run main.go
```
If you want to update your `service_name`, you can modify the `SERVICE_NAME` variable.<br></br>
If you want to update your `service_name`, you can modify the `SERVICE_NAME` variable.
`SERVICE_NAME`: goGinApp (you can name it whatever you want)
---
### Applications Deployed on Kubernetes
For Golang application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](/docs/tutorial/kubernetes-infra-metrics/).
For Golang application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/).
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Golang instrumentation by following the below steps:
1. **Install Dependencies**<br></br>
1. **Install Dependencies**
Dependencies related to OpenTelemetry exporter and SDK have to be installed first. Note that we are assuming you are using `gin` request router. If you are using other request routers, check out the [corresponding package](#request-routers).
Dependencies related to OpenTelemetry exporter and SDK have to be installed first. Note that we are assuming you are using `gin` request router. If you are using other request routers, check out the [corresponding package](https://signoz.io/docs/instrumentation/golang/#request-routers).
Run the below commands after navigating to the application source folder:
@ -314,7 +314,7 @@ Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Go
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
```
2. **Declare environment variables for configuring OpenTelemetry**<br></br>
2. **Declare environment variables for configuring OpenTelemetry**
Declare the following global variables in `main.go` which we will use to configure OpenTelemetry:
@ -326,7 +326,7 @@ Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Go
)
```
3. **Instrument your Go application with OpenTelemetry**<br></br>
3. **Instrument your Go application with OpenTelemetry**
To configure your application to send data we will need a function to initialize OpenTelemetry. Add the following snippet of code in your `main.go` file.
@ -387,7 +387,7 @@ Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Go
return exporter.Shutdown
}
4. **Initialize the tracer in main.go**<br></br>
4. **Initialize the tracer in main.go**
Modify the main function to initialise the tracer in `main.go`. Initiate the tracer at the very beginning of our main function.
@ -400,7 +400,7 @@ Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Go
}
```
5. **Add the OpenTelemetry Gin middleware**<br></br>
5. **Add the OpenTelemetry Gin middleware**
Configure Gin to use the middleware by adding the following lines in `main.go`.
@ -418,7 +418,7 @@ Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Go
}
```
6. **Set environment variables and run your Go Gin application**<br></br>
6. **Set environment variables and run your Go Gin application**
The run command must have some environment variables to send data to SigNoz. The run command:
@ -426,5 +426,5 @@ Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Go
SERVICE_NAME=goGinApp INSECURE_MODE=true OTEL_EXPORTER_OTLP_ENDPOINT=localhost:4317 go run main.go
```
If you want to update your `service_name`, you can modify the `SERVICE_NAME` variable.<br></br>
If you want to update your `service_name`, you can modify the `SERVICE_NAME` variable.
`SERVICE_NAME`: goGinApp (you can name it whatever you want)

View File

@ -1,10 +1,12 @@
import './Java.styles.scss';
import { MDXProvider } from '@mdx-js/react';
import { Form, Input, Select } from 'antd';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import Header from 'container/OnboardingContainer/common/Header/Header';
import { useEffect, useState } from 'react';
import ReactMarkdown from 'react-markdown';
import { trackEvent } from 'utils/segmentAnalytics';
import { popupContainer } from 'utils/selectPopupContainer';
import ConnectionStatus from '../common/ConnectionStatus/ConnectionStatus';
import JavaDocs from './md-docs/java.md';
@ -25,7 +27,9 @@ export default function Java({
activeStep: number;
}): JSX.Element {
const [selectedFrameWork, setSelectedFrameWork] = useState('spring_boot');
const [selectedFrameWorkDocs, setSelectedFrameWorkDocs] = useState(
SprintBootDocs,
);
const [form] = Form.useForm();
useEffect(() => {
@ -36,16 +40,22 @@ export default function Java({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [selectedFrameWork]);
const renderDocs = (): JSX.Element => {
const handleFrameworkChange = (selectedFrameWork: string): void => {
setSelectedFrameWork(selectedFrameWork);
switch (selectedFrameWork) {
case 'tomcat':
return <TomcatDocs />;
setSelectedFrameWorkDocs(TomcatDocs);
break;
case 'spring_boot':
return <SprintBootDocs />;
setSelectedFrameWorkDocs(SprintBootDocs);
break;
case 'jboss':
return <JbossDocs />;
setSelectedFrameWorkDocs(JbossDocs);
break;
default:
return <JavaDocs />;
setSelectedFrameWorkDocs(JavaDocs);
break;
}
};
@ -66,10 +76,11 @@ export default function Java({
<div className="label"> Select Framework </div>
<Select
getPopupContainer={popupContainer}
defaultValue="spring_boot"
style={{ minWidth: 120 }}
placeholder="Select Framework"
onChange={(value): void => setSelectedFrameWork(value)}
onChange={(value): void => handleFrameworkChange(value)}
options={[
{
value: 'spring_boot',
@ -108,7 +119,14 @@ export default function Java({
</div>
<div className="content-container">
<MDXProvider>{renderDocs()}</MDXProvider>
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{selectedFrameWorkDocs}
</ReactMarkdown>
</div>
</div>
)}

View File

@ -12,8 +12,8 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
@ -29,21 +29,22 @@ Step 2. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<app_name> \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=SIGNOZ_INGESTION_KEY" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" \
OTEL_EXPORTER_OTLP_ENDPOINT=https://ingest.{region}.signoz.cloud:443 \
java -javaagent:$PWD/opentelemetry-javaagent.jar -jar <my-app>.jar
java -javaagent:<path>/opentelemetry-javaagent.jar -jar <my-app>.jar
```
- `<app_name>` is the name for your application
- `SIGNOZ_INGESTION_KEY` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
- `<SIGNOZ_INGESTION_KEY>` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
- `path` - Update it to the path of your downloaded Java JAR agent.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
---
@ -54,43 +55,43 @@ OTel Collector binary helps to collect logs, hostmetrics, resource and infra att
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Java application.
Step 1. Download OTel java binary agent<br></br>
Step 1. Download OTel java binary agent
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
```
Step 2. Run your application<br></br>
Step 2. Run your application
```bash
java -javaagent:$PWD/opentelemetry-javaagent.jar -jar <myapp>.jar
java -javaagent:<path>/opentelemetry-javaagent.jar -jar <myapp>.jar
```
- `<myapp>` is the name of your application jar file
- In case you download `opentelemetry-javaagent.jar` file in different directory than that of the project, replace `$PWD` with the path of the otel jar file.
- `path` - Update it to the path of your downloaded Java JAR agent.
---
### Applications Deployed on Kubernetes
For Java application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](/docs/tutorial/kubernetes-infra-metrics/).
For Java application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/).
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry java instrumentation by following the below steps:
1. Download otel java binary<br></br>
Step 1. Download otel java binary
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
```
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
```
2. Run your application<br></br>
Step 2. Run your application
```bash
java -javaagent:$PWD/opentelemetry-javaagent.jar -jar <myapp>.jar
```
```bash
java -javaagent:<path>/opentelemetry-javaagent.jar -jar <myapp>.jar
```
- `<myapp>` is the name of your application jar file
- In case you download `opentelemetry-javaagent.jar` file in different directory than that of the project, replace `$PWD` with the path of the otel jar file.
- `<myapp>` is the name of your application jar file
- `path` - Update it to the path of your downloaded Java JAR agent.
3. Make sure to dockerise your application along with OpenTelemetry instrumentation.
Step 3. Make sure to dockerise your application along with OpenTelemetry instrumentation.

View File

@ -12,8 +12,8 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
OpenTelemetry Java agent can send traces directly to SigNoz Cloud.
@ -35,25 +35,25 @@ Step 3. Update `JAVA_OPTS` environment variable
Update `JAVA_OPTS` environment variable with configurations required to send data to SigNoz cloud in your configuration file.
```bash
JAVA_OPTS="-javaagent:/path/opentelemetry-javaagent.jar
JAVA_OPTS="-javaagent:/<path>/opentelemetry-javaagent.jar
-Dotel.exporter.otlp.endpoint=https://ingest.{region}.signoz.cloud:443
-Dotel.exporter.otlp.headers="signoz-access-token=SIGNOZ_INGESTION_KEY"
-Dotel.exporter.otlp.headers="signoz-access-token=<SIGNOZ_INGESTION_KEY>"
-Dotel.resource.attributes="service.name=<app_name>""
```
You need to replace the following things based on your environment:<br></br>
You need to replace the following things based on your environment:
- `path` - Update it to the path of your downloaded Java JAR agent.<br></br>
- `<path>` - Update it to the path of your downloaded Java JAR agent.
- `<app_name>` is the name for your application
- `SIGNOZ_INGESTION_KEY` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
- `<SIGNOZ_INGESTION_KEY>` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
Step 4. [Optional] Write the output/logs of standalone.sh script to a file nohup.out as a background thread
@ -69,7 +69,7 @@ OTel Collector binary helps to collect logs, hostmetrics, resource and infra att
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Java application.
Step 1. Download OTel java binary agent<br></br>
Step 1. Download OTel java binary agent
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
```
@ -90,17 +90,17 @@ JAVA_OPTS="-javaagent:/path/opentelemetry-javaagent.jar"
```
where,
- `path` - Update it to the path of your downloaded Java JAR agent.<br></br>
- `path` - Update it to the path of your downloaded Java JAR agent.
---
### Applications Deployed on Kubernetes
For Java application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](/docs/tutorial/kubernetes-infra-metrics/).
For Java application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/).
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry java instrumentation by following the below steps:
Step 1. Download otel java binary<br></br>
Step 1. Download otel java binary
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
@ -121,7 +121,7 @@ JAVA_OPTS="-javaagent:/path/opentelemetry-javaagent.jar"
```
where,
- `path` - Update it to the path of your downloaded Java JAR agent.<br></br>
- `path` - Update it to the path of your downloaded Java JAR agent.
Step 4. Make sure to dockerise your application along with OpenTelemetry instrumentation.

View File

@ -12,8 +12,8 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
OpenTelemetry Java agent can send traces directly to SigNoz Cloud.
@ -27,21 +27,22 @@ wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releas
Step 2. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<app_name> \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=SIGNOZ_INGESTION_KEY" \
OTEL_RESOURCE_ATTRIBUTES=service.name=<myapp> \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" \
OTEL_EXPORTER_OTLP_ENDPOINT=https://ingest.{region}.signoz.cloud:443 \
java -javaagent:$PWD/opentelemetry-javaagent.jar -jar <my-app>.jar
java -javaagent:<path>/opentelemetry-javaagent.jar -jar <myapp>.jar
```
- `<app_name>` is the name for your application
- `SIGNOZ_INGESTION_KEY` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
- `<myapp>` is the name for your application
- `<path>` - update it to the path of your downloaded Java JAR agent
- `<SIGNOZ_INGESTION_KEY>` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
---
@ -51,41 +52,41 @@ OTel Collector binary helps to collect logs, hostmetrics, resource and infra att
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Java application.
Step 1. Download OTel java binary agent<br></br>
Step 1. Download OTel java binary agent
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
```
Step 2. Run your application<br></br>
Step 2. Run your application
```bash
java -javaagent:$PWD/opentelemetry-javaagent.jar -jar <myapp>.jar
java -javaagent:<path>/opentelemetry-javaagent.jar -jar <myapp>.jar
```
- `<myapp>` is the name of your application jar file
- In case you download `opentelemetry-javaagent.jar` file in different directory than that of the project, replace `$PWD` with the path of the otel jar file.
- `<myapp>` is the name of your application
- `<path>` - update it to the path of your downloaded Java JAR agent
---
### Applications Deployed on Kubernetes
For Java application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](/docs/tutorial/kubernetes-infra-metrics/).
For Java application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/).
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry java instrumentation by following the below steps:
1. Download otel java binary<br></br>
Step 1. Download otel java binary
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
```
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
```
2. Run your application<br></br>
Step 2. Run your application
```bash
java -javaagent:$PWD/opentelemetry-javaagent.jar -jar <myapp>.jar
```
```bash
java -javaagent:<path>/opentelemetry-javaagent.jar -jar <myapp>.jar
```
- `<myapp>` is the name of your application jar file
- In case you download `opentelemetry-javaagent.jar` file in different directory than that of the project, replace `$PWD` with the path of the otel jar file.
- `<myapp>` is the name of your application
- `<path>` - update it to the path of your downloaded Java JAR agent
3. Make sure to dockerise your application along with OpenTelemetry instrumentation.
Step 3. Make sure to dockerise your application along with OpenTelemetry instrumentation.

View File

@ -12,8 +12,8 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
OpenTelemetry Java agent can send traces directly to SigNoz Cloud.
@ -26,28 +26,27 @@ wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releas
Step 2. Enable the instrumentation agent and run your application
If you run your `.war` package by putting in `webapps` folder, just add `setenv.sh` in your Tomcat `bin` folder.
This should set these environment variables and start sending telemetry data to SigNoz Cloud.
If you run your `.war` package by putting in `webapps` folder, just add `setenv.sh` in your Tomcat `bin` folder. Inside the `setenv.sh` file, add the following environment variables:
```bash
export CATALINA_OPTS="$CATALINA_OPTS -javaagent:/path/to/opentelemetry-javaagent.jar"
export OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=SIGNOZ_INGESTION_KEY"
export CATALINA_OPTS="$CATALINA_OPTS -javaagent:/<path>/opentelemetry-javaagent.jar"
export OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>"
export OTEL_EXPORTER_OTLP_ENDPOINT=https://ingest.{region}.signoz.cloud:443
export OTEL_RESOURCE_ATTRIBUTES=service.name=<app_name>
```
- `<app_name>` is the name for your application
- `SIGNOZ_INGESTION_KEY` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
- `<path>` - update it to the path of your downloaded Java JAR agent.
- `<SIGNOZ_INGESTION_KEY>` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary accordingly.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
---
@ -57,12 +56,12 @@ OTel Collector binary helps to collect logs, hostmetrics, resource and infra att
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Java application.
Step 1. Download OTel java binary agent<br></br>
Step 1. Download OTel java binary agent
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
```
Step 2. Enable the instrumentation agent and run your application<br></br>
Step 2. Enable the instrumentation agent and run your application
If you run your `.war` package by putting in `webapps` folder, just add `setenv.sh` in your Tomcat `bin` folder.
@ -70,37 +69,37 @@ This should set these environment variables and start sending telemetry data to
```bash
export CATALINA_OPTS="$CATALINA_OPTS -javaagent:/path/to/opentelemetry-javaagent.jar"
export CATALINA_OPTS="$CATALINA_OPTS -javaagent:/<path>/opentelemetry-javaagent.jar"
```
- path/to - Update it to the path of your downloaded Java JAR agent.
- `<path>` - Update it to the path of your downloaded Java JAR agent.
---
### Applications Deployed on Kubernetes
For Java application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](/docs/tutorial/kubernetes-infra-metrics/).
For Java application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/).
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry java instrumentation by following the below steps:
1. Download otel java binary<br></br>
Step 1. Download otel java binary
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
```
```bash
wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
```
2. Enable the instrumentation agent and run your application<br></br>
Step 2. Enable the instrumentation agent and run your application
If you run your `.war` package by putting in `webapps` folder, just add `setenv.sh` in your Tomcat `bin` folder.
This should set the environment variable and start sending telemetry data to SigNoz Cloud.
```bash
export CATALINA_OPTS="$CATALINA_OPTS -javaagent:/path/to/opentelemetry-javaagent.jar"
```
If you run your `.war` package by putting in `webapps` folder, just add `setenv.sh` in your Tomcat `bin` folder.
- path/to - Update it to the path of your downloaded Java JAR agent.
This should set the environment variable and start sending telemetry data to SigNoz Cloud.
3. Make sure to dockerise your application along with OpenTelemetry instrumentation.
```bash
export CATALINA_OPTS="$CATALINA_OPTS -javaagent:/<path>/opentelemetry-javaagent.jar"
```
You can validate if your application is sending traces to SigNoz cloud by following the instructions [here](#validating-instrumentation-by-checking-for-traces).
- `<path>` - Update it to the path of your downloaded Java JAR agent.
Step 3. Make sure to dockerise your application along with OpenTelemetry instrumentation.
You can validate if your application is sending traces to SigNoz cloud by following the instructions [here](https://signoz.io/docs/instrumentation/tomcat/#validating-instrumentation-by-checking-for-traces).

View File

@ -1,10 +1,12 @@
import './Javascript.styles.scss';
import { MDXProvider } from '@mdx-js/react';
import { Form, Input, Select } from 'antd';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import Header from 'container/OnboardingContainer/common/Header/Header';
import { useEffect, useState } from 'react';
import ReactMarkdown from 'react-markdown';
import { trackEvent } from 'utils/segmentAnalytics';
import { popupContainer } from 'utils/selectPopupContainer';
import ConnectionStatus from '../common/ConnectionStatus/ConnectionStatus';
import ExpressDocs from './md-docs/express.md';
@ -22,8 +24,10 @@ export default function Javascript({
}: {
activeStep: number;
}): JSX.Element {
const [selectedFrameWork, setSelectedFrameWork] = useState('nodejs');
const [selectedFrameWork, setSelectedFrameWork] = useState('express');
const [selectedFrameWorkDocs, setSelectedFrameWorkDocs] = useState(
ExpressDocs,
);
const [form] = Form.useForm();
useEffect(() => {
@ -34,14 +38,19 @@ export default function Javascript({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [selectedFrameWork]);
const renderDocs = (): JSX.Element => {
const handleFrameworkChange = (selectedFrameWork: string): void => {
setSelectedFrameWork(selectedFrameWork);
switch (selectedFrameWork) {
case 'nodejs':
return <JavascriptDocs />;
setSelectedFrameWorkDocs(JavascriptDocs);
break;
case 'nestjs':
return <NestJsDocs />;
setSelectedFrameWorkDocs(NestJsDocs);
break;
default:
return <ExpressDocs />;
setSelectedFrameWorkDocs(ExpressDocs);
break;
}
};
@ -62,10 +71,11 @@ export default function Javascript({
<div className="label"> Select Framework </div>
<Select
getPopupContainer={popupContainer}
defaultValue="express"
style={{ minWidth: 120 }}
placeholder="Select Framework"
onChange={(value): void => setSelectedFrameWork(value)}
onChange={(value): void => handleFrameworkChange(value)}
options={[
{
value: 'nodejs',
@ -106,7 +116,14 @@ export default function Javascript({
</div>
<div className="content-container">
<MDXProvider>{renderDocs()}</MDXProvider>
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{selectedFrameWorkDocs}
</ReactMarkdown>
</div>
</div>
)}

View File

@ -1,213 +1,212 @@
## Requirements
Supported Versions
^4.0.0
## Send traces to SigNoz Cloud
Based on your application environment, you can choose the setup below to send traces to SigNoz Cloud.
### Application on VMs
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
#### **Send traces directly to SigNoz Cloud**
Step 1. Install OpenTelemetry packages
```bash
npm install --save @opentelemetry/api@^1.4.1
npm install --save @opentelemetry/sdk-node@^0.39.1
npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create tracing.js file
You need to configure the endpoint for SigNoz cloud in this file. You can find your ingestion key from SigNoz cloud account details sent on your email.
```js
// tracing.js
'use strict'
const process = require('process');
const opentelemetry = require('@opentelemetry/sdk-node');
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http');
const { Resource } = require('@opentelemetry/resources');
const { SemanticResourceAttributes } = require('@opentelemetry/semantic-conventions');
// do not set headers in exporterOptions, the OTel spec recommends setting headers through ENV variables
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#specifying-headers-via-environment-variables
// highlight-start
const exporterOptions = {
url: 'https://ingest.{region}.signoz.cloud:443/v1/traces'
}
// highlight-end
const traceExporter = new OTLPTraceExporter(exporterOptions);
const sdk = new opentelemetry.NodeSDK({
traceExporter,
instrumentations: [getNodeAutoInstrumentations()],
resource: new Resource({
// highlight-next-line
[SemanticResourceAttributes.SERVICE_NAME]: 'node_app'
})
});
// initialize the SDK and register with the OpenTelemetry API
// this enables the API to record telemetry
sdk.start()
// gracefully shut down the SDK on process exit
process.on('SIGTERM', () => {
sdk.shutdown()
.then(() => console.log('Tracing terminated'))
.catch((error) => console.log('Error terminating tracing', error))
.finally(() => process.exit(0));
});
```
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443/v1/traces
IN - ingest.in.signoz.cloud:443/v1/traces
EU - ingest.eu.signoz.cloud:443/v1/traces
Step 3. Run the application
Make sure you set the `OTEL_EXPORTER_OTLP_HEADERS` env as follows
```bash
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" node -r ./tracing.js app.js
```
`<SIGNOZ_INGESTION_KEY>` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
---
#### **Send traces via OTel Collector binary**
OTel Collector binary helps to collect logs, hostmetrics, resource and infra attributes. It is recommended to install Otel Collector binary to collect and send traces to SigNoz cloud. You can correlate signals and have rich contextual data through this way.
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Javascript application.
Step 1. Install OpenTelemetry packages
```js
npm install --save @opentelemetry/api@^1.4.1
npm install --save @opentelemetry/sdk-node@^0.39.1
npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create tracing.js file
```js
// tracing.js
'use strict'
const process = require('process');
const opentelemetry = require('@opentelemetry/sdk-node');
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http');
const { Resource } = require('@opentelemetry/resources');
const { SemanticResourceAttributes } = require('@opentelemetry/semantic-conventions');
const exporterOptions = {
url: process.env.OTEL_EXPORTER_OTLP_ENDPOINT || 'http://localhost:4318/v1/traces',
}
const traceExporter = new OTLPTraceExporter(exporterOptions);
const sdk = new opentelemetry.NodeSDK({
traceExporter,
instrumentations: [getNodeAutoInstrumentations()],
resource: new Resource({
// highlight-next-line
[SemanticResourceAttributes.SERVICE_NAME]: 'node_app'
})
});
// initialize the SDK and register with the OpenTelemetry API
// this enables the API to record telemetry
sdk.start()
// gracefully shut down the SDK on process exit
process.on('SIGTERM', () => {
sdk.shutdown()
.then(() => console.log('Tracing terminated'))
.catch((error) => console.log('Error terminating tracing', error))
.finally(() => process.exit(0));
});
```
Step 3. Run the application
```bash
node -r ./tracing.js app.js
```
---
### Applications Deployed on Kubernetes
For Javascript application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/).
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Javascript instrumentation by following the below steps:
Step 1. Install OpenTelemetry packages
```bash
npm install --save @opentelemetry/api@^1.4.1
npm install --save @opentelemetry/sdk-node@^0.39.1
npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create tracing.js file
```js
// tracing.js
'use strict'
const process = require('process');
const opentelemetry = require('@opentelemetry/sdk-node');
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http');
const { Resource } = require('@opentelemetry/resources');
const { SemanticResourceAttributes } = require('@opentelemetry/semantic-conventions');
const exporterOptions = {
url: process.env.OTEL_EXPORTER_OTLP_ENDPOINT || 'http://localhost:4318/v1/traces',
}
const traceExporter = new OTLPTraceExporter(exporterOptions);
const sdk = new opentelemetry.NodeSDK({
traceExporter,
instrumentations: [getNodeAutoInstrumentations()],
resource: new Resource({
// highlight-next-line
[SemanticResourceAttributes.SERVICE_NAME]: 'node_app'
})
});
// initialize the SDK and register with the OpenTelemetry API
// this enables the API to record telemetry
sdk.start()
// gracefully shut down the SDK on process exit
process.on('SIGTERM', () => {
sdk.shutdown()
.then(() => console.log('Tracing terminated'))
.catch((error) => console.log('Error terminating tracing', error))
.finally(() => process.exit(0));
});
```
Step 3. Run the application<br></br>
```bash
node -r ./tracing.js app.js
```
## Requirements
Supported Versions: `^4.0.0`
## Send traces to SigNoz Cloud
Based on your application environment, you can choose the setup below to send traces to SigNoz Cloud.
### Application on VMs
From VMs, there are two ways to send data to SigNoz Cloud.
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary(recommended)
#### **Send traces directly to SigNoz Cloud**
Step 1. Install OpenTelemetry packages
```bash
npm install --save @opentelemetry/api@^1.4.1
npm install --save @opentelemetry/sdk-node@^0.39.1
npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create tracing.js file
You need to configure the endpoint for SigNoz cloud in this file. You also need to configure your service name. In this example, we have used `node_app`.
```js
// tracing.js
'use strict'
const process = require('process');
const opentelemetry = require('@opentelemetry/sdk-node');
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http');
const { Resource } = require('@opentelemetry/resources');
const { SemanticResourceAttributes } = require('@opentelemetry/semantic-conventions');
// do not set headers in exporterOptions, the OTel spec recommends setting headers through ENV variables
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#specifying-headers-via-environment-variables
const exporterOptions = {
url: 'https://ingest.{region}.signoz.cloud:443/v1/traces'
}
const traceExporter = new OTLPTraceExporter(exporterOptions);
const sdk = new opentelemetry.NodeSDK({
traceExporter,
instrumentations: [getNodeAutoInstrumentations()],
resource: new Resource({
[SemanticResourceAttributes.SERVICE_NAME]: 'node_app'
})
});
// initialize the SDK and register with the OpenTelemetry API
// this enables the API to record telemetry
sdk.start()
// gracefully shut down the SDK on process exit
process.on('SIGTERM', () => {
sdk.shutdown()
.then(() => console.log('Tracing terminated'))
.catch((error) => console.log('Error terminating tracing', error))
.finally(() => process.exit(0));
});
```
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443/v1/traces
IN - ingest.in.signoz.cloud:443/v1/traces
EU - ingest.eu.signoz.cloud:443/v1/traces
Step 3. Run the application
Make sure you set the `OTEL_EXPORTER_OTLP_HEADERS` env as follows
```bash
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" node -r ./tracing.js app.js
```
`<SIGNOZ_INGESTION_KEY>` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
---
#### **Send traces via OTel Collector binary**
OTel Collector binary helps to collect logs, hostmetrics, resource and infra attributes. It is recommended to install Otel Collector binary to collect and send traces to SigNoz cloud. You can correlate signals and have rich contextual data through this way.
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Javascript application.
Step 1. Install OpenTelemetry packages
```js
npm install --save @opentelemetry/api@^1.4.1
npm install --save @opentelemetry/sdk-node@^0.39.1
npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create tracing.js file
You need to configure your service name. In this example, we have used `node_app`.
```js
// tracing.js
'use strict'
const process = require('process');
const opentelemetry = require('@opentelemetry/sdk-node');
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http');
const { Resource } = require('@opentelemetry/resources');
const { SemanticResourceAttributes } = require('@opentelemetry/semantic-conventions');
const exporterOptions = {
url: process.env.OTEL_EXPORTER_OTLP_ENDPOINT || 'http://localhost:4318/v1/traces',
}
const traceExporter = new OTLPTraceExporter(exporterOptions);
const sdk = new opentelemetry.NodeSDK({
traceExporter,
instrumentations: [getNodeAutoInstrumentations()],
resource: new Resource({
[SemanticResourceAttributes.SERVICE_NAME]: 'node_app'
})
});
// initialize the SDK and register with the OpenTelemetry API
// this enables the API to record telemetry
sdk.start()
// gracefully shut down the SDK on process exit
process.on('SIGTERM', () => {
sdk.shutdown()
.then(() => console.log('Tracing terminated'))
.catch((error) => console.log('Error terminating tracing', error))
.finally(() => process.exit(0));
});
```
Step 3. Run the application
```bash
node -r ./tracing.js app.js
```
---
### Applications Deployed on Kubernetes
For Javascript application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/).
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Javascript instrumentation by following the below steps:
Step 1. Install OpenTelemetry packages
```bash
npm install --save @opentelemetry/api@^1.4.1
npm install --save @opentelemetry/sdk-node@^0.39.1
npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create tracing.js file
You also need to configure your service name. In this example, we have used `node_app`.
```js
// tracing.js
'use strict'
const process = require('process');
const opentelemetry = require('@opentelemetry/sdk-node');
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http');
const { Resource } = require('@opentelemetry/resources');
const { SemanticResourceAttributes } = require('@opentelemetry/semantic-conventions');
const exporterOptions = {
url: process.env.OTEL_EXPORTER_OTLP_ENDPOINT || 'http://localhost:4318/v1/traces',
}
const traceExporter = new OTLPTraceExporter(exporterOptions);
const sdk = new opentelemetry.NodeSDK({
traceExporter,
instrumentations: [getNodeAutoInstrumentations()],
resource: new Resource({
[SemanticResourceAttributes.SERVICE_NAME]: 'node_app'
})
});
// initialize the SDK and register with the OpenTelemetry API
// this enables the API to record telemetry
sdk.start()
// gracefully shut down the SDK on process exit
process.on('SIGTERM', () => {
sdk.shutdown()
.then(() => console.log('Tracing terminated'))
.catch((error) => console.log('Error terminating tracing', error))
.finally(() => process.exit(0));
});
```
Step 3. Run the application
```bash
node -r ./tracing.js app.js
```

View File

@ -1,6 +1,6 @@
## Requirements
- Node.js version 14 or newer ([See here](https://github.com/open-telemetry/opentelemetry-js#supported-runtimes))<br></br>
- Node.js version 14 or newer ([See here](https://github.com/open-telemetry/opentelemetry-js#supported-runtimes))
## Send traces to SigNoz Cloud
@ -10,8 +10,8 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
@ -24,9 +24,9 @@ npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create tracing.js file<br></br>
Step 2. Create tracing.js file
You need to configure the endpoint for SigNoz cloud in this file. You can find your ingestion key from SigNoz cloud account details sent on your email.
You need to configure the endpoint for SigNoz cloud in this file. You also need to configure your service name. In this example, we have used `node_app`.
```js
// tracing.js
@ -41,18 +41,15 @@ const { SemanticResourceAttributes } = require('@opentelemetry/semantic-conventi
// do not set headers in exporterOptions, the OTel spec recommends setting headers through ENV variables
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#specifying-headers-via-environment-variables
// highlight-start
const exporterOptions = {
url: 'https://ingest.{region}.signoz.cloud:443/v1/traces'
}
// highlight-end
const traceExporter = new OTLPTraceExporter(exporterOptions);
const sdk = new opentelemetry.NodeSDK({
traceExporter,
instrumentations: [getNodeAutoInstrumentations()],
resource: new Resource({
// highlight-next-line
[SemanticResourceAttributes.SERVICE_NAME]: 'node_app'
})
});
@ -72,13 +69,13 @@ process.on('SIGTERM', () => {
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443/v1/traces <br></br>
US - ingest.us.signoz.cloud:443/v1/traces
IN - ingest.in.signoz.cloud:443/v1/traces <br></br>
IN - ingest.in.signoz.cloud:443/v1/traces
EU - ingest.eu.signoz.cloud:443/v1/traces <br></br>
EU - ingest.eu.signoz.cloud:443/v1/traces
Step 3. Run the application<br></br>
Step 3. Run the application
Make sure you set the `OTEL_EXPORTER_OTLP_HEADERS` env as follows
@ -86,7 +83,7 @@ Make sure you set the `OTEL_EXPORTER_OTLP_HEADERS` env as follows
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" node -r ./tracing.js app.js
```
`SIGNOZ_INGESTION_KEY` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
`<SIGNOZ_INGESTION_KEY>` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
---
@ -105,7 +102,9 @@ npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create tracing.js file<br></br>
Step 2. Create tracing.js file
You need to configure your service name. In this example, we have used `node_app`.
```js
// tracing.js
@ -126,7 +125,6 @@ const sdk = new opentelemetry.NodeSDK({
traceExporter,
instrumentations: [getNodeAutoInstrumentations()],
resource: new Resource({
// highlight-next-line
[SemanticResourceAttributes.SERVICE_NAME]: 'node_app'
})
});
@ -144,7 +142,7 @@ process.on('SIGTERM', () => {
});
```
Step 3. Run the application<br></br>
Step 3. Run the application
```bash
node -r ./tracing.js app.js
@ -154,7 +152,7 @@ node -r ./tracing.js app.js
### Applications Deployed on Kubernetes
For Javascript application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](/docs/tutorial/kubernetes-infra-metrics/).
For Javascript application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/).
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Javascript instrumentation by following the below steps:
@ -167,7 +165,9 @@ npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create tracing.js file<br></br>
Step 2. Create tracing.js file
You need to configure your service name. In this example, we have used `node_app`.
```js
// tracing.js
@ -188,7 +188,6 @@ const sdk = new opentelemetry.NodeSDK({
traceExporter,
instrumentations: [getNodeAutoInstrumentations()],
resource: new Resource({
// highlight-next-line
[SemanticResourceAttributes.SERVICE_NAME]: 'node_app'
})
});
@ -206,7 +205,7 @@ process.on('SIGTERM', () => {
});
```
Step 3. Run the application<br></br>
Step 3. Run the application
```bash
node -r ./tracing.js app.js
```

View File

@ -12,8 +12,8 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
@ -26,14 +26,13 @@ npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create `tracer.ts` file<br></br>
Step 2. Create `tracer.ts` file
You need to configure the endpoint for SigNoz cloud in this file.
You need to configure the endpoint for SigNoz cloud in this file. You also need to configure your service name. In this example, we have used `sampleNestjsApplication`.
```js
'use strict'
const process = require('process');
//OpenTelemetry
const opentelemetry = require('@opentelemetry/sdk-node');
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http');
@ -41,9 +40,7 @@ const {Resource} = require('@opentelemetry/resources');
const {SemanticResourceAttributes} = require('@opentelemetry/semantic-conventions');
const exporterOptions = {
// highlight-start
url: 'https://ingest.{region}.signoz.cloud:443/v1/traces'
// highlight-end
}
const traceExporter = new OTLPTraceExporter(exporterOptions);
@ -70,26 +67,26 @@ const sdk = new opentelemetry.NodeSDK({
module.exports = sdk
```
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary accordingly.
US - ingest.us.signoz.cloud:443/v1/traces <br></br>
US - ingest.us.signoz.cloud:443/v1/traces
IN - ingest.in.signoz.cloud:443/v1/traces <br></br>
IN - ingest.in.signoz.cloud:443/v1/traces
EU - ingest.eu.signoz.cloud:443/v1/traces <br></br>
EU - ingest.eu.signoz.cloud:443/v1/traces
Step 3. Import the tracer module where your app starts
Step 3. Import the tracer module where your app starts `(Ex —> main.ts)`
```jsx
const tracer = require('./tracer')
```
Step 4. Start the tracer<br></br>
Step 4. Start the tracer
In the `async function bootstrap` section of the application code, initialize the tracer as follows:
In the `async function bootstrap` section of the application code `(Ex —> In main.ts)`, initialize the tracer as follows:
```jsx
const tracer = require('./tracer')
@ -100,9 +97,7 @@ import { AppModule } from './app.module';
// OpenTelemetry automatic instrumentation must go here.
async function bootstrap() {
// highlight-start
await tracer.start();
//highlight-end
const app = await NestFactory.create(AppModule);
await app.listen(3001);
}
@ -117,7 +112,7 @@ OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" nest sta
You can now run your Nestjs application. The data captured with OpenTelemetry from your application should start showing on the SigNoz dashboard.
`SIGNOZ_INGESTION_KEY` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
`<SIGNOZ_INGESTION_KEY>` is the API token provided by SigNoz. You can find your ingestion key from SigNoz cloud account details sent on your email.
---
@ -136,7 +131,9 @@ npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create `tracer.ts` file<br></br>
Step 2. Create `tracer.ts` file
You need to configure your service name. In this example, we have used `sampleNestjsApplication`.
```js
'use strict'
@ -149,9 +146,7 @@ const {Resource} = require('@opentelemetry/resources');
const {SemanticResourceAttributes} = require('@opentelemetry/semantic-conventions');
const exporterOptions = {
// highlight-start
url: 'http://localhost:4318/v1/traces'
// highlight-end
}
const traceExporter = new OTLPTraceExporter(exporterOptions);
@ -185,7 +180,7 @@ const tracer = require('./tracer')
```
Step 4. Start the tracer<br></br>
Step 4. Start the tracer
In the `async function bootstrap` section of the application code, initialize the tracer as follows:
@ -198,9 +193,7 @@ import { AppModule } from './app.module';
// OpenTelemetry automatic instrumentation must go here.
async function bootstrap() {
// highlight-start
await tracer.start();
//highlight-end
const app = await NestFactory.create(AppModule);
await app.listen(3001);
}
@ -213,7 +206,7 @@ Step 5. Run the application
### Applications Deployed on Kubernetes
For Javascript application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](/docs/tutorial/kubernetes-infra-metrics/).
For Javascript application deployed on Kubernetes, you need to install OTel Collector agent in your k8s infra to collect and send traces to SigNoz Cloud. You can find the instructions to install OTel Collector agent [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/).
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Javascript instrumentation by following the below steps:
@ -226,7 +219,9 @@ npm install --save @opentelemetry/auto-instrumentations-node@^0.37.0
npm install --save @opentelemetry/exporter-trace-otlp-http@^0.39.1
```
Step 2. Create `tracer.ts` file<br></br>
Step 2. Create `tracer.ts` file
You need to configure your service name. In this example, we have used `sampleNestjsApplication`.
```js
'use strict'
@ -239,9 +234,7 @@ const {Resource} = require('@opentelemetry/resources');
const {SemanticResourceAttributes} = require('@opentelemetry/semantic-conventions');
const exporterOptions = {
// highlight-start
url: 'http://localhost:4318/v1/traces'
// highlight-end
}
const traceExporter = new OTLPTraceExporter(exporterOptions);
@ -275,7 +268,7 @@ const tracer = require('./tracer')
```
Step 4. Start the tracer<br></br>
Step 4. Start the tracer
In the `async function bootstrap` section of the application code, initialize the tracer as follows:
@ -288,9 +281,7 @@ import { AppModule } from './app.module';
// OpenTelemetry automatic instrumentation must go here.
async function bootstrap() {
// highlight-start
await tracer.start();
//highlight-end
const app = await NestFactory.create(AppModule);
await app.listen(3001);
}

View File

@ -1,10 +1,12 @@
import './Python.styles.scss';
import { MDXProvider } from '@mdx-js/react';
import { Form, Input, Select } from 'antd';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import Header from 'container/OnboardingContainer/common/Header/Header';
import { useEffect, useState } from 'react';
import ReactMarkdown from 'react-markdown';
import { trackEvent } from 'utils/segmentAnalytics';
import { popupContainer } from 'utils/selectPopupContainer';
import ConnectionStatus from '../common/ConnectionStatus/ConnectionStatus';
import DjangoDocs from './md-docs/django.md';
@ -27,7 +29,7 @@ export default function Python({
activeStep: number;
}): JSX.Element {
const [selectedFrameWork, setSelectedFrameWork] = useState('django');
const [selectedFrameWorkDocs, setSelectedFrameWorkDocs] = useState(DjangoDocs);
const [form] = Form.useForm();
useEffect(() => {
@ -38,18 +40,25 @@ export default function Python({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [selectedFrameWork]);
const renderDocs = (): JSX.Element => {
const handleFrameworkChange = (selectedFrameWork: string): void => {
setSelectedFrameWork(selectedFrameWork);
switch (selectedFrameWork) {
case 'django':
return <DjangoDocs />;
setSelectedFrameWorkDocs(DjangoDocs);
break;
case 'fastAPI':
return <FastAPIDocs />;
setSelectedFrameWorkDocs(FastAPIDocs);
break;
case 'flask':
return <FlaskDocs />;
setSelectedFrameWorkDocs(FlaskDocs);
break;
case 'falcon':
return <FalconDocs />;
setSelectedFrameWorkDocs(FalconDocs);
break;
default:
return <PythonDocs />;
setSelectedFrameWorkDocs(PythonDocs);
break;
}
};
@ -70,10 +79,11 @@ export default function Python({
<div className="label"> Select Framework </div>
<Select
getPopupContainer={popupContainer}
defaultValue="Django"
style={{ minWidth: 120 }}
placeholder="Select Framework"
onChange={(value): void => setSelectedFrameWork(value)}
onChange={(value): void => handleFrameworkChange(value)}
options={[
{
value: 'django',
@ -116,7 +126,14 @@ export default function Python({
</div>
<div className="content-container">
<MDXProvider>{renderDocs()}</MDXProvider>
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{selectedFrameWorkDocs}
</ReactMarkdown>
</div>
</div>
)}

View File

@ -19,12 +19,12 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -38,22 +38,6 @@ pip install opentelemetry-distro==0.38b0
pip install opentelemetry-exporter-otlp==1.17.0
```
<!-- The dependencies included are briefly explained below:
`opentelemetry-distro` - The distro provides a mechanism to automatically configure some of the more common options for users. It helps to get started with OpenTelemetry auto-instrumentation quickly.
`opentelemetry-exporter-otlp` - This library provides a way to install all OTLP exporters. You will need an exporter to send the data to SigNoz.
:::note
💡 The `opentelemetry-exporter-otlp` is a convenience wrapper package to install all OTLP exporters. Currently, it installs:
- opentelemetry-exporter-otlp-proto-http
- opentelemetry-exporter-otlp-proto-grpc
- (soon) opentelemetry-exporter-otlp-json-http
The `opentelemetry-exporter-otlp-proto-grpc` package installs the gRPC exporter which depends on the `grpcio` package. The installation of `grpcio` may fail on some platforms for various reasons. If you run into such issues, or you don't want to use gRPC, you can install the HTTP exporter instead by installing the `opentelemetry-exporter-otlp-proto-http` package. You need to set the `OTEL_EXPORTER_OTLP_PROTOCOL` environment variable to `http/protobuf` to use the HTTP exporter.
::: -->
Step 3. Add automatic instrumentation
@ -68,22 +52,22 @@ Step 4. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \
OTEL_EXPORTER_OTLP_ENDPOINT="https://ingest.{region}.signoz.cloud:443" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=SIGNOZ_INGESTION_KEY" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" \
OTEL_EXPORTER_OTLP_PROTOCOL=grpc \
opentelemetry-instrument <your_run_command>
```
- `<service_name>` is the name of the service you want
- `<your_run_command>` can be `python3 app.py` or `python manage.py runserver --noreload`
- Replace `SIGNOZ_INGESTION_KEY` with the api token provided by SigNoz. You can find it in the email sent by SigNoz with your cloud account details.
- Replace `<SIGNOZ_INGESTION_KEY>` with the api token provided by SigNoz. You can find it in the email sent by SigNoz with your cloud account details.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
Note:
Don't run the app in reloader/hot-reload mode as it breaks instrumentation. For example, you can disable the auto reload with `--noreload`.
@ -96,7 +80,7 @@ OTel Collector binary helps to collect logs, hostmetrics, resource and infra att
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Python application.
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -116,7 +100,7 @@ Step 3. Add automatic instrumentation
opentelemetry-bootstrap --action=install
```
Step 4. To run your application and send data to collector in same VM:
Step 4. To run your application and send data to collector in same VM
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \
@ -142,7 +126,7 @@ For Python application deployed on Kubernetes, you need to install OTel Collecto
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Python instrumentation by following the below steps:
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv

View File

@ -10,12 +10,12 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -29,23 +29,6 @@ pip install opentelemetry-distro==0.38b0
pip install opentelemetry-exporter-otlp==1.17.0
```
<!-- The dependencies included are briefly explained below:
`opentelemetry-distro` - The distro provides a mechanism to automatically configure some of the more common options for users. It helps to get started with OpenTelemetry auto-instrumentation quickly.
`opentelemetry-exporter-otlp` - This library provides a way to install all OTLP exporters. You will need an exporter to send the data to SigNoz.
:::note
💡 The `opentelemetry-exporter-otlp` is a convenience wrapper package to install all OTLP exporters. Currently, it installs:
- opentelemetry-exporter-otlp-proto-http
- opentelemetry-exporter-otlp-proto-grpc
- (soon) opentelemetry-exporter-otlp-json-http
The `opentelemetry-exporter-otlp-proto-grpc` package installs the gRPC exporter which depends on the `grpcio` package. The installation of `grpcio` may fail on some platforms for various reasons. If you run into such issues, or you don't want to use gRPC, you can install the HTTP exporter instead by installing the `opentelemetry-exporter-otlp-proto-http` package. You need to set the `OTEL_EXPORTER_OTLP_PROTOCOL` environment variable to `http/protobuf` to use the HTTP exporter.
::: -->
Step 3. Add automatic instrumentation
```bash
@ -59,22 +42,22 @@ Step 4. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \
OTEL_EXPORTER_OTLP_ENDPOINT="https://ingest.{region}.signoz.cloud:443" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=SIGNOZ_INGESTION_KEY" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" \
OTEL_EXPORTER_OTLP_PROTOCOL=grpc \
opentelemetry-instrument <your_run_command>
```
- *`<service_name>`* is the name of the service you want
- *<your_run_command>* can be `python3 app.py` or `flask run`
- Replace `SIGNOZ_INGESTION_KEY` with the api token provided by SigNoz. You can find it in the email sent by SigNoz with your cloud account details.
- *<your_run_command>* can be `python3 app.py` or `gunicorn src.app -b 0.0.0.0:8001`
- Replace `<SIGNOZ_INGESTION_KEY>` with the api token provided by SigNoz. You can find it in the email sent by SigNoz with your cloud account details.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
Note:
Don't run the app in reloader/hot-reload mode as it breaks instrumentation. For example, you can disable the auto reload with `--noreload`.
@ -87,7 +70,7 @@ OTel Collector binary helps to collect logs, hostmetrics, resource and infra att
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Python application.
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -108,7 +91,7 @@ opentelemetry-bootstrap --action=install
```
Please make sure that you have installed all the dependencies of your application before running the above command. The command will not install instrumentation for the dependencies which are not installed.
Step 4. To run your application and send data to collector in same VM:
Step 4. To run your application and send data to collector in same VM
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \
@ -138,7 +121,7 @@ For Python application deployed on Kubernetes, you need to install OTel Collecto
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Python instrumentation by following the below steps:
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -160,7 +143,7 @@ opentelemetry-bootstrap --action=install
Please make sure that you have installed all the dependencies of your application before running the above command. The command will not install instrumentation for the dependencies which are not installed.
Step 4. Run your application:
Step 4. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \

View File

@ -10,12 +10,12 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -42,22 +42,22 @@ Step 4. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \
OTEL_EXPORTER_OTLP_ENDPOINT="https://ingest.{region}.signoz.cloud:443" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=SIGNOZ_INGESTION_KEY" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" \
OTEL_EXPORTER_OTLP_PROTOCOL=grpc \
opentelemetry-instrument <your_run_command>
```
- *`<service_name>`* is the name of the service you want
- *<your_run_command>* can be `python3 app.py` or `python manage.py runserver --noreload`
- Replace `SIGNOZ_INGESTION_KEY` with the api token provided by SigNoz. You can find it in the email sent by SigNoz with your cloud account details.
- *<your_run_command>* can be `python3 app.py` or `uvicorn main:app --host localhost --port 5002`
- Replace `<SIGNOZ_INGESTION_KEY>` with the api token provided by SigNoz. You can find it in the email sent by SigNoz with your cloud account details.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
Note:
Don't run the app in reloader/hot-reload mode as it breaks instrumentation. For example, you can disable the auto reload with `--noreload`.
@ -70,7 +70,7 @@ OTel Collector binary helps to collect logs, hostmetrics, resource and infra att
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Python application.
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -92,7 +92,7 @@ opentelemetry-bootstrap --action=install
Please make sure that you have installed all the dependencies of your application before running the above command. The command will not install instrumentation for the dependencies which are not installed.
Step 4. To run your application and send data to collector in same VM:
Step 4. To run your application and send data to collector in same VM
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \
@ -119,7 +119,7 @@ For Python application deployed on Kubernetes, you need to install OTel Collecto
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Python instrumentation by following the below steps:
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -141,7 +141,7 @@ opentelemetry-bootstrap --action=install
Please make sure that you have installed all the dependencies of your application before running the above command. The command will not install instrumentation for the dependencies which are not installed.
Step 4. Run your application:
Step 4. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \

View File

@ -10,12 +10,12 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -42,22 +42,22 @@ Step 4. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \
OTEL_EXPORTER_OTLP_ENDPOINT="https://ingest.{region}.signoz.cloud:443" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=SIGNOZ_INGESTION_KEY" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" \
OTEL_EXPORTER_OTLP_PROTOCOL=grpc \
opentelemetry-instrument <your_run_command>
```
- *`<service_name>`* is the name of the service you want
- *<your_run_command>* can be `python3 app.py` or `flask run`
- Replace `SIGNOZ_INGESTION_KEY` with the api token provided by SigNoz. You can find it in the email sent by SigNoz with your cloud account details.
- Replace `<SIGNOZ_INGESTION_KEY>` with the api token provided by SigNoz. You can find it in the email sent by SigNoz with your cloud account details.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
Note:
Don't run the app in reloader/hot-reload mode as it breaks instrumentation. For example, you can disable the auto reload with `--noreload`.
@ -70,7 +70,7 @@ OTel Collector binary helps to collect logs, hostmetrics, resource and infra att
You can find instructions to install OTel Collector binary [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) in your VM. Once you are done setting up your OTel Collector binary, you can follow the below steps for instrumenting your Python application.
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -92,7 +92,7 @@ opentelemetry-bootstrap --action=install
Please make sure that you have installed all the dependencies of your application before running the above command. The command will not install instrumentation for the dependencies which are not installed.
Step 4. To run your application and send data to collector in same VM:
Step 4. To run your application and send data to collector in same VM
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \
@ -121,7 +121,7 @@ For Python application deployed on Kubernetes, you need to install OTel Collecto
Once you have set up OTel Collector agent, you can proceed with OpenTelemetry Python instrumentation by following the below steps:
Step 1. Create a virtual environment<br></br>
Step 1. Create a virtual environment
```bash
python3 -m venv .venv
@ -144,7 +144,7 @@ opentelemetry-bootstrap --action=install
Please make sure that you have installed all the dependencies of your application before running the above command. The command will not install instrumentation for the dependencies which are not installed.
Step 4. Run your application:
Step 4. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \

View File

@ -10,8 +10,8 @@ Based on your application environment, you can choose the setup below to send tr
From VMs, there are two ways to send data to SigNoz Cloud.
- [Send traces directly to SigNoz Cloud](#send-traces-directly-to-signoz-cloud)
- [Send traces via OTel Collector binary](#send-traces-via-otel-collector-binary) (recommended)
- Send traces directly to SigNoz Cloud (quick start)
- Send traces via OTel Collector binary (recommended)
#### **Send traces directly to SigNoz Cloud**
@ -33,21 +33,21 @@ Step 3. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \
OTEL_EXPORTER_OTLP_ENDPOINT="https://ingest.{region}.signoz.cloud:443" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=SIGNOZ_INGESTION_KEY" \
OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token=<SIGNOZ_INGESTION_KEY>" \
opentelemetry-instrument <your_run_command>
```
- *`<service_name>`* is the name of the service you want
- *`<your_run_command>`* can be `python3 app.py` or `flask run`
- Replace `SIGNOZ_INGESTION_KEY` with the api token provided by SigNoz. You can find it in the email sent by SigNoz with your cloud account details.
- Replace `<SIGNOZ_INGESTION_KEY>` with the api token provided by SigNoz. You can find it in the email sent by SigNoz with your cloud account details.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
Note:
Don't run the app in reloader/hot-reload mode as it breaks instrumentation.
@ -73,7 +73,7 @@ Step 2. Add automatic instrumentation
opentelemetry-bootstrap --action=install
```
Step 3. To run your application and send data to collector in same VM:
Step 3. To run your application and send data to collector in same VM
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \
@ -118,7 +118,7 @@ Step 2. Add automatic instrumentation
opentelemetry-bootstrap --action=install
```
Step 3. Run your application:
Step 3. Run your application
```bash
OTEL_RESOURCE_ATTRIBUTES=service.name=<service_name> \

View File

@ -1,6 +1,18 @@
.infrastructure-monitoring-module-container {
padding: 48px 0;
.module-header {
h1 {
font-size: 24px;
font-weight: 500;
}
h4 {
font-size: 14px;
font-weight: 300;
}
}
.content-container {
.heading {
.title {
@ -21,15 +33,13 @@
flex-direction: column;
}
}
}
h1 {
font-size: 24px;
font-weight: 500;
}
h4 {
font-size: 14px;
font-weight: 300;
}
.header {
display: flex;
gap: 16px;
justify-content: flex-start;
align-items: center;
margin: 16px 0;
}
}

View File

@ -1,39 +1,154 @@
/* eslint-disable jsx-a11y/no-static-element-interactions */
/* eslint-disable jsx-a11y/click-events-have-key-events */
import './InfrastructureMonitoring.styles.scss';
import { MDXProvider } from '@mdx-js/react';
import cx from 'classnames';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import { useEffect, useState } from 'react';
import ReactMarkdown from 'react-markdown';
import { trackEvent } from 'utils/segmentAnalytics';
import InfraMonitoringDocs from './infraMonitoringDocs.md';
import Header from '../common/Header/Header';
import hostMetricsMonitoring from './md-docs/hostMetricsMonitoring.md';
import k8sInfraMonitoringDocs from './md-docs/kubernetesInfraMonitoring.md';
import otherMetrics from './md-docs/otherMetrics.md';
export default function InfrastructureMonitoring({
activeStep,
}: {
activeStep: number;
}): JSX.Element {
const docsURL = 'https://signoz.io/docs/userguide/send-metrics-cloud/';
const heading = 'Send Metrics to SigNoz Cloud';
const [selectedInfraMetrics, setSelectedInfraMetrics] = useState('kubernetes');
const [selectedInfraMetricsDocs, setSelectedInfraMetricsDocs] = useState(
k8sInfraMonitoringDocs,
);
useEffect(() => {
// on metrics Type select
trackEvent('Onboarding: APM : Java', {
selectedInfraMetrics,
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [selectedInfraMetrics]);
const supportedInfraMetrics = [
{
name: 'Kubernetes Infra Metrics',
id: 'kubernetes',
imgURL: `Logos/kubernetes.svg`,
},
{
name: 'HostMetrics',
id: 'hostMetrics',
imgURL: `Logos/software-window.svg`,
},
{
name: 'Other Metrics',
id: 'otherMetrics',
imgURL: `Logos/cmd-terminal.svg`,
},
];
const handleMetricsTypeChange = (selectedMetricsType: string): void => {
setSelectedInfraMetrics(selectedMetricsType);
switch (selectedMetricsType) {
case 'kubernetes':
setSelectedInfraMetricsDocs(k8sInfraMonitoringDocs);
break;
case 'hostMetrics':
setSelectedInfraMetricsDocs(hostMetricsMonitoring);
break;
case 'otherMetrics':
setSelectedInfraMetricsDocs(otherMetrics);
break;
default:
setSelectedInfraMetricsDocs(otherMetrics);
break;
}
};
const getHeaderBasedOnType = (): JSX.Element => {
switch (selectedInfraMetrics) {
case 'hostMetrics':
return (
<Header
entity="hostMetrics"
heading="Host Metrics"
imgURL="/Logos/software-window.svg"
docsURL="https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/"
imgClassName="supported-logs-type-img"
/>
);
case 'otherMetrics':
return (
<Header
entity="otherMetrics"
heading="Other Metrics"
imgURL="/Logos/cmd-terminal.svg"
docsURL="https://signoz.io/docs/userguide/send-metrics-cloud/"
imgClassName="supported-logs-type-img"
/>
);
default:
return (
<Header
entity="kubernetes"
heading="Kubernetes Metrics"
imgURL="/Logos/kubernetes.svg"
docsURL="https://signoz.io/docs/tutorial/kubernetes-infra-metrics/"
imgClassName="supported-logs-type-img"
/>
);
}
};
return (
<div className="infrastructure-monitoring-module-container">
{activeStep === 2 && (
<div className="content-container">
<div className="header">
<div className="title">
<h1>{heading}</h1>
<div className="detailed-docs-link">
View detailed docs
<a target="_blank" href={docsURL} rel="noreferrer">
here
</a>
</div>
</div>
<>
<div className="module-header">
<h1>Select an Infra Metrics type</h1>
{/* <h4> Choose the logs that you want to receive on SigNoz </h4> */}
</div>
<MDXProvider>
<InfraMonitoringDocs />
</MDXProvider>
</div>
<div className="supported-logs-type-container">
{supportedInfraMetrics.map((logType) => (
<div
className={cx(
'supported-logs-type',
selectedInfraMetrics === logType.id ? 'selected' : '',
)}
key={logType.name}
onClick={(): void => handleMetricsTypeChange(logType.id)}
>
<img
className={cx('supported-logs-type-img')}
src={`${logType.imgURL}`}
alt=""
/>
<div> {logType.name} </div>
</div>
))}
</div>
{getHeaderBasedOnType()}
<div className="content-container">
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{selectedInfraMetricsDocs}
</ReactMarkdown>
</div>
</>
)}
{activeStep === 3 && <div> Infra Monitoring Step 3 </div>}
</div>
);
}

View File

@ -0,0 +1,10 @@
## Hostmetrics Monitoring
You can collect Hostmetrics from your VM and send it to SigNoz cloud using OpenTelemetry Collector.
Steps to send hostmetrics to SigNoz Cloud:
- Install OpenTelemetry Collector binary agent. Please find instructions [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/#setup-otel-collector-as-agent).
- Import Hostmetrics Dashboard in SigNoz. Please find instructions [here](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/#hostmetrics-dashboard).
Learn how to create dashboards and panels [here](https://signoz.io/docs/userguide/manage-dashboards-and-panels/).

View File

@ -0,0 +1,9 @@
## Kubernetes Infra Metrics
You can collect Kubernetes infra metrics from your k8s cluster and send it to SigNoz cloud using k8s-infra chart.
Steps to send kubernetes infra metrics to SigNoz Cloud:
- Install OpenTelemetry Collectors in your k8s infra. Please find instructions [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/).
- Plot metrics in SigNoz UI by following the instructions [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/#plot-metrics-in-signoz-ui).

View File

@ -1,3 +1,7 @@
## Send metrics from any third-party integrations
This document helps you to send metrics from any third-party integrations such as RabbitMQ, Nginx, MySQL, etc.
There are two ways in which you can send metrics to SigNoz using OpenTelemetry:
- From your application
@ -5,8 +9,8 @@ There are two ways in which you can send metrics to SigNoz using OpenTelemetry:
In this document, we will cover how to send metrics from OpenTelemetry Collector. The Collector is a swiss-army knife that can collect metrics from various sources and send them to SigNoz.
- [Enable a Specific Metric Receiver](#enable-a-specific-metric-receiver)
- [Enable a Prometheus Receiver](#enable-a-prometheus-receiver)
- Enable a Specific Metric Receiver
- Enable a Prometheus Receiver
## Enable a Specific Metric Receiver
@ -71,11 +75,11 @@ service:
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary according to this table.
US - ingest.us.signoz.cloud:443 <br></br>
US - ingest.us.signoz.cloud:443
IN - ingest.in.signoz.cloud:443 <br></br>
IN - ingest.in.signoz.cloud:443
EU - ingest.eu.signoz.cloud:443 <br></br>
EU - ingest.eu.signoz.cloud:443
To enable a new OpenTelemetry receiver, follow the steps below:

View File

@ -1,11 +1,9 @@
import { MDXProvider } from '@mdx-js/react';
import { Tabs } from 'antd';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import Header from 'container/OnboardingContainer/common/Header/Header';
import ReactMarkdown from 'react-markdown';
import ConnectionStatus from '../common/LogsConnectionStatus/LogsConnectionStatus';
import LogsFromLogFile from './applicationLogsFromLogFile.md';
import LogsUsingJavaOtelSDK from './applicationLogsUsingJavaOtelSDK.md';
import LogsUsingPythonOtelSDK from './applicationLogsUsingPythonOtelSDK.md';
interface ApplicationLogsProps {
type: string;
@ -14,29 +12,12 @@ interface ApplicationLogsProps {
const collectLogsFromFileURL =
'https://signoz.io/docs/userguide/collect_logs_from_file/';
const collectLogsFromOTELSDK =
'https://signoz.io/docs/userguide/collecting_application_logs_otel_sdk_java/';
export default function ApplicationLogs({
type,
activeStep,
}: ApplicationLogsProps): JSX.Element {
function renderContentForCollectingLogsOtelSDK(language: string): JSX.Element {
if (language === 'Java') {
return <LogsUsingJavaOtelSDK />;
}
return <LogsUsingPythonOtelSDK />;
}
enum ApplicationLogsType {
FROM_LOG_FILE = 'from-log-file',
USING_OTEL_COLLECTOR = 'using-otel-sdk',
}
const docsURL =
type === ApplicationLogsType.FROM_LOG_FILE
? collectLogsFromFileURL
: collectLogsFromOTELSDK;
const docsURL = collectLogsFromFileURL;
return (
<>
@ -44,38 +25,21 @@ export default function ApplicationLogs({
<div className="golang-setup-instructions-container">
<Header
entity="docker"
heading={
type === ApplicationLogsType.FROM_LOG_FILE
? 'Collecting Application Logs from Log file'
: 'Collecting Application Logs Using OTEL SDK'
}
imgURL={`/Logos/${
type === ApplicationLogsType.FROM_LOG_FILE
? 'software-window'
: 'cmd-terminal'
}.svg`}
heading="Collecting Application Logs from Log file"
imgURL={`/Logos/${'software-window'}.svg`}
docsURL={docsURL}
imgClassName="supported-logs-type-img"
/>
<div className="content-container">
<MDXProvider>
{type === ApplicationLogsType.FROM_LOG_FILE && <LogsFromLogFile />}
{type === ApplicationLogsType.USING_OTEL_COLLECTOR && (
<Tabs
defaultActiveKey="1"
items={['Java', 'Python'].map((language, i) => {
const id = String(i + 1);
return {
label: <div className="language-tab-item">{language}</div>,
key: id,
children: renderContentForCollectingLogsOtelSDK(language),
};
})}
/>
)}
</MDXProvider>
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{LogsFromLogFile}
</ReactMarkdown>
</div>
</div>
)}

View File

@ -1,7 +1,5 @@
## Collect Application Logs from Log file in SigNoz cloud
If you don't already have a SigNoz cloud account, you can sign up [here](https://signoz.io/teams/).
- Add otel collector binary to your VM by following this [guide](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
- Add the filelog receiver to `config.yaml`.
@ -35,4 +33,5 @@ If you dont already have a SigNoz cloud account, you can sign up [here](https
- Now we can restart the otel collector so that new changes are applied.
- The log will be exported, if you add more lines to the log file it will be exported as well
- If there are no errors your logs will be visible on SigNoz UI.

View File

@ -13,7 +13,7 @@ The command for it will look like
OTEL_LOGS_EXPORTER=otlp OTEL_EXPORTER_OTLP_ENDPOINT="http://<IP of SigNoz Backend>:4317" OTEL_RESOURCE_ATTRIBUTES=service.name=<app_name> java -javaagent:/path/opentelemetry-javaagent.jar -jar <myapp>.jar
```
<br></br>
In the below example we will configure a java application to send logs to SigNoz.

View File

@ -1,8 +1,9 @@
import { MDXProvider } from '@mdx-js/react';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import Header from 'container/OnboardingContainer/common/Header/Header';
import ReactMarkdown from 'react-markdown';
import ConnectionStatus from '../common/LogsConnectionStatus/LogsConnectionStatus';
import Post from './docker.md';
import DockerDocs from './docker.md';
export default function Docker({
activeStep,
@ -22,9 +23,14 @@ export default function Docker({
/>
<div className="content-container">
<MDXProvider>
<Post />
</MDXProvider>
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{DockerDocs}
</ReactMarkdown>
</div>
</div>
)}

View File

@ -1,70 +1,23 @@
## Collect Syslogs in SigNoz cloud
## Collect Docker Container Logs in SigNoz Cloud
### Setup Otel Collector as agent
- Clone this [repository](https://github.com/SigNoz/docker-container-logs)
- Add `config.yaml`
```yaml {22-26}
receivers:
filelog:
include: [/tmp/python.log]
start_at: beginning
operators:
- type: json_parser
timestamp:
parse_from: attributes.time
layout: '%Y-%m-%d,%H:%M:%S %z'
- type: move
from: attributes.message
to: body
- type: remove
field: attributes.time
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
exporters:
otlp:
endpoint: 'ingest.{region}.signoz.cloud:443'
tls:
insecure: false
headers:
'signoz-access-token': '<SIGNOZ_INGESTION_KEY>'
service:
pipelines:
logs:
receivers: [filelog]
processors: [batch]
exporters: [otlp/log]
```
- Update `otel-collector-config.yaml` and set the values of `<SIGNOZ_INGESTION_KEY>` and `{region}`.
````
Depending on the choice of your region for SigNoz cloud, the otlp endpoint will vary according to this table.
Depending on the choice of your region for SigNoz cloud, the ingest endpoint will vary accordingly.
| Region | Endpoint |
| ------ | -------------------------- |
| US | ingest.us.signoz.cloud:443 |
| IN | ingest.in.signoz.cloud:443 |
| EU | ingest.eu.signoz.cloud:443 |
US - ingest.us.signoz.cloud:443
* We will start our otel-collector container.
```bash
docker run -d --name signoz-host-otel-collector -p 2255:2255 --user root -v $(pwd)/config.yaml:/etc/otel/config.yaml signoz/signoz-otel-collector:0.79.0
````
IN - ingest.in.signoz.cloud:443
### Run logspout to collect docker container logs and send it to local otel collector.
EU - ingest.eu.signoz.cloud:443
Logspout helps in collecting Docker logs by connecting to Docker socket.
- Run logspout
- Start the containers
```bash
docker compose up -d
```
```bash
docker run --net=host --rm --name="logspout" \
--volume=/var/run/docker.sock:/var/run/docker.sock \
gliderlabs/logspout \
syslog+tcp://<host>:2255
```
- If there are no errors your logs will be exported and will be visible on the SigNoz UI.
For finding the right host for your SigNoz cluster please follow the guide [here](../install/troubleshooting.md#signoz-otel-collector-address-grid).
- If there are no errors your logs will be exported and will be visible on the SigNoz UI.

View File

@ -1,8 +1,10 @@
import { MDXProvider } from '@mdx-js/react';
import { Select } from 'antd';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import Header from 'container/OnboardingContainer/common/Header/Header';
import { useEffect, useState } from 'react';
import ReactMarkdown from 'react-markdown';
import { trackEvent } from 'utils/segmentAnalytics';
import { popupContainer } from 'utils/selectPopupContainer';
import FluentBit from './md-docs/fluentBit.md';
import FluentD from './md-docs/fluentD.md';
@ -16,6 +18,7 @@ enum FrameworksMap {
export default function ExistingCollectors(): JSX.Element {
const [selectedFrameWork, setSelectedFrameWork] = useState('fluent_d');
const [selectedFrameWorkDocs, setSelectedFrameWorkDocs] = useState(FluentD);
useEffect(() => {
// on language select
@ -25,14 +28,19 @@ export default function ExistingCollectors(): JSX.Element {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [selectedFrameWork]);
const renderDocs = (): JSX.Element => {
const handleFrameworkChange = (selectedFrameWork: string): void => {
setSelectedFrameWork(selectedFrameWork);
switch (selectedFrameWork) {
case 'fluent_d':
return <FluentD />;
setSelectedFrameWorkDocs(FluentD);
break;
case 'fluent_bit':
return <FluentBit />;
setSelectedFrameWorkDocs(FluentBit);
break;
default:
return <LogStashDocs />;
setSelectedFrameWorkDocs(LogStashDocs);
break;
}
};
@ -51,10 +59,11 @@ export default function ExistingCollectors(): JSX.Element {
<div className="label"> Select Framework </div>
<Select
getPopupContainer={popupContainer}
defaultValue="fluent_d"
style={{ minWidth: 120 }}
placeholder="Select Framework"
onChange={(value): void => setSelectedFrameWork(value)}
onChange={(value): void => handleFrameworkChange(value)}
options={[
{
value: 'fluent_d',
@ -74,7 +83,14 @@ export default function ExistingCollectors(): JSX.Element {
</div>
<div className="content-container">
<MDXProvider>{renderDocs()}</MDXProvider>
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{selectedFrameWorkDocs}
</ReactMarkdown>
</div>
</div>
);

View File

@ -1,66 +1,58 @@
If you use fluentBit to collect logs in your stack, this tutorial will show you how to send logs from fluentBit to SigNoz.
## Collect Logs Using FluentBit in SigNoz cloud
If you use fluentBit to collect logs in your stack, you will be able to send logs from fluentBit to SigNoz.
At SigNoz we use the opentelemetry collector to receive logs, which supports the fluentforward protocol. So you can forward your logs from your fluentBit agent to the opentelemetry collector using the fluentforward protocol.
### Collect Logs Using FluentBit in SigNoz cloud
* Add otel collector binary to your VM by following this [guide](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
- Add otel collector binary to your VM by following this [guide](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
- Add fluentforward receiver to your `config.yaml`
* Add fluentforward receiver to your `config.yaml`
```yaml
receivers:
fluentforward:
endpoint: 0.0.0.0:24224
```
Here we have used port 24224 for listening in fluentforward protocol, but you can change it to a port you want.
You can read more about fluentforward receiver [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/fluentforwardreceiver).
```yaml
receivers:
fluentforward:
endpoint: 0.0.0.0:24224
```
* Modify your `config.yaml` and add the above receiver
```yaml {4}
service:
....
logs:
receivers: [otlp, fluentforward]
processors: [batch]
exporters: [otlp]
```
Here we have used port 24224 for listening in fluentforward protocol, but you can change it to a port you want.
You can read more about fluentforward receiver [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/fluentforwardreceiver).
* Add the following to your fluentBit config to forward the logs to otel collector.
```
[OUTPUT]
Name forward
Match *
Host localhost
Port 24224
```
In this config we are forwarding the logs to the otel collector which is listening on port 24224.
Also we are assuming that you are running the fluentBit binary on the host. If not, the value of `host` might change depending on your environment.
- Modify your `config.yaml` and add the above receiver
```yaml {4}
service:
....
logs:
receivers: [otlp, fluentforward]
processors: [batch]
exporters: [otlp]
```
- Change the fluentBit config to forward the logs to otel collector.
* Once you make these changes you can restart fluentBit and otel-binary, and you will be able to see the logs in SigNoz.
```
[INPUT]
Name dummy
Tag dummy.log
Dummy {"message": "mylog", "trace_id": "0000000000000000f4dbb3edd765f620", "span_id": "43222c2d51a7abe3"}
[OUTPUT]
Name forward
Match *
Host otel-collector-host
Port 24224
```
In this example we are generating sample logs and then forwarding them to the otel collector which is listening on port 24224.
`otel-collector-host` has to be replaced by the host where otel-collector is running. For more info check [troubleshooting](../install/troubleshooting.md#signoz-otel-collector-address-grid).
- Once you make these changes you can restart fluentBit and otel-binary, and you will be able to see the logs in SigNoz.
- To properly transform your existing log model into opentelemetry [log](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md) model you can use the different processors provided by opentelemetry. [link](./logs.md#processors-available-for-processing-logs)
eg:-
```yaml
processors:
logstransform:
operators:
- type: trace_parser
trace_id:
parse_from: attributes.trace_id
span_id:
parse_from: attributes.span_id
- type: remove
field: attributes.trace_id
- type: remove
field: attributes.span_id
```
The operations in the above processor will parse the trace_id and span_id from log to opentelemetry log model and remove them from attributes.
* To properly transform your existing log model into opentelemetry [log](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md) model you can use the different processors provided by opentelemetry. ([link](https://signoz.io/docs/userguide/logs/#processors-available-for-processing-logs))
eg:-
```yaml
processors:
logstransform:
operators:
- type: trace_parser
trace_id:
parse_from: attributes.trace_id
span_id:
parse_from: attributes.span_id
- type: remove
field: attributes.trace_id
- type: remove
field: attributes.span_id
```
The operations in the above processor will parse the trace_id and span_id from log to opentelemetry log model and remove them from attributes.

View File

@ -1,24 +1,21 @@
If you use fluentD to collect logs in your stack, this tutorial will show you how to send logs from fluentD to SigNoz.
## Collect Logs Using FluentD in SigNoz cloud
At SigNoz we use the opentelemetry collector to receive logs, which supports the fluentforward protocol. So you can forward your logs from your fluentD agent to the opentelemetry collector.
If you use fluentD to collect logs in your stack, you will be able to send logs from fluentD to SigNoz.
### Collect Logs Using FluentD in SigNoz cloud
At SigNoz, we use the opentelemetry collector to receive logs, which supports the fluentforward protocol. So you can forward your logs from your fluentD agent to the opentelemetry collector.
- Add otel collector binary to your VM by following this [guide](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
- Add fluentforward receiver to your `config.yaml`
* Add otel collector binary to your VM by following this [guide](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
* Add fluentforward receiver to your `config.yaml`
```yaml
receivers:
fluentforward:
endpoint: 0.0.0.0:24224
fluentforward:
endpoint: 0.0.0.0:24224
```
Here we have used port 24224 for listening in fluentforward protocol, but you can change it to a port you want.
Here we have used port 24224 for listening in fluentforward protocol, but you can change it to a port you want.
You can read more about fluentforward receiver [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/fluentforwardreceiver).
- Modify your `config.yaml` and add the above receiver
* Modify your `config.yaml` and add the above receiver
```yaml {4}
service:
....
@ -28,17 +25,9 @@ At SigNoz we use opentelemetry collector to recieve logs which supports the flue
exporters: [otlp]
```
- Change the fluentD config to forward the logs to otel collector.
* Add the following to your fluentD config to forward the logs to otel collector.
```
<source>
@type sample
sample [{"message": "my log data", "source": "myhost"}, {"message": "my log data 1", "source": "myhost1"}]
tag sample
rate 10000
</source>
<match sample>
<match <directive>>
@type forward
send_timeout 60s
recover_wait 10s
@ -46,34 +35,32 @@ At SigNoz we use opentelemetry collector to recieve logs which supports the flue
<server>
name myserver1
host otel-collector-host
host localhost
port 24224
</server>
</match>
```
In this config we are matching a directive and forwarding logs to the otel collector which is listening on port 24224. Replace `<directive>` with your directive name.
Also we are assuming that you are running the fluentD binary on the host. If not, the value of `host` might change depending on your environment.
* Once you make these changes you can restart fluentD and otel-binary, and you will be able to see the logs in SigNoz.
* To properly transform your existing log model into opentelemetry [log](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md) model you can use the different processors provided by opentelemetry. ([link](https://signoz.io/docs/userguide/logs/#processors-available-for-processing-logs))
In this example we are generating sample logs and then forwarding them to the otel collector which is listening on port 24224.
`otel-collector-host` has to be replaced by the host where otel-collector is running. For more info check [troubleshooting](../install/troubleshooting.md#signoz-otel-collector-address-grid).
eg:-
- Once you make this changes you can restart fluentD and otel-binary, and you will be able to see the logs in SigNoz.
```yaml
processors:
logstransform:
operators:
- type: trace_parser
trace_id:
parse_from: attributes.trace_id
span_id:
parse_from: attributes.span_id
- type: remove
field: attributes.trace_id
- type: remove
field: attributes.span_id
```
- To properly transform your existing log model into opentelemetry [log](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md) model you can use the different processors provided by opentelemetry. [link](./logs.md#processors-available-for-processing-logs)
eg:-
```yaml
processors:
logstransform:
operators:
- type: trace_parser
trace_id:
parse_from: attributes.trace_id
span_id:
parse_from: attributes.span_id
- type: remove
field: attributes.trace_id
- type: remove
field: attributes.span_id
```
The operations in the above processor will parse the trace_id and span_id from log to opentelemetry log model and remove them from attributes.
The operations in the above processor will parse the trace_id and span_id from log to opentelemetry log model and remove them from attributes.

View File

@ -1,12 +1,13 @@
If you use logstash to collect logs in your stack with this tutotrial you will be able to send logs from logstash to SigNoz.
## Collect Logs Using Logstash in SigNoz cloud
At SigNoz we use opentelemetry collector to recieve logs which supports the TCP protocol. So you can forward your logs from your logstash agent to opentelemetry collector
If you use logstash to collect logs in your stack, you will be able to send logs from Logstash to SigNoz.
## Steps to receive logs from Logstash:
At SigNoz we use OpenTelemetry Collector to receive logs which supports the TCP protocol. So you can forward your logs from the logstash agent to opentelemetry collector.
- Add fluentforward reciever to your `otel-collector-config.yaml` which is present inside `deploy/docker/clickhouse-setup`
* Add otel collector binary to your VM by following this [guide](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
```
* Add the receiver to your `config.yaml`
```yaml
receivers:
tcplog/logstash:
max_log_size: 1MiB
@ -16,47 +17,32 @@ At SigNoz we use opentelemetry collector to recieve logs which supports the TCP
add_attributes: false
operators: []
```
Here we have used port 2255 for listing in TCP protocol, but you can change it to a port you want.
Here we have used port 2255 for listening in TCP protocol, but you can change it to a port you want.
You can read more about tcplog receiver [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/tcplogreceiver).
- Update the pipleline for logs and make the following change in `otel-collector-config.yaml`
```
* Modify your `config.yaml` and add the above receiver
```yaml {4}
service:
...
logs:
receivers: [ otlp, tcplog/logstash ]
processors: [ batch ]
exporters: [ clickhouselogsexporter ]
....
logs:
receivers: [otlp, tcplog/logstash]
processors: [batch]
exporters: [otlp]
```
Here we are adding our clickhouse exporter and creating a pipeline which will collect logs from `tcplog/logstash` receiver, processing it using batch processor and export it to clickhouse.
- Expose the port in port for otel-collector in `docker-compose.yaml` file present in `deploy/docker/clickhouse-setup`
```
otel-collector:
...
ports:
- "2255:2255"
```
- Change the logstash config to forward the logs to otel collector.
* Change the logstash config to forward the logs to otel collector.
```
output {
tcp {
codec => json_lines # this is required otherwise it will send eveything in a single line
host => "otel-collector-host"
host => "localhost"
port => 2255
}
}
```
Here we are configuring logstash to send logs to otel-collector that we ran in the previous step, which is listening on port 2255.
Also we are assuming that you are running the logstash binary on the host. If not, the value of `host` might change depending on your environment.
In this example we are generating sample logs and then forwarding them to the otel collector which is listening on port 2255.
`otel-collector-host` has to be replaced by the host where otel-collector is running. For more info check [troubleshooting](../install/troubleshooting.md#signoz-otel-collector-address-grid).
* Once you make these changes you can restart the otel binary and logstash, and you will be able to see the logs in SigNoz.
- Once you make this changes you can restart logstash and SignNoz, and you will be able to see the logs in SigNoz.
- To properly transform your existing log model into opentelemetry [log](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md) model you can use the different processors provided by opentelemetry. [link](./logs.md#processors-available-for-processing-logs)
* To properly transform your existing log model into opentelemetry [log](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md) model you can use the different processors provided by opentelemetry ([link](https://signoz.io/docs/userguide/logs/#processors-available-for-processing-logs)).

View File

@ -1,8 +1,9 @@
import { MDXProvider } from '@mdx-js/react';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import Header from 'container/OnboardingContainer/common/Header/Header';
import ReactMarkdown from 'react-markdown';
import ConnectionStatus from '../common/LogsConnectionStatus/LogsConnectionStatus';
import Post from './kubernetes.md';
import KubernetesDocs from './kubernetes.md';
export default function Kubernetes({
activeStep,
@ -22,9 +23,14 @@ export default function Kubernetes({
/>
<div className="content-container">
<MDXProvider>
<Post />
</MDXProvider>
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{KubernetesDocs}
</ReactMarkdown>
</div>
</div>
)}

View File

@ -1,6 +1,6 @@
## Collect Kubernetes Pod Logs in SigNoz Cloud
To collect logs from your kubernetes cluster, you will need to deploy k8s-infra chart. Please follow the guide [here](/docs/tutorial/kubernetes-infra-metrics/). Log collection of pods from all namespaces is enabled by default except for pods in `kube-system` and `hotrod`. To modify the log collection mechanism, please follow the guides below.
To collect logs from your kubernetes cluster, you will need to deploy k8s-infra chart. Please follow the guide [here](https://signoz.io/docs/tutorial/kubernetes-infra-metrics/). Log collection of pods from all namespaces is enabled by default except for pods in `kube-system` and `hotrod`. To modify the log collection mechanism, please follow the guides below.
- [Disable automatic pod logs collection](#steps-to-disable-automatic-pod-logs-collection)
- [Filter/Exclude logs collection](#steps-to-filterexclude-logs-collection)
- [Disable automatic pod logs collection](https://signoz.io/docs/userguide/collect_kubernetes_pod_logs/#steps-to-disable-automatic-pod-logs-collection)
- [Filter/Exclude logs collection](https://signoz.io/docs/userguide/collect_kubernetes_pod_logs/#steps-to-filterexclude-logs-collection)

View File

@ -35,16 +35,6 @@ const supportedLogTypes = [
id: 'application_logs_log_file',
imgURL: `Logos/software-window.svg`,
},
{
name: 'NodeJS Winston Logs ',
id: 'nodejs',
imgURL: `Logos/node-js.svg`,
},
{
name: 'Application Logs using OTEL SDK',
id: 'application_logs_otel_sdk',
imgURL: `Logos/cmd-terminal.svg`,
},
{
name: 'Logs from existing collectors',
id: 'existing_collectors',

View File

@ -1,8 +1,9 @@
import { MDXProvider } from '@mdx-js/react';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import Header from 'container/OnboardingContainer/common/Header/Header';
import ReactMarkdown from 'react-markdown';
import ConnectionStatus from '../common/LogsConnectionStatus/LogsConnectionStatus';
import Post from './nodejs.md';
import NodeJsDocs from './nodejs.md';
export default function Nodejs({
activeStep,
@ -22,9 +23,14 @@ export default function Nodejs({
/>
<div className="content-container">
<MDXProvider>
<Post />
</MDXProvider>
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{NodeJsDocs}
</ReactMarkdown>
</div>
</div>
)}

View File

@ -1,8 +1,9 @@
import { MDXProvider } from '@mdx-js/react';
import { Code, Pre } from 'components/MarkdownRenderer/MarkdownRenderer';
import Header from 'container/OnboardingContainer/common/Header/Header';
import ReactMarkdown from 'react-markdown';
import ConnectionStatus from '../common/LogsConnectionStatus/LogsConnectionStatus';
import Post from './syslogs.md';
import SysLogsDocs from './syslogs.md';
export default function SysLogs({
activeStep,
@ -22,9 +23,14 @@ export default function SysLogs({
/>
<div className="content-container">
<MDXProvider>
<Post />
</MDXProvider>
<ReactMarkdown
components={{
pre: Pre,
code: Code,
}}
>
{SysLogsDocs}
</ReactMarkdown>
</div>
</div>
)}

View File

@ -1,11 +1,7 @@
## Collect Syslogs in SigNoz cloud
If you don't already have a SigNoz cloud account, you can sign up [here](https://signoz.io/teams/).
<Tabs>
<TabItem value="VM" label="VM" default>
- Add otel collector binary to your VM by following this [guide](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/).
- Add the syslog reciever to `config.yaml` to otel-collector.
```yaml {2-10}
@ -22,7 +18,7 @@ If you dont already have a SigNoz cloud account, you can sign up [here](https
```
Here we are collecting the logs and moving message from attributes to body using operators that are available.
You can read more about operators [here](./logs.md#operators-for-parsing-and-manipulating-logs).
You can read more about operators [here](https://signoz.io/docs/userguide/logs/#operators-for-parsing-and-manipulating-logs).
For more configurations that are available for syslog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/syslogreceiver).
@ -70,14 +66,10 @@ If you dont already have a SigNoz cloud account, you can sign up [here](https
action.resumeRetryCount="10"
queue.type="linkedList" queue.size="10000" template="UTCTraditionalForwardFormat")
```
So that you have retires and queue in place to de-couple the sending from the other logging action.
The value of `target` might vary depending on where SigNoz is deployed, since it is deployed on the same host I am using `0.0.0.0` for more help you can visit [here](../install/troubleshooting.md#signoz-otel-collector-address-grid).
So that you have retries and queue in place to de-couple the sending from other logging action. Also, we are assuming that you are running the otel binary on the same host. If not, the value of target might change depending on your environment.
- Now restart your rsyslog service by running `sudo systemctl restart rsyslog.service`
- You can check the status of service by running `sudo systemctl status rsyslog.service`
- If there are no errors your logs will be visible on SigNoz UI.
</TabItem>
</Tabs>
- You can check the status of service by running `sudo systemctl status rsyslog.service`
- If there are no errors your logs will be visible on SigNoz UI.

View File

@ -43,21 +43,21 @@ const useCases = {
id: ModulesMap.APM,
title: 'Application Monitoring',
desc:
'Monitor performance of your applications & troubleshoot problems by installing within your infra.',
'Monitor application metrics like p99 latency, error rates, external API calls, and db calls.',
stepDesc: defaultStepDesc,
},
LogsManagement: {
id: ModulesMap.LogsManagement,
title: 'Logs Management',
desc:
'Easily search and filter logs with query builder and automatically detect logs from K8s cluster.',
'Easily filter and query logs, build dashboards and alerts based on attributes in logs',
stepDesc: 'Choose the logs that you want to receive on SigNoz',
},
InfrastructureMonitoring: {
id: ModulesMap.InfrastructureMonitoring,
title: 'Infrastructure Monitoring',
desc:
'Easily search and filter logs with query builder and automatically detect logs from K8s cluster.',
'Monitor Kubernetes infrastructure metrics, hostmetrics, or metrics of any third-party integration',
stepDesc: defaultStepDesc,
},
};

View File

@ -0,0 +1,64 @@
import { Form } from 'antd';
import { initialQueryBuilderFormValuesMap } from 'constants/queryBuilder';
import QueryBuilderSearch from 'container/QueryBuilder/filters/QueryBuilderSearch';
import isEqual from 'lodash-es/isEqual';
import { useTranslation } from 'react-i18next';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
import { ProcessorFormField } from '../../AddNewProcessor/config';
import { formValidationRules } from '../../config';
import { FormLabelStyle } from '../styles';
/**
 * Adapter between antd Form's implicit `value`/`onChange` contract and
 * QueryBuilderSearch, which expects a full query-builder query object.
 */
function TagFilterInput({
	value,
	onChange,
	placeholder,
}: TagFilterInputProps): JSX.Element {
	// Start from the default logs query and overlay the form-supplied
	// filters when present.
	const query = { ...initialQueryBuilderFormValuesMap.logs };
	if (value) {
		query.filters = value;
	}

	const handleQueryChange = (updated: TagFilter): void => {
		// Only notify the form when the filters actually changed, so we
		// avoid redundant onChange churn.
		if (!isEqual(updated, query.filters)) {
			onChange(updated);
		}
	};

	return (
		<QueryBuilderSearch
			placeholder={placeholder}
			query={query}
			onChange={handleQueryChange}
		/>
	);
}

interface TagFilterInputProps {
	onChange: (filter: TagFilter) => void;
	value: TagFilter;
	placeholder: string;
}
/**
 * Form field for a pipeline's filter expression, rendered as a
 * query-builder search box inside the enclosing antd <Form>.
 */
function FilterInput({ fieldData }: FilterInputProps): JSX.Element {
	const { t } = useTranslation('pipeline');

	const fieldLabel = <FormLabelStyle>{fieldData.fieldName}</FormLabelStyle>;

	return (
		<Form.Item
			key={fieldData.id}
			name={fieldData.name}
			label={fieldLabel}
			rules={formValidationRules}
			required={false}
		>
			{/* antd's Form.Item injects `value` and `onChange` into its child,
			    so TagFilterInput receives them implicitly. */}
			<TagFilterInput placeholder={t(fieldData.placeholder)} />
		</Form.Item>
	);
}

interface FilterInputProps {
	fieldData: ProcessorFormField;
}

export default FilterInput;

View File

@ -1,31 +0,0 @@
import { Form, Input } from 'antd';
import { useTranslation } from 'react-i18next';
import { ProcessorFormField } from '../../AddNewProcessor/config';
import { formValidationRules } from '../../config';
import { FormLabelStyle } from '../styles';
/**
 * Free-text search input rendered as an antd form item.
 * antd's Form.Item supplies `value`/`onChange` to the Input.Search child,
 * so this component only wires labels, validation rules, and placeholder.
 */
function FilterSearch({ fieldData }: FilterSearchProps): JSX.Element {
	// Placeholder strings are resolved from the `pipeline` i18n namespace.
	const { t } = useTranslation('pipeline');
	return (
		<Form.Item
			required={false}
			label={<FormLabelStyle>{fieldData.fieldName}</FormLabelStyle>}
			key={fieldData.id}
			rules={formValidationRules}
			name={fieldData.name}
		>
			<Input.Search
				id={fieldData.id.toString()}
				name={fieldData.name}
				placeholder={t(fieldData.placeholder)}
				allowClear
			/>
		</Form.Item>
	);
}
interface FilterSearchProps {
	// Field metadata (id, display name, form name, placeholder key).
	fieldData: ProcessorFormField;
}
export default FilterSearch;

View File

@ -0,0 +1,24 @@
import './styles.scss';
import { queryFilterTags } from 'hooks/queryBuilder/useTag';
import { PipelineData } from 'types/api/pipeline/def';
/**
 * Read-only chip list summarizing a pipeline's filter conditions.
 * Each condition string from queryFilterTags becomes one chip.
 */
function PipelineFilterPreview({
	filter,
}: PipelineFilterPreviewProps): JSX.Element {
	const conditionTags = queryFilterTags(filter);

	return (
		<div className="pipeline-filter-preview-container">
			{conditionTags.map((tag) => (
				<div key={tag} className="pipeline-filter-preview-condition">
					{tag}
				</div>
			))}
		</div>
	);
}

interface PipelineFilterPreviewProps {
	filter: PipelineData['filter'];
}

export default PipelineFilterPreview;

View File

@ -0,0 +1,10 @@
// Single filter-condition chip rendered inside the pipeline preview.
.pipeline-filter-preview-condition {
	padding: 0 0.2em;
}
// Wrapping row layout for the list of condition chips.
.pipeline-filter-preview-container {
	display: flex;
	flex-wrap: wrap;
	gap: 0.4em;
	font-size: 0.75rem;
}

View File

@ -4,6 +4,7 @@ import { PipelineData, ProcessorData } from 'types/api/pipeline/def';
import { PipelineIndexIcon } from '../AddNewProcessor/styles';
import { ColumnDataStyle, ListDataStyle, ProcessorIndexIcon } from '../styles';
import PipelineFilterPreview from './PipelineFilterPreview';
const componentMap: ComponentMap = {
orderId: ({ record }) => <PipelineIndexIcon>{record}</PipelineIndexIcon>,
@ -14,6 +15,7 @@ const componentMap: ComponentMap = {
),
id: ({ record }) => <ProcessorIndexIcon>{record}</ProcessorIndexIcon>,
name: ({ record }) => <ListDataStyle>{record}</ListDataStyle>,
filter: ({ record }) => <PipelineFilterPreview filter={record} />,
};
function TableComponents({
@ -31,7 +33,9 @@ type ComponentMap = {
[key: string]: React.FC<{ record: Record }>;
};
export type Record = PipelineData['orderId'] & ProcessorData;
export type Record = PipelineData['orderId'] &
PipelineData['filter'] &
ProcessorData;
interface TableComponentsProps {
columnKey: string;

View File

@ -8,15 +8,16 @@ import {
import DeploymentStage from '../Layouts/ChangeHistory/DeploymentStage';
import DeploymentTime from '../Layouts/ChangeHistory/DeploymentTime';
import DescriptionTextArea from './AddNewPipeline/FormFields/DescriptionTextArea';
import FilterInput from './AddNewPipeline/FormFields/FilterInput';
import NameInput from './AddNewPipeline/FormFields/NameInput';
export const pipelineFields = [
{
id: 1,
fieldName: 'Filter',
placeholder: 'search_pipeline_placeholder',
placeholder: 'pipeline_filter_placeholder',
name: 'filter',
component: NameInput,
component: FilterInput,
},
{
id: 2,

View File

@ -1,7 +1,33 @@
import { Pipeline, PipelineData } from 'types/api/pipeline/def';
import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
// Version tag attached to pipeline configurations in tests/mocks.
export const configurationVersion = '1.0';

/**
 * Builds a single-condition TagFilter for pipeline mocks.
 *
 * @param key   attribute key the condition matches on
 * @param op    comparison operator (e.g. '=')
 * @param value value the attribute is compared against
 */
export function mockPipelineFilter(
	key: string,
	op: string,
	value: string,
): TagFilter {
	const condition = {
		// Deterministic id so snapshots stay stable across runs.
		id: `${key}-${value}`,
		key: {
			key,
			dataType: DataTypes.String,
			type: '',
			isColumn: false,
			isJSON: false,
		},
		op,
		value,
	};

	return {
		op: 'AND',
		items: [condition],
	};
}
export const pipelineMockData: Array<PipelineData> = [
{
id: '4453c8b0-c0fd-42bf-bf09-7cc1b04ccdc9',
@ -10,7 +36,7 @@ export const pipelineMockData: Array<PipelineData> = [
alias: 'apachecommonparser',
description: 'This is a desc',
enabled: false,
filter: 'attributes.source == nginx',
filter: mockPipelineFilter('source', '=', 'nginx'),
config: [
{
orderId: 1,
@ -43,7 +69,7 @@ export const pipelineMockData: Array<PipelineData> = [
alias: 'movingpipelinenew',
description: 'This is a desc of move',
enabled: false,
filter: 'attributes.method == POST',
filter: mockPipelineFilter('method', '=', 'POST'),
config: [
{
orderId: 1,

View File

@ -1,5 +1,6 @@
import { render } from '@testing-library/react';
import { I18nextProvider } from 'react-i18next';
import { QueryClient, QueryClientProvider } from 'react-query';
import { Provider } from 'react-redux';
import { MemoryRouter } from 'react-router-dom';
import i18n from 'ReactI18';
@ -27,25 +28,36 @@ beforeAll(() => {
matchMedia();
});
// Shared React Query client for the renders in this test file;
// window-focus refetching is disabled so results stay deterministic.
const queryClient = new QueryClient({
	defaultOptions: {
		queries: {
			refetchOnWindowFocus: false,
		},
	},
});
describe('PipelinePage container test', () => {
it('should render AddNewPipeline section', () => {
const setActionType = jest.fn();
const selectedPipelineData = pipelineMockData[0];
const isActionType = 'add-pipeline';
const { asFragment } = render(
<MemoryRouter>
<Provider store={store}>
<I18nextProvider i18n={i18n}>
<AddNewPipeline
isActionType={isActionType}
setActionType={setActionType}
selectedPipelineData={selectedPipelineData}
setShowSaveButton={jest.fn()}
setCurrPipelineData={jest.fn()}
currPipelineData={pipelineMockData}
/>
</I18nextProvider>
</Provider>
<QueryClientProvider client={queryClient}>
<Provider store={store}>
<I18nextProvider i18n={i18n}>
<AddNewPipeline
isActionType={isActionType}
setActionType={setActionType}
selectedPipelineData={selectedPipelineData}
setShowSaveButton={jest.fn()}
setCurrPipelineData={jest.fn()}
currPipelineData={pipelineMockData}
/>
</I18nextProvider>
</Provider>
</QueryClientProvider>
</MemoryRouter>,
);
expect(asFragment()).toMatchSnapshot();

View File

@ -1,4 +1,4 @@
import { pipelineMockData } from '../mocks/pipeline';
import { mockPipelineFilter, pipelineMockData } from '../mocks/pipeline';
import {
processorFields,
processorTypes,
@ -68,7 +68,7 @@ describe('Utils testing of Pipeline Page', () => {
...pipelineMockData[findRecordIndex],
name: 'updated name',
description: 'changed description',
filter: 'value == test',
filter: mockPipelineFilter('value', '=', 'test'),
tags: ['test'],
};
const editedData = getEditedDataSource(

View File

@ -5,6 +5,7 @@ import { MAX_FORMULAS, MAX_QUERIES } from 'constants/queryBuilder';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
// ** Constants
import { memo, useEffect, useMemo } from 'react';
import { DataSource } from 'types/common/queryBuilder';
// ** Components
import { Formula, Query } from './components';
@ -79,11 +80,27 @@ export const QueryBuilder = memo(function QueryBuilder({
/>
</Col>
))}
{currentQuery.builder.queryFormulas.map((formula, index) => (
<Col key={formula.queryName} span={24}>
<Formula formula={formula} index={index} />
</Col>
))}
{currentQuery.builder.queryFormulas.map((formula, index) => {
const isAllMetricDataSource = currentQuery.builder.queryData.every(
(query) => query.dataSource === DataSource.METRICS,
);
const query =
currentQuery.builder.queryData[index] ||
currentQuery.builder.queryData[0];
return (
<Col key={formula.queryName} span={24}>
<Formula
filterConfigs={filterConfigs}
query={query}
isAdditionalFilterEnable={isAllMetricDataSource}
formula={formula}
index={index}
/>
</Col>
);
})}
</Row>
</Col>

View File

@ -21,13 +21,13 @@ export const AdditionalFiltersToggler = memo(function AdditionalFiltersToggler({
setIsOpenedFilters((prevState) => !prevState);
};
const filtersTexts: ReactNode = listOfAdditionalFilter.map((str, index) => {
const filtersTexts: ReactNode = listOfAdditionalFilter?.map((str, index) => {
const isNextLast = index + 1 === listOfAdditionalFilter.length - 1;
if (index === listOfAdditionalFilter.length - 1) {
return (
<Fragment key={str}>
{listOfAdditionalFilter.length > 1 && 'and'}{' '}
{listOfAdditionalFilter?.length > 1 && 'and'}{' '}
<StyledLink>{str.toUpperCase()}</StyledLink>
</Fragment>
);

View File

@ -1,3 +1,13 @@
import { IBuilderFormula } from 'types/api/queryBuilder/queryBuilderData';
import { QueryBuilderProps } from 'container/QueryBuilder/QueryBuilder.interfaces';
import {
IBuilderFormula,
IBuilderQuery,
} from 'types/api/queryBuilder/queryBuilderData';
export type FormulaProps = { formula: IBuilderFormula; index: number };
// Props for the Formula list item in the query builder.
export type FormulaProps = {
	formula: IBuilderFormula;
	// Position of this formula in the builder's formula list.
	index: number;
	// Associated builder query; NOTE(review): appears to back the
	// additional filters (limit/having/order by) — confirm with Formula.tsx.
	query: IBuilderQuery;
	filterConfigs: Partial<QueryBuilderProps['filterConfigs']>;
	// When true, the additional-filters section is rendered.
	isAdditionalFilterEnable: boolean;
};

View File

@ -1,22 +1,45 @@
import { Col, Input } from 'antd';
import { Col, Input, Row } from 'antd';
// ** Components
import { ListItemWrapper, ListMarker } from 'container/QueryBuilder/components';
import {
FilterLabel,
ListItemWrapper,
ListMarker,
} from 'container/QueryBuilder/components';
import HavingFilter from 'container/QueryBuilder/filters/Formula/Having/HavingFilter';
import LimitFilter from 'container/QueryBuilder/filters/Formula/Limit/Limit';
import OrderByFilter from 'container/QueryBuilder/filters/Formula/OrderBy/OrderByFilter';
// ** Hooks
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { ChangeEvent, useCallback } from 'react';
import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations';
import { ChangeEvent, useCallback, useMemo } from 'react';
import { IBuilderFormula } from 'types/api/queryBuilder/queryBuilderData';
import { AdditionalFiltersToggler } from '../AdditionalFiltersToggler';
// ** Types
import { FormulaProps } from './Formula.interfaces';
const { TextArea } = Input;
export function Formula({ index, formula }: FormulaProps): JSX.Element {
export function Formula({
index,
formula,
filterConfigs,
query,
isAdditionalFilterEnable,
}: FormulaProps): JSX.Element {
const {
removeQueryBuilderEntityByIndex,
handleSetFormulaData,
} = useQueryBuilder();
const {
listOfAdditionalFormulaFilters,
handleChangeFormulaData,
} = useQueryOperations({
index,
query,
filterConfigs,
formula,
});
const handleDelete = useCallback(() => {
removeQueryBuilderEntityByIndex('queryFormulas', index);
}, [index, removeQueryBuilderEntityByIndex]);
@ -43,6 +66,75 @@ export function Formula({ index, formula }: FormulaProps): JSX.Element {
[index, formula, handleSetFormulaData],
);
const handleChangeLimit = useCallback(
(value: IBuilderFormula['limit']) => {
handleChangeFormulaData('limit', value);
},
[handleChangeFormulaData],
);
const handleChangeHavingFilter = useCallback(
(value: IBuilderFormula['having']) => {
handleChangeFormulaData('having', value);
},
[handleChangeFormulaData],
);
const handleChangeOrderByFilter = useCallback(
(value: IBuilderFormula['orderBy']) => {
handleChangeFormulaData('orderBy', value);
},
[handleChangeFormulaData],
);
const renderAdditionalFilters = useMemo(
() => (
<>
<Col span={11}>
<Row gutter={[11, 5]}>
<Col flex="5.93rem">
<FilterLabel label="Limit" />
</Col>
<Col flex="1 1 12.5rem">
<LimitFilter formula={formula} onChange={handleChangeLimit} />
</Col>
</Row>
</Col>
<Col span={11}>
<Row gutter={[11, 5]}>
<Col flex="5.93rem">
<FilterLabel label="HAVING" />
</Col>
<Col flex="1 1 12.5rem">
<HavingFilter formula={formula} onChange={handleChangeHavingFilter} />
</Col>
</Row>
</Col>
<Col span={11}>
<Row gutter={[11, 5]}>
<Col flex="5.93rem">
<FilterLabel label="Order by" />
</Col>
<Col flex="1 1 12.5rem">
<OrderByFilter
query={query}
formula={formula}
onChange={handleChangeOrderByFilter}
/>
</Col>
</Row>
</Col>
</>
),
[
formula,
handleChangeHavingFilter,
handleChangeLimit,
handleChangeOrderByFilter,
query,
],
);
return (
<ListItemWrapper onDelete={handleDelete}>
<Col span={24}>
@ -54,7 +146,7 @@ export function Formula({ index, formula }: FormulaProps): JSX.Element {
/>
</Col>
<Col span={24}>
<TextArea
<Input.TextArea
name="expression"
onChange={handleChange}
size="middle"
@ -71,6 +163,17 @@ export function Formula({ index, formula }: FormulaProps): JSX.Element {
addonBefore="Legend Format"
/>
</Col>
{isAdditionalFilterEnable && (
<Col span={24}>
<AdditionalFiltersToggler
listOfAdditionalFilter={listOfAdditionalFormulaFilters}
>
<Row gutter={[0, 11]} justify="space-between">
{renderAdditionalFilters}
</Row>
</AdditionalFiltersToggler>
</Col>
)}
</ListItemWrapper>
);
}

View File

@ -21,7 +21,7 @@ import AggregateEveryFilter from 'container/QueryBuilder/filters/AggregateEveryF
import LimitFilter from 'container/QueryBuilder/filters/LimitFilter/LimitFilter';
import QueryBuilderSearch from 'container/QueryBuilder/filters/QueryBuilderSearch';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { useQueryOperations } from 'hooks/queryBuilder/useQueryOperations';
import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations';
// ** Hooks
import { ChangeEvent, memo, ReactNode, useCallback } from 'react';
import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';

View File

@ -22,6 +22,7 @@ import {
} from 'types/api/queryBuilder/queryAutocompleteResponse';
import { DataSource } from 'types/common/queryBuilder';
import { ExtendedSelectOption } from 'types/common/select';
import { popupContainer } from 'utils/selectPopupContainer';
import { transformToUpperCase } from 'utils/transformToUpperCase';
import { selectStyle } from '../QueryBuilderSearch/config';
@ -172,6 +173,7 @@ export const AggregatorFilter = memo(function AggregatorFilter({
return (
<AutoComplete
getPopupContainer={popupContainer}
placeholder={placeholder}
style={selectStyle}
showArrow={false}

View File

@ -0,0 +1,198 @@
import { Select } from 'antd';
import { HAVING_OPERATORS, initialHavingValues } from 'constants/queryBuilder';
import { HavingFilterTag } from 'container/QueryBuilder/components';
import { useTagValidation } from 'hooks/queryBuilder/useTagValidation';
import {
transformFromStringToHaving,
transformHavingToStringValue,
} from 'lib/query/transformQueryBuilderData';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { Having, HavingForm } from 'types/api/queryBuilder/queryBuilderData';
import { SelectOption } from 'types/common/select';
import { popupContainer } from 'utils/selectPopupContainer';
import { getHavingObject, isValidHavingValue } from '../../utils';
import { HavingFilterProps, HavingTagRenderProps } from './types';
/**
 * Having-clause editor for a query-builder formula.
 *
 * Renders a multi-select whose free text is parsed as
 * `<EXPRESSION> <OPERATOR> <value...>` (e.g. "F1 > 5"). Completed clauses are
 * reported to the parent via `onChange` as `Having[]`; the applied clauses are
 * mirrored locally as strings for display.
 */
function HavingFilter({ formula, onChange }: HavingFilterProps): JSX.Element {
	const { having } = formula;
	const [searchText, setSearchText] = useState<string>('');
	// String form of the currently applied having clauses (the Select value).
	const [localValues, setLocalValues] = useState<string[]>([]);
	// Parsed form of whatever is currently typed in the search box.
	const [currentFormValue, setCurrentFormValue] = useState<HavingForm>(
		initialHavingValues,
	);
	const [options, setOptions] = useState<SelectOption<string, string>[]>([]);

	const { isMulti } = useTagValidation(
		currentFormValue.op,
		currentFormValue.value,
	);

	// For a formula the only selectable "column" is its own expression.
	const columnName = formula.expression.toUpperCase();

	const aggregatorOptions: SelectOption<string, string>[] = useMemo(
		() => [{ label: columnName, value: columnName }],
		[columnName],
	);

	// Editing a tag: drop it from the applied clauses and move its text back
	// into the search box so the user can modify and re-submit it.
	const handleUpdateTag = useCallback(
		(value: string) => {
			const remainingValues = localValues.filter(
				(currentValue) => currentValue !== value,
			);
			// Renamed from `having` to avoid shadowing `formula.having` above.
			const updatedHaving: Having[] = remainingValues.map(
				transformFromStringToHaving,
			);
			onChange(updatedHaving);
			setSearchText(value);
		},
		[localValues, onChange],
	);

	// Build dropdown suggestions for the current search string: the aggregator
	// (formula expression) first, then — once it is chosen — the matching
	// having operators with the typed value appended.
	const generateOptions = useCallback(
		(currentString: string) => {
			const [aggregator = '', op = '', ...restValue] = currentString.split(' ');
			let newOptions: SelectOption<string, string>[] = [];

			const isAggregatorExist = columnName
				.toLowerCase()
				.includes(currentString.toLowerCase());

			const isAggregatorChosen = aggregator === columnName;

			if (isAggregatorExist || aggregator === '') {
				newOptions = aggregatorOptions;
			}

			if ((isAggregatorChosen && op === '') || op) {
				const filteredOperators = HAVING_OPERATORS.filter((num) =>
					num.toLowerCase().includes(op.toLowerCase()),
				);

				// `restValue` is always an array from rest-destructuring, so join it
				// directly (the previous `restValue && …` guard was redundant).
				newOptions = filteredOperators.map((opt) => ({
					label: `${columnName} ${opt} ${restValue.join(' ')}`,
					value: `${columnName} ${opt} ${restValue.join(' ')}`,
				}));
			}

			setOptions(newOptions);
		},
		[aggregatorOptions, columnName],
	);

	// Keep the parsed form value and the suggestion list in sync with the text.
	const parseSearchText = useCallback(
		(text: string) => {
			// Renamed to avoid shadowing the outer `columnName`.
			const { columnName: parsedColumn, op, value } = getHavingObject(text);
			setCurrentFormValue({ columnName: parsedColumn, op, value });

			generateOptions(text);
		},
		[generateOptions],
	);

	const tagRender = ({
		label,
		value,
		closable,
		disabled,
		onClose,
	}: HavingTagRenderProps): JSX.Element => {
		const handleClose = (): void => {
			onClose();
			setSearchText('');
		};

		return (
			<HavingFilterTag
				label={label}
				value={value}
				closable={closable}
				disabled={disabled}
				onClose={handleClose}
				onUpdate={handleUpdateTag}
			/>
		);
	};

	// Normalise whitespace and cap free text at "column op value" unless the
	// current operator accepts multiple values.
	const handleSearch = (search: string): void => {
		const trimmedSearch = search.replace(/\s\s+/g, ' ').trimStart();

		const currentSearch = isMulti
			? trimmedSearch
			: trimmedSearch.split(' ').slice(0, 3).join(' ');

		const isValidSearch = isValidHavingValue(currentSearch);

		if (isValidSearch) {
			setSearchText(currentSearch);
		}
	};

	useEffect(() => {
		setLocalValues(transformHavingToStringValue(having || []));
	}, [having]);

	useEffect(() => {
		parseSearchText(searchText);
	}, [searchText, parseSearchText]);

	const resetChanges = (): void => {
		setSearchText('');
		setCurrentFormValue(initialHavingValues);
		setOptions(aggregatorOptions);
	};

	const handleDeselect = (value: string): void => {
		const remainingValues = localValues.filter((item) => item !== value);
		const updatedHaving: Having[] = remainingValues.map(
			transformFromStringToHaving,
		);
		onChange(updatedHaving);
		resetChanges();
	};

	// Clear the search box only once a complete clause has been entered.
	const handleSelect = (currentValue: string): void => {
		const { columnName: parsedColumn, op, value } = getHavingObject(
			currentValue,
		);
		const isCompletedValue = value.every((item) => !!item);

		const isClearSearch = isCompletedValue && parsedColumn && op;

		setSearchText(isClearSearch ? '' : currentValue);
	};

	const handleChange = (values: string[]): void => {
		const updatedHaving: Having[] = values.map(transformFromStringToHaving);

		const isSelectable =
			currentFormValue.value.length > 0 &&
			currentFormValue.value.every((value) => !!value);

		if (isSelectable) {
			onChange(updatedHaving);
			resetChanges();
		}
	};

	return (
		<Select
			getPopupContainer={popupContainer}
			autoClearSearchValue={false}
			mode="multiple"
			onSearch={handleSearch}
			searchValue={searchText}
			data-testid="havingSelectFormula"
			placeholder="Count(operation) > 5"
			style={{ width: '100%' }}
			tagRender={tagRender}
			onDeselect={handleDeselect}
			onSelect={handleSelect}
			onChange={handleChange}
			value={localValues}
		>
			{options.map((opt) => (
				<Select.Option key={opt.value} value={opt.value} title="havingOption">
					{opt.label}
				</Select.Option>
			))}
		</Select>
	);
}

export default HavingFilter;

View File

@ -0,0 +1,12 @@
import { HavingFilterTagProps } from 'container/QueryBuilder/components/HavingFilterTag/HavingFilterTag.interfaces';
import {
Having,
IBuilderFormula,
} from 'types/api/queryBuilder/queryBuilderData';
/** Props for the formula variant of the HavingFilter component. */
export type HavingFilterProps = {
	// Formula whose `having` clauses are being edited.
	formula: IBuilderFormula;
	// Invoked with the full, updated list of having clauses.
	onChange: (having: Having[]) => void;
};

/** Tag-render props forwarded by antd's Select; `onUpdate` is supplied locally. */
export type HavingTagRenderProps = Omit<HavingFilterTagProps, 'onUpdate'>;

View File

@ -0,0 +1,20 @@
import { InputNumber } from 'antd';
import { selectStyle } from '../../QueryBuilderSearch/config';
import { handleKeyDownLimitFilter } from '../../utils';
import { LimitFilterProps } from './types';
/**
 * Numeric "limit" input for a query-builder formula row.
 *
 * Enforces a minimum of 1 and restricts keyboard input to digits/backspace
 * via the shared `handleKeyDownLimitFilter` handler.
 */
function LimitFilter({ onChange, formula }: LimitFilterProps): JSX.Element {
	const { limit } = formula;

	return (
		<InputNumber
			type="number"
			min={1}
			style={selectStyle}
			value={limit}
			onKeyDown={handleKeyDownLimitFilter}
			onChange={onChange}
		/>
	);
}

export default LimitFilter;

View File

@ -0,0 +1,6 @@
import { IBuilderFormula } from 'types/api/queryBuilder/queryBuilderData';
/** Props for the formula variant of the LimitFilter input. */
export interface LimitFilterProps {
	// Receives the new limit, or null when the input is cleared.
	onChange: (values: number | null) => void;
	// Formula providing the current `limit` value.
	formula: IBuilderFormula;
}

View File

@ -0,0 +1,84 @@
import { Select, Spin } from 'antd';
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
import { useMemo } from 'react';
import { MetricAggregateOperator } from 'types/common/queryBuilder';
import { popupContainer } from 'utils/selectPopupContainer';
import { selectStyle } from '../../QueryBuilderSearch/config';
import { OrderByProps } from './types';
import { useOrderByFormulaFilter } from './useOrderByFormulaFilter';
/**
 * Order-by multi-select for a query-builder formula row.
 *
 * Combines attribute keys fetched for the current search text, the query's
 * group-by columns, and the formula's own ASC/DESC aggregation entries.
 * Disabled until an aggregate attribute is chosen, and for NOOP aggregation.
 */
function OrderByFilter({
	formula,
	onChange,
	query,
}: OrderByProps): JSX.Element {
	const {
		debouncedSearchText,
		createOptions,
		aggregationOptions,
		handleChange,
		handleSearchKeys,
		selectedValue,
		generateOptions,
	} = useOrderByFormulaFilter({ query, onChange, formula });

	// Attribute keys matching the (debounced) search text.
	const { data, isFetching } = useGetAggregateKeys(
		{
			aggregateAttribute: query.aggregateAttribute.key,
			dataSource: query.dataSource,
			aggregateOperator: query.aggregateOperator,
			searchText: debouncedSearchText,
		},
		{
			enabled: !!query.aggregateAttribute.key,
			keepPreviousData: true,
		},
	);

	const selectOptions = useMemo(() => {
		const attributeOptions = createOptions(data?.payload?.attributeKeys || []);
		const groupByOptions = createOptions(query.groupBy);

		// With NOOP there is nothing aggregated to order by, so offer raw
		// attribute keys; otherwise offer group-by columns plus the formula's
		// own aggregation entries.
		const candidates =
			query.aggregateOperator === MetricAggregateOperator.NOOP
				? attributeOptions
				: [...groupByOptions, ...aggregationOptions];

		return generateOptions(candidates);
	}, [
		aggregationOptions,
		createOptions,
		data?.payload?.attributeKeys,
		generateOptions,
		query.aggregateOperator,
		query.groupBy,
	]);

	const isDisabledSelect =
		!query.aggregateAttribute.key ||
		query.aggregateOperator === MetricAggregateOperator.NOOP;

	return (
		<Select
			getPopupContainer={popupContainer}
			mode="tags"
			style={selectStyle}
			onSearch={handleSearchKeys}
			showSearch
			disabled={isDisabledSelect}
			showArrow={false}
			value={selectedValue}
			labelInValue
			filterOption={false}
			options={selectOptions}
			notFoundContent={isFetching ? <Spin size="small" /> : null}
			onChange={handleChange}
		/>
	);
}

export default OrderByFilter;

View File

@ -0,0 +1,12 @@
import {
IBuilderFormula,
IBuilderQuery,
} from 'types/api/queryBuilder/queryBuilderData';
/** Props for the formula variant of the OrderByFilter. */
export interface OrderByProps {
	// Formula whose `orderBy` is being edited.
	formula: IBuilderFormula;
	// Query the formula is based on; supplies attribute keys and group-by columns.
	query: IBuilderQuery;
	// Invoked with the updated orderBy payload.
	onChange: (value: IBuilderFormula['orderBy']) => void;
}

export type IOrderByFormulaFilterProps = OrderByProps;

View File

@ -0,0 +1,127 @@
import { DEBOUNCE_DELAY } from 'constants/queryBuilderFilterConfig';
import useDebounce from 'hooks/useDebounce';
import { IOption } from 'hooks/useResourceAttribute/types';
import isEqual from 'lodash-es/isEqual';
import uniqWith from 'lodash-es/uniqWith';
import { parse } from 'papaparse';
import { useMemo, useState } from 'react';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { OrderByPayload } from 'types/api/queryBuilder/queryBuilderData';
import { ORDERBY_FILTERS } from '../../OrderByFilter/config';
import { SIGNOZ_VALUE } from '../../OrderByFilter/constants';
import { UseOrderByFilterResult } from '../../OrderByFilter/useOrderByFilter';
import {
getLabelFromValue,
mapLabelValuePairs,
orderByValueDelimiter,
} from '../../OrderByFilter/utils';
import { getRemoveOrderFromValue } from '../../QueryBuilderSearch/utils';
import { getUniqueOrderByValues, getValidOrderByResult } from '../../utils';
import { IOrderByFormulaFilterProps } from './types';
import { transformToOrderByStringValuesByFormula } from './utils';
/**
 * State and handlers for the order-by filter of a query-builder formula.
 *
 * Mirrors the query-variant hook (`useOrderByFilter`) but derives the
 * aggregation options from the formula's expression. Every option value
 * encodes `<columnName><orderByValueDelimiter><order>`; the SIGNOZ_VALUE
 * sentinel stands in for "order by the formula's own value".
 */
export const useOrderByFormulaFilter = ({
	onChange,
	formula,
}: IOrderByFormulaFilterProps): UseOrderByFilterResult => {
	const [searchText, setSearchText] = useState<string>('');
	// Debounced copy exposed to the caller so attribute-key lookups are throttled.
	const debouncedSearchText = useDebounce(searchText, DEBOUNCE_DELAY);

	const handleSearchKeys = (searchText: string): void =>
		setSearchText(searchText);

	// Translate the selected options back into OrderByPayload objects and
	// propagate them upward; clears the search box afterwards.
	const handleChange = (values: IOption[]): void => {
		const validResult = getValidOrderByResult(values);
		const result = getUniqueOrderByValues(validResult);

		const orderByValues: OrderByPayload[] = result.map((item) => {
			// Split "<columnName><delimiter><order>" using papaparse.
			const match = parse(item.value, { delimiter: orderByValueDelimiter });

			// NOTE(review): papaparse's `parse` returns a result object, so this
			// fallback looks unreachable — presumably kept for parity with the
			// query-variant hook; confirm before removing.
			if (!match) {
				return {
					columnName: item.value,
					order: ORDERBY_FILTERS.ASC,
				};
			}

			const [columnName, order] = match.data.flat() as string[];

			const columnNameValue =
				columnName === SIGNOZ_VALUE ? SIGNOZ_VALUE : columnName;

			// Default to ascending when no order was encoded in the value.
			const orderValue = order ?? ORDERBY_FILTERS.ASC;

			return {
				columnName: columnNameValue,
				order: orderValue,
			};
		});

		setSearchText('');
		onChange(orderByValues);
	};

	// ASC/DESC pair for ordering by the formula's own value (SIGNOZ_VALUE).
	const aggregationOptions = [
		{
			label: `${formula.expression} ${ORDERBY_FILTERS.ASC}`,
			value: `${SIGNOZ_VALUE}${orderByValueDelimiter}${ORDERBY_FILTERS.ASC}`,
		},
		{
			label: `${formula.expression} ${ORDERBY_FILTERS.DESC}`,
			value: `${SIGNOZ_VALUE}${orderByValueDelimiter}${ORDERBY_FILTERS.DESC}`,
		},
	];

	// Currently applied orderBy entries, rendered as select options.
	const selectedValue = transformToOrderByStringValuesByFormula(formula);

	const createOptions = (data: BaseAutocompleteData[]): IOption[] =>
		mapLabelValuePairs(data).flat();

	// ASC/DESC pair for the free-typed search text, if any.
	const customValue: IOption[] = useMemo(() => {
		if (!searchText) return [];

		return [
			{
				label: `${searchText} ${ORDERBY_FILTERS.ASC}`,
				value: `${searchText}${orderByValueDelimiter}${ORDERBY_FILTERS.ASC}`,
			},
			{
				label: `${searchText} ${ORDERBY_FILTERS.DESC}`,
				value: `${searchText}${orderByValueDelimiter}${ORDERBY_FILTERS.DESC}`,
			},
		];
	}, [searchText]);

	// Merge the typed custom options with the supplied ones, dropping
	// duplicates and anything already selected.
	const generateOptions = (options: IOption[]): IOption[] => {
		const currentCustomValue = options.find(
			(keyOption) =>
				getRemoveOrderFromValue(keyOption.value) === debouncedSearchText,
		)
			? []
			: customValue;

		const result = [...currentCustomValue, ...options];

		const uniqResult = uniqWith(result, isEqual);

		return uniqResult.filter(
			(option) =>
				!getLabelFromValue(selectedValue).includes(
					getRemoveOrderFromValue(option.value),
				),
		);
	};

	return {
		searchText,
		debouncedSearchText,
		selectedValue,
		aggregationOptions,
		createOptions,
		handleChange,
		handleSearchKeys,
		generateOptions,
	};
};

View File

@ -0,0 +1,26 @@
import { IOption } from 'hooks/useResourceAttribute/types';
import { IBuilderFormula } from 'types/api/queryBuilder/queryBuilderData';
import { SIGNOZ_VALUE } from '../../OrderByFilter/constants';
import { orderByValueDelimiter } from '../../OrderByFilter/utils';
/**
 * Maps a formula's `orderBy` entries to select options of the shape
 * `{ label: "<column> <order>", value: "<column><delimiter><order>" }`.
 * The SIGNOZ_VALUE sentinel column is displayed as the formula expression.
 * Returns an empty list when the formula has no `orderBy`.
 */
export const transformToOrderByStringValuesByFormula = (
	formula: IBuilderFormula,
): IOption[] =>
	formula?.orderBy?.map((item) => {
		// Only the label differs for the sentinel; the value always keeps the
		// raw column name.
		const displayName =
			item.columnName === SIGNOZ_VALUE ? formula.expression : item.columnName;

		return {
			label: `${displayName} ${item.order}`,
			value: `${item.columnName}${orderByValueDelimiter}${item.order}`,
		};
	}) || [];

View File

@ -18,6 +18,7 @@ import { memo, useCallback, useEffect, useState } from 'react';
import { useQueryClient } from 'react-query';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { SelectOption } from 'types/common/select';
import { popupContainer } from 'utils/selectPopupContainer';
import { selectStyle } from '../QueryBuilderSearch/config';
import { GroupByFilterProps } from './GroupByFilter.interfaces';
@ -169,6 +170,7 @@ export const GroupByFilter = memo(function GroupByFilter({
return (
<Select
getPopupContainer={popupContainer}
mode="tags"
style={selectStyle}
onSearch={handleSearchKeys}

View File

@ -1,7 +1,6 @@
import { Select } from 'antd';
// ** Constants
import { HAVING_OPERATORS, initialHavingValues } from 'constants/queryBuilder';
import { HAVING_FILTER_REGEXP } from 'constants/regExp';
import { HavingFilterTag } from 'container/QueryBuilder/components';
import { HavingTagRenderProps } from 'container/QueryBuilder/components/HavingFilterTag/HavingFilterTag.interfaces';
// ** Hooks
@ -16,12 +15,12 @@ import { useCallback, useEffect, useMemo, useState } from 'react';
import { Having, HavingForm } from 'types/api/queryBuilder/queryBuilderData';
import { DataSource } from 'types/common/queryBuilder';
import { SelectOption } from 'types/common/select';
import { popupContainer } from 'utils/selectPopupContainer';
import { getHavingObject, isValidHavingValue } from '../utils';
// ** Types
import { HavingFilterProps } from './HavingFilter.interfaces';
const { Option } = Select;
export function HavingFilter({
query,
onChange,
@ -59,13 +58,6 @@ export function HavingFilter({
[columnName],
);
const getHavingObject = useCallback((currentSearch: string): HavingForm => {
const textArr = currentSearch.split(' ');
const [columnName = '', op = '', ...value] = textArr;
return { columnName, op, value };
}, []);
const generateOptions = useCallback(
(search: string): void => {
const [aggregator = '', op = '', ...restValue] = search.split(' ');
@ -97,19 +89,6 @@ export function HavingFilter({
[columnName, aggregatorOptions],
);
const isValidHavingValue = useCallback(
(search: string): boolean => {
const values = getHavingObject(search).value.join(' ');
if (values) {
return HAVING_FILTER_REGEXP.test(values);
}
return true;
},
[getHavingObject],
);
const handleSearch = useCallback(
(search: string): void => {
const trimmedSearch = search.replace(/\s\s+/g, ' ').trimStart();
@ -124,7 +103,7 @@ export function HavingFilter({
setSearchText(currentSearch);
}
},
[isMulti, isValidHavingValue],
[isMulti],
);
const resetChanges = useCallback((): void => {
@ -199,7 +178,7 @@ export function HavingFilter({
generateOptions(text);
},
[generateOptions, getHavingObject],
[generateOptions],
);
const handleDeselect = (value: string): void => {
@ -217,13 +196,11 @@ export function HavingFilter({
setLocalValues(transformHavingToStringValue(having));
}, [having]);
const isMetricsDataSource = useMemo(
() => query.dataSource === DataSource.METRICS,
[query.dataSource],
);
const isMetricsDataSource = query.dataSource === DataSource.METRICS;
return (
<Select
getPopupContainer={popupContainer}
autoClearSearchValue={false}
mode="multiple"
onSearch={handleSearch}
@ -240,9 +217,9 @@ export function HavingFilter({
onSelect={handleSelect}
>
{options.map((opt) => (
<Option key={opt.value} value={opt.value} title="havingOption">
<Select.Option key={opt.value} value={opt.value} title="havingOption">
{opt.label}
</Option>
</Select.Option>
))}
</Select>
);

View File

@ -1,30 +1,12 @@
import { InputNumber } from 'antd';
import { useMemo } from 'react';
import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
import { DataSource } from 'types/common/queryBuilder';
import { selectStyle } from '../QueryBuilderSearch/config';
import { handleKeyDownLimitFilter } from '../utils';
function LimitFilter({ onChange, query }: LimitFilterProps): JSX.Element {
const handleKeyDown = (event: {
keyCode: number;
which: number;
preventDefault: () => void;
}): void => {
const keyCode = event.keyCode || event.which;
const isBackspace = keyCode === 8;
const isNumeric =
(keyCode >= 48 && keyCode <= 57) || (keyCode >= 96 && keyCode <= 105);
if (!isNumeric && !isBackspace) {
event.preventDefault();
}
};
const isMetricsDataSource = useMemo(
() => query.dataSource === DataSource.METRICS,
[query.dataSource],
);
const isMetricsDataSource = query.dataSource === DataSource.METRICS;
const isDisabled = isMetricsDataSource && !query.aggregateAttribute.key;
@ -36,7 +18,7 @@ function LimitFilter({ onChange, query }: LimitFilterProps): JSX.Element {
style={selectStyle}
disabled={isDisabled}
onChange={onChange}
onKeyDown={handleKeyDown}
onKeyDown={handleKeyDownLimitFilter}
/>
);
}

View File

@ -9,9 +9,9 @@ export type OrderByFilterProps = {
};
export type OrderByFilterValue = {
disabled: boolean | undefined;
disabled?: boolean;
key: string;
label: string;
title: string | undefined;
title?: string;
value: string;
};

Some files were not shown because too many files have changed in this diff Show More