Compare commits

v0.52.0-cl...v0.52.0-cl (25 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | a4878f6430 |  |
|  | 4489df6f39 |  |
|  | 06c075466b |  |
|  | 62be3e7c13 |  |
|  | bb84960442 |  |
|  | 52199361d5 |  |
|  | f031845300 |  |
|  | 6f73bb6eca |  |
|  | fe398bcc49 |  |
|  | 6781c29082 |  |
|  | eb146491f2 |  |
|  | ae325ec1ca |  |
|  | fd6f0574f5 |  |
|  | b819a90c80 |  |
|  | a6848f6abd |  |
|  | abe65975c9 |  |
|  | 5cedd57aa2 |  |
|  | 80a7b9d16d |  |
|  | 9f7b2542ec |  |
|  | 4a4c9f26a2 |  |
|  | c957c0f757 |  |
|  | 3ff0aa4b4b |  |
|  | 063c9adba6 |  |
|  | 5c3ce146fa |  |
|  | 481bb6e8b8 |  |
```diff
@@ -28,6 +28,7 @@ import (
 	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
 	"go.signoz.io/signoz/ee/query-service/interfaces"
 	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
+	"go.signoz.io/signoz/pkg/query-service/migrate"
 	"go.signoz.io/signoz/pkg/query-service/model"
 	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

@@ -179,6 +180,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		return nil, err
 	}

+	go func() {
+		err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
+		if err != nil {
+			zap.L().Error("error while running clickhouse migrations", zap.Error(err))
+		}
+	}()
+
 	// initiate opamp
 	_, err = opAmpModel.InitDB(localDB)
 	if err != nil {

@@ -728,6 +736,7 @@ func makeRulesManager(
 		DisableRules: disableRules,
 		FeatureFlags: fm,
 		Reader:       ch,
+		EvalDelay:    baseconst.GetEvalDelay(),
 	}

 	// create Manager
```
```diff
@@ -9,9 +9,9 @@ export function ErrorResponseHandler(error: AxiosError): ErrorResponse {
 	// making the error status code as standard Error Status Code
 	const statusCode = response.status as ErrorStatusCode;

-	if (statusCode >= 400 && statusCode < 500) {
-		const { data } = response as AxiosResponse;
+	const { data } = response as AxiosResponse;
+
+	if (statusCode >= 400 && statusCode < 500) {
 		if (statusCode === 404) {
 			return {
 				statusCode,

@@ -34,12 +34,11 @@ export function ErrorResponseHandler(error: AxiosError): ErrorResponse {
 				body: JSON.stringify((response.data as any).data),
 			};
 		}

 		return {
 			statusCode,
 			payload: null,
 			error: 'Something went wrong',
-			message: null,
+			message: data?.error,
 		};
 	}
 	if (request) {
```
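Read together, the two hunks move the `const { data }` destructuring out of the 4xx branch and above it, so the generic 4xx fallback can report the backend's `data?.error` string instead of a hard-coded `null` message. A minimal sketch of the resulting control flow (the `ErrorResponse` shape here is simplified; the real file also handles 404s and request-only failures):

```tsx
import { AxiosError, AxiosResponse } from 'axios';

// Simplified shape for illustration only.
interface ErrorResponseSketch {
	statusCode: number;
	payload: null;
	error: string;
	message: string | null;
}

function errorResponseHandlerSketch(error: AxiosError): ErrorResponseSketch {
	const response = error.response as AxiosResponse;
	const statusCode = response.status;

	// Destructured once, before branching, so the 4xx fallback below can use it.
	const { data } = response;

	if (statusCode >= 400 && statusCode < 500) {
		return {
			statusCode,
			payload: null,
			error: 'Something went wrong',
			message: data?.error ?? null, // surfaces the backend's error string
		};
	}

	return {
		statusCode,
		payload: null,
		error: 'Something went wrong',
		message: null,
	};
}
```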
```diff
@@ -23,6 +23,10 @@ export const metricQueryFunctionOptions: SelectOption<string, string>[] = [
 		value: QueryFunctionsTypes.ABSOLUTE,
 		label: 'Absolute',
 	},
+	{
+		value: QueryFunctionsTypes.RUNNING_DIFF,
+		label: 'Running Diff',
+	},
 	{
 		value: QueryFunctionsTypes.LOG_2,
 		label: 'Log2',

@@ -103,6 +107,9 @@ export const queryFunctionsTypesConfig: QueryFunctionConfigType = {
 	absolute: {
 		showInput: false,
 	},
+	runningDiff: {
+		showInput: false,
+	},
 	log2: {
 		showInput: false,
 	},
```
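The new `runningDiff` entries mirror the existing `absolute` and `log2` ones: the option list feeds the function picker, and the config object tells the UI whether the function takes an argument. A hedged sketch of how `showInput` is presumably consumed (the lookup function below is illustrative; the consuming component is not part of this diff):

```tsx
// Shape taken from the diff above.
type QueryFunctionConfigSketch = Record<string, { showInput: boolean }>;

const queryFunctionsTypesConfigSketch: QueryFunctionConfigSketch = {
	absolute: { showInput: false },
	runningDiff: { showInput: false },
	log2: { showInput: false },
};

// A renderer would check showInput before drawing an argument field.
function needsArgumentInput(functionKey: string): boolean {
	return queryFunctionsTypesConfigSketch[functionKey]?.showInput ?? false;
}

// needsArgumentInput('runningDiff') === false, so no extra input is rendered.
```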
@@ -0,0 +1,78 @@

```tsx
import AlertChannels from 'container/AllAlertChannels';
import { allAlertChannels } from 'mocks-server/__mockdata__/alerts';
import { act, fireEvent, render, screen, waitFor } from 'tests/test-utils';

jest.mock('hooks/useFetch', () => ({
	__esModule: true,
	default: jest.fn().mockImplementation(() => ({
		payload: allAlertChannels,
	})),
}));

const successNotification = jest.fn();
jest.mock('hooks/useNotifications', () => ({
	__esModule: true,
	useNotifications: jest.fn(() => ({
		notifications: {
			success: successNotification,
			error: jest.fn(),
		},
	})),
}));

describe('Alert Channels Settings List page', () => {
	beforeEach(() => {
		render(<AlertChannels />);
	});
	afterEach(() => {
		jest.restoreAllMocks();
	});
	describe('Should display the Alert Channels page properly', () => {
		it('Should check if "The alerts will be sent to all the configured channels." is visible', () => {
			expect(screen.getByText('sending_channels_note')).toBeInTheDocument();
		});
		it('Should check if "New Alert Channel" Button is visible', () => {
			expect(screen.getByText('button_new_channel')).toBeInTheDocument();
		});
		it('Should check if the help icon is visible and displays "tooltip_notification_channels"', async () => {
			const helpIcon = screen.getByLabelText('question-circle');

			fireEvent.mouseOver(helpIcon);

			await waitFor(() => {
				const tooltip = screen.getByText('tooltip_notification_channels');
				expect(tooltip).toBeInTheDocument();
			});
		});
	});
	describe('Should check if the channels table is properly displayed', () => {
		it('Should check if the table columns are properly displayed', () => {
			expect(screen.getByText('column_channel_name')).toBeInTheDocument();
			expect(screen.getByText('column_channel_type')).toBeInTheDocument();
			expect(screen.getByText('column_channel_action')).toBeInTheDocument();
		});

		it('Should check if the data in the table is displayed properly', () => {
			expect(screen.getByText('Dummy-Channel')).toBeInTheDocument();
			expect(screen.getAllByText('slack')[0]).toBeInTheDocument();
			expect(screen.getAllByText('column_channel_edit')[0]).toBeInTheDocument();
			expect(screen.getAllByText('Delete')[0]).toBeInTheDocument();
		});

		it('Should check if clicking on Delete displays Success Toast "Channel Deleted Successfully"', async () => {
			const deleteButton = screen.getAllByRole('button', { name: 'Delete' })[0];
			expect(deleteButton).toBeInTheDocument();

			act(() => {
				fireEvent.click(deleteButton);
			});

			await waitFor(() => {
				expect(successNotification).toBeCalledWith({
					message: 'Success',
					description: 'channel_delete_success',
				});
			});
		});
	});
});
```
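The delete-flow test above relies on the shared msw mock server resolving the channel-deletion request. A hypothetical handler in that style (the path and payload are assumptions for illustration; the real handler lives in `mocks-server` and is not part of this diff):

```tsx
import { rest } from 'msw';

// msw v1 `rest` API, matching its usage elsewhere in this PR's tests.
export const deleteChannelHandler = rest.delete(
	'http://localhost/api/v1/channels/:id',
	(req, res, ctx) =>
		res(ctx.status(200), ctx.json({ status: 'success', data: 'deleted' })),
);
```

With a handler like this registered, the Delete click resolves and the suite's `waitFor` sees the `channel_delete_success` toast.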
@@ -0,0 +1,72 @@

```tsx
import AlertChannels from 'container/AllAlertChannels';
import { allAlertChannels } from 'mocks-server/__mockdata__/alerts';
import { fireEvent, render, screen, waitFor } from 'tests/test-utils';

jest.mock('hooks/useFetch', () => ({
	__esModule: true,
	default: jest.fn().mockImplementation(() => ({
		payload: allAlertChannels,
	})),
}));

const successNotification = jest.fn();
jest.mock('hooks/useNotifications', () => ({
	__esModule: true,
	useNotifications: jest.fn(() => ({
		notifications: {
			success: successNotification,
			error: jest.fn(),
		},
	})),
}));

jest.mock('hooks/useComponentPermission', () => ({
	__esModule: true,
	default: jest.fn().mockImplementation(() => [false]),
}));

describe('Alert Channels Settings List page (Normal User)', () => {
	beforeEach(() => {
		render(<AlertChannels />);
	});
	afterEach(() => {
		jest.restoreAllMocks();
	});
	describe('Should display the Alert Channels page properly', () => {
		it('Should check if "The alerts will be sent to all the configured channels." is visible', () => {
			expect(screen.getByText('sending_channels_note')).toBeInTheDocument();
		});

		it('Should check if "New Alert Channel" Button is visible and disabled', () => {
			const newAlertButton = screen.getByRole('button', {
				name: 'plus button_new_channel',
			});
			expect(newAlertButton).toBeInTheDocument();
			expect(newAlertButton).toBeDisabled();
		});
		it('Should check if the help icon is visible and displays "tooltip_notification_channels"', async () => {
			const helpIcon = screen.getByLabelText('question-circle');

			fireEvent.mouseOver(helpIcon);

			await waitFor(() => {
				const tooltip = screen.getByText('tooltip_notification_channels');
				expect(tooltip).toBeInTheDocument();
			});
		});
	});
	describe('Should check if the channels table is properly displayed', () => {
		it('Should check if the table columns are properly displayed', () => {
			expect(screen.getByText('column_channel_name')).toBeInTheDocument();
			expect(screen.getByText('column_channel_type')).toBeInTheDocument();
			expect(screen.queryByText('column_channel_action')).not.toBeInTheDocument();
		});

		it('Should check if the data in the table is displayed properly', () => {
			expect(screen.getByText('Dummy-Channel')).toBeInTheDocument();
			expect(screen.getAllByText('slack')[0]).toBeInTheDocument();
			expect(screen.queryByText('column_channel_edit')).not.toBeInTheDocument();
			expect(screen.queryByText('Delete')).not.toBeInTheDocument();
		});
	});
});
```
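The only functional difference from the admin suite above is the extra `useComponentPermission` mock returning `[false]`, which drives the disabled "New Alert Channel" button and the hidden action/edit/delete elements. A hedged sketch of how a component might consume such a hook (the real hook's signature in SigNoz may differ; the permission key below is hypothetical):

```tsx
import useComponentPermission from 'hooks/useComponentPermission';

// Assumes the hook returns an array of booleans, one per requested
// permission, as the `[false]` mock suggests.
function NewChannelButtonSketch({ role }: { role: string }): JSX.Element {
	const [addNewChannelPermission] = useComponentPermission(
		['add_new_channel'], // hypothetical permission key
		role,
	);

	// When the mock returns [false], the button renders disabled,
	// which is exactly what the normal-user suite asserts.
	return (
		<button type="button" disabled={!addNewChannelPermission}>
			New Alert Channel
		</button>
	);
}

export default NewChannelButtonSketch;
```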
@@ -0,0 +1,424 @@

```tsx
/* eslint-disable sonarjs/no-duplicate-string */
/* eslint-disable sonarjs/no-identical-functions */

import CreateAlertChannels from 'container/CreateAlertChannels';
import { ChannelType } from 'container/CreateAlertChannels/config';
import {
	opsGenieDescriptionDefaultValue,
	opsGenieMessageDefaultValue,
	opsGeniePriorityDefaultValue,
	pagerDutyAdditionalDetailsDefaultValue,
	pagerDutyDescriptionDefaultVaule,
	pagerDutySeverityTextDefaultValue,
	slackDescriptionDefaultValue,
	slackTitleDefaultValue,
} from 'mocks-server/__mockdata__/alerts';
import { server } from 'mocks-server/server';
import { rest } from 'msw';
import { fireEvent, render, screen, waitFor } from 'tests/test-utils';

import { testLabelInputAndHelpValue } from './testUtils';

const successNotification = jest.fn();
const errorNotification = jest.fn();
jest.mock('hooks/useNotifications', () => ({
	__esModule: true,
	useNotifications: jest.fn(() => ({
		notifications: {
			success: successNotification,
			error: errorNotification,
		},
	})),
}));

jest.mock('hooks/useFeatureFlag', () => ({
	__esModule: true,
	default: jest.fn().mockImplementation(() => ({
		active: true,
	})),
}));

describe('Create Alert Channel', () => {
	afterEach(() => {
		jest.clearAllMocks();
	});
	describe('Should check if the new alert channel is properly displayed with the cascading fields of slack channel', () => {
		beforeEach(() => {
			render(<CreateAlertChannels preType={ChannelType.Slack} />);
		});
		afterEach(() => {
			jest.clearAllMocks();
		});
		it('Should check if the title is "New Notification Channels"', () => {
			expect(screen.getByText('page_title_create')).toBeInTheDocument();
		});
		it('Should check if the name label and textbox are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_channel_name',
				testId: 'channel-name-textbox',
			});
		});
		it('Should check if Send resolved alerts label and checkbox are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_send_resolved',
				testId: 'field-send-resolved-checkbox',
			});
		});
		it('Should check if channel type label and dropdown are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_channel_type',
				testId: 'channel-type-select',
			});
		});
		// Default Channel type (Slack) fields
		it('Should check if the selected item in the type dropdown has text "Slack"', () => {
			expect(screen.getByText('Slack')).toBeInTheDocument();
		});
		it('Should check if Webhook URL label and input are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_webhook_url',
				testId: 'webhook-url-textbox',
			});
		});
		it('Should check if Recipient label, input, and help text are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_slack_recipient',
				testId: 'slack-channel-textbox',
				helpText: 'slack_channel_help',
			});
		});

		it('Should check if Title label and text area are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_slack_title',
				testId: 'title-textarea',
			});
		});
		it('Should check if Title contains template', () => {
			const titleTextArea = screen.getByTestId('title-textarea');

			expect(titleTextArea).toHaveTextContent(slackTitleDefaultValue);
		});
		it('Should check if Description label and text area are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_slack_description',
				testId: 'description-textarea',
			});
		});
		it('Should check if Description contains template', () => {
			const descriptionTextArea = screen.getByTestId('description-textarea');

			expect(descriptionTextArea).toHaveTextContent(slackDescriptionDefaultValue);
		});
		it('Should check if the form buttons are displayed properly (Save, Test, Back)', () => {
			expect(screen.getByText('button_save_channel')).toBeInTheDocument();
			expect(screen.getByText('button_test_channel')).toBeInTheDocument();
			expect(screen.getByText('button_return')).toBeInTheDocument();
		});
		it('Should check if saving the form without filling the name displays "Something went wrong"', async () => {
			const saveButton = screen.getByRole('button', {
				name: 'button_save_channel',
			});

			fireEvent.click(saveButton);

			await waitFor(() =>
				expect(errorNotification).toHaveBeenCalledWith({
					description: 'Something went wrong',
					message: 'Error',
				}),
			);
		});
		it('Should check if clicking on Test button shows "An alert has been sent to this channel" success message if testing passes', async () => {
			server.use(
				rest.post('http://localhost/api/v1/testChannel', (req, res, ctx) =>
					res(
						ctx.status(200),
						ctx.json({
							status: 'success',
							data: 'test alert sent',
						}),
					),
				),
			);
			const testButton = screen.getByRole('button', {
				name: 'button_test_channel',
			});

			fireEvent.click(testButton);

			await waitFor(() =>
				expect(successNotification).toHaveBeenCalledWith({
					message: 'Success',
					description: 'channel_test_done',
				}),
			);
		});
		it('Should check if clicking on Test button shows "Something went wrong" error message if testing fails', async () => {
			const testButton = screen.getByRole('button', {
				name: 'button_test_channel',
			});

			fireEvent.click(testButton);

			await waitFor(() =>
				expect(errorNotification).toHaveBeenCalledWith({
					message: 'Error',
					description: 'channel_test_failed',
				}),
			);
		});
	});
	describe('New Alert Channel Cascading Fields Based on Channel Type', () => {
		describe('Webhook', () => {
			beforeEach(() => {
				render(<CreateAlertChannels preType={ChannelType.Webhook} />);
			});

			it('Should check if the selected item in the type dropdown has text "Webhook"', () => {
				expect(screen.getByText('Webhook')).toBeInTheDocument();
			});
			it('Should check if Webhook URL label and input are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_webhook_url',
					testId: 'webhook-url-textbox',
				});
			});
			it('Should check if Webhook User Name label, input, and help text are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_webhook_username',
					testId: 'webhook-username-textbox',
					helpText: 'help_webhook_username',
				});
			});
			it('Should check if Password label, textbox, and help text are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'Password (optional)',
					testId: 'webhook-password-textbox',
					helpText: 'help_webhook_password',
				});
			});
		});
		describe('PagerDuty', () => {
			beforeEach(() => {
				render(<CreateAlertChannels preType={ChannelType.Pagerduty} />);
			});

			it('Should check if the selected item in the type dropdown has text "Pagerduty"', () => {
				expect(screen.getByText('Pagerduty')).toBeInTheDocument();
			});
			it('Should check if Routing key label, required, and textbox are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_routing_key',
					testId: 'pager-routing-key-textbox',
				});
			});
			it('Should check if Description label, required, info (Shows up as description in pagerduty), and text area are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_description',
					testId: 'pager-description-textarea',
					helpText: 'help_pager_description',
				});
			});
			it('Should check if the description contains the default template', () => {
				const descriptionTextArea = screen.getByTestId(
					'pager-description-textarea',
				);

				expect(descriptionTextArea).toHaveTextContent(
					pagerDutyDescriptionDefaultVaule,
				);
			});
			it('Should check if Severity label, info (help_pager_severity), and textbox are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_severity',
					testId: 'pager-severity-textbox',
					helpText: 'help_pager_severity',
				});
			});
			it('Should check if Severity contains the default template', () => {
				const severityTextbox = screen.getByTestId('pager-severity-textbox');

				expect(severityTextbox).toHaveValue(pagerDutySeverityTextDefaultValue);
			});
			it('Should check if Additional Information label, text area, and help text (help_pager_details) are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_details',
					testId: 'pager-additional-details-textarea',
					helpText: 'help_pager_details',
				});
			});
			it('Should check if Additional Information contains the default template', () => {
				const detailsTextArea = screen.getByTestId(
					'pager-additional-details-textarea',
				);

				expect(detailsTextArea).toHaveValue(pagerDutyAdditionalDetailsDefaultValue);
			});
			it('Should check if Group label, text area, and info (help_pager_group) are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_group',
					testId: 'pager-group-textarea',
					helpText: 'help_pager_group',
				});
			});
			it('Should check if Class label, text area, and info (help_pager_class) are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_class',
					testId: 'pager-class-textarea',
					helpText: 'help_pager_class',
				});
			});
			it('Should check if Client label, text area, and info (Shows up as event source in Pagerduty) are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_client',
					testId: 'pager-client-textarea',
					helpText: 'help_pager_client',
				});
			});
			it('Should check if Client input contains the default value "SigNoz Alert Manager"', () => {
				const clientTextArea = screen.getByTestId('pager-client-textarea');

				expect(clientTextArea).toHaveValue('SigNoz Alert Manager');
			});
			it('Should check if Client URL label, text area, and info (Shows up as event source link in Pagerduty) are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_client_url',
					testId: 'pager-client-url-textarea',
					helpText: 'help_pager_client_url',
				});
			});
			it('Should check if Client URL contains the default value "https://enter-signoz-host-n-port-here/alerts"', () => {
				const clientUrlTextArea = screen.getByTestId('pager-client-url-textarea');

				expect(clientUrlTextArea).toHaveValue(
					'https://enter-signoz-host-n-port-here/alerts',
				);
			});
		});
		describe('Opsgenie', () => {
			beforeEach(() => {
				render(<CreateAlertChannels preType={ChannelType.Opsgenie} />);
			});

			it('Should check if the selected item in the type dropdown has text "Opsgenie"', () => {
				expect(screen.getByText('Opsgenie')).toBeInTheDocument();
			});

			it('Should check if API key label, required, and textbox are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_opsgenie_api_key',
					testId: 'opsgenie-api-key-textbox',
					required: true,
				});
			});

			it('Should check if Message label, required, info (Shows up as message in opsgenie), and text area are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_opsgenie_message',
					testId: 'opsgenie-message-textarea',
					helpText: 'help_opsgenie_message',
					required: true,
				});
			});

			it('Should check if Message contains the default template', () => {
				const messageTextArea = screen.getByTestId('opsgenie-message-textarea');

				expect(messageTextArea).toHaveValue(opsGenieMessageDefaultValue);
			});

			it('Should check if Description label, required, info (Shows up as description in opsgenie), and text area are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_opsgenie_description',
					testId: 'opsgenie-description-textarea',
					helpText: 'help_opsgenie_description',
					required: true,
				});
			});

			it('Should check if Description contains the default template', () => {
				const descriptionTextArea = screen.getByTestId(
					'opsgenie-description-textarea',
				);

				expect(descriptionTextArea).toHaveTextContent(
					opsGenieDescriptionDefaultValue,
				);
			});

			it('Should check if Priority label, required, info (help_opsgenie_priority), and text area are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_opsgenie_priority',
					testId: 'opsgenie-priority-textarea',
					helpText: 'help_opsgenie_priority',
					required: true,
				});
			});

			it('Should check if Priority contains the default template', () => {
				const priorityTextArea = screen.getByTestId('opsgenie-priority-textarea');

				expect(priorityTextArea).toHaveValue(opsGeniePriorityDefaultValue);
			});
		});
		describe('Email', () => {
			beforeEach(() => {
				render(<CreateAlertChannels preType={ChannelType.Email} />);
			});

			it('Should check if the selected item in the type dropdown has text "Email"', () => {
				expect(screen.getByText('Email')).toBeInTheDocument();
			});
			it('Should check if To label, required, info (help_email_to), and textbox are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_email_to',
					testId: 'email-to-textbox',
					helpText: 'help_email_to',
					required: true,
				});
			});
		});
		describe('Microsoft Teams', () => {
			beforeEach(() => {
				render(<CreateAlertChannels preType={ChannelType.MsTeams} />);
			});

			it('Should check if the selected item in the type dropdown has text "msteams"', () => {
				expect(screen.getByText('msteams')).toBeInTheDocument();
			});

			it('Should check if Webhook URL label and input are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_webhook_url',
					testId: 'webhook-url-textbox',
				});
			});

			it('Should check if Title label and text area are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_slack_title',
					testId: 'title-textarea',
				});
			});

			it('Should check if Title contains template', () => {
				const titleTextArea = screen.getByTestId('title-textarea');

				expect(titleTextArea).toHaveTextContent(slackTitleDefaultValue);
			});
			it('Should check if Description label and text area are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_slack_description',
					testId: 'description-textarea',
				});
			});

			it('Should check if Description contains template', () => {
				const descriptionTextArea = screen.getByTestId('description-textarea');

				expect(descriptionTextArea).toHaveTextContent(slackDescriptionDefaultValue);
			});
		});
	});
});
```
@@ -0,0 +1,348 @@

```tsx
/* eslint-disable sonarjs/no-duplicate-string */
/* eslint-disable sonarjs/no-identical-functions */

import { SIGNOZ_UPGRADE_PLAN_URL } from 'constants/app';
import CreateAlertChannels from 'container/CreateAlertChannels';
import { ChannelType } from 'container/CreateAlertChannels/config';
import {
	opsGenieDescriptionDefaultValue,
	opsGenieMessageDefaultValue,
	opsGeniePriorityDefaultValue,
	pagerDutyAdditionalDetailsDefaultValue,
	pagerDutyDescriptionDefaultVaule,
	pagerDutySeverityTextDefaultValue,
	slackDescriptionDefaultValue,
	slackTitleDefaultValue,
} from 'mocks-server/__mockdata__/alerts';
import { render, screen } from 'tests/test-utils';

import { testLabelInputAndHelpValue } from './testUtils';

describe('Create Alert Channel (Normal User)', () => {
	afterEach(() => {
		jest.clearAllMocks();
	});
	describe('Should check if the new alert channel is properly displayed with the cascading fields of slack channel', () => {
		beforeEach(() => {
			render(<CreateAlertChannels preType={ChannelType.Slack} />);
		});
		it('Should check if the title is "New Notification Channels"', () => {
			expect(screen.getByText('page_title_create')).toBeInTheDocument();
		});
		it('Should check if the name label and textbox are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_channel_name',
				testId: 'channel-name-textbox',
			});
		});
		it('Should check if Send resolved alerts label and checkbox are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_send_resolved',
				testId: 'field-send-resolved-checkbox',
			});
		});
		it('Should check if channel type label and dropdown are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_channel_type',
				testId: 'channel-type-select',
			});
		});
		// Default Channel type (Slack) fields
		it('Should check if the selected item in the type dropdown has text "Slack"', () => {
			expect(screen.getByText('Slack')).toBeInTheDocument();
		});
		it('Should check if Webhook URL label and input are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_webhook_url',
				testId: 'webhook-url-textbox',
			});
		});
		it('Should check if Recipient label, input, and help text are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_slack_recipient',
				testId: 'slack-channel-textbox',
				helpText: 'slack_channel_help',
			});
		});

		it('Should check if Title label and text area are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_slack_title',
				testId: 'title-textarea',
			});
		});
		it('Should check if Title contains template', () => {
			const titleTextArea = screen.getByTestId('title-textarea');

			expect(titleTextArea).toHaveTextContent(slackTitleDefaultValue);
		});
		it('Should check if Description label and text area are displayed properly', () => {
			testLabelInputAndHelpValue({
				labelText: 'field_slack_description',
				testId: 'description-textarea',
			});
		});
		it('Should check if Description contains template', () => {
			const descriptionTextArea = screen.getByTestId('description-textarea');

			expect(descriptionTextArea).toHaveTextContent(slackDescriptionDefaultValue);
		});
		it('Should check if the form buttons are displayed properly (Save, Test, Back)', () => {
			expect(screen.getByText('button_save_channel')).toBeInTheDocument();
			expect(screen.getByText('button_test_channel')).toBeInTheDocument();
			expect(screen.getByText('button_return')).toBeInTheDocument();
		});
	});
	describe('New Alert Channel Cascading Fields Based on Channel Type', () => {
		describe('Webhook', () => {
			beforeEach(() => {
				render(<CreateAlertChannels preType={ChannelType.Webhook} />);
			});

			it('Should check if the selected item in the type dropdown has text "Webhook"', () => {
				expect(screen.getByText('Webhook')).toBeInTheDocument();
			});
			it('Should check if Webhook URL label and input are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_webhook_url',
					testId: 'webhook-url-textbox',
				});
			});
			it('Should check if Webhook User Name label, input, and help text are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_webhook_username',
					testId: 'webhook-username-textbox',
					helpText: 'help_webhook_username',
				});
			});
			it('Should check if Password label, textbox, and help text are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'Password (optional)',
					testId: 'webhook-password-textbox',
					helpText: 'help_webhook_password',
				});
			});
		});
		describe('PagerDuty', () => {
			beforeEach(() => {
				render(<CreateAlertChannels preType={ChannelType.Pagerduty} />);
			});

			it('Should check if the selected item in the type dropdown has text "Pagerduty"', () => {
				expect(screen.getByText('Pagerduty')).toBeInTheDocument();
			});
			it('Should check if Routing key label, required, and textbox are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_routing_key',
					testId: 'pager-routing-key-textbox',
				});
			});
			it('Should check if Description label, required, info (Shows up as description in pagerduty), and text area are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_description',
					testId: 'pager-description-textarea',
					helpText: 'help_pager_description',
				});
			});
			it('Should check if the description contains the default template', () => {
				const descriptionTextArea = screen.getByTestId(
					'pager-description-textarea',
				);

				expect(descriptionTextArea).toHaveTextContent(
					pagerDutyDescriptionDefaultVaule,
				);
			});
			it('Should check if Severity label, info (help_pager_severity), and textbox are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_severity',
					testId: 'pager-severity-textbox',
					helpText: 'help_pager_severity',
				});
			});
			it('Should check if Severity contains the default template', () => {
				const severityTextbox = screen.getByTestId('pager-severity-textbox');

				expect(severityTextbox).toHaveValue(pagerDutySeverityTextDefaultValue);
			});
			it('Should check if Additional Information label, text area, and help text (help_pager_details) are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_details',
					testId: 'pager-additional-details-textarea',
					helpText: 'help_pager_details',
				});
			});
			it('Should check if Additional Information contains the default template', () => {
				const detailsTextArea = screen.getByTestId(
					'pager-additional-details-textarea',
				);

				expect(detailsTextArea).toHaveValue(pagerDutyAdditionalDetailsDefaultValue);
			});
			it('Should check if Group label, text area, and info (help_pager_group) are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_group',
					testId: 'pager-group-textarea',
					helpText: 'help_pager_group',
				});
			});
			it('Should check if Class label, text area, and info (help_pager_class) are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_class',
					testId: 'pager-class-textarea',
					helpText: 'help_pager_class',
				});
			});
			it('Should check if Client label, text area, and info (Shows up as event source in Pagerduty) are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_client',
					testId: 'pager-client-textarea',
					helpText: 'help_pager_client',
				});
			});
			it('Should check if Client input contains the default value "SigNoz Alert Manager"', () => {
				const clientTextArea = screen.getByTestId('pager-client-textarea');

				expect(clientTextArea).toHaveValue('SigNoz Alert Manager');
			});
			it('Should check if Client URL label, text area, and info (Shows up as event source link in Pagerduty) are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_pager_client_url',
					testId: 'pager-client-url-textarea',
					helpText: 'help_pager_client_url',
				});
			});
			it('Should check if Client URL contains the default value "https://enter-signoz-host-n-port-here/alerts"', () => {
				const clientUrlTextArea = screen.getByTestId('pager-client-url-textarea');

				expect(clientUrlTextArea).toHaveValue(
					'https://enter-signoz-host-n-port-here/alerts',
				);
			});
		});
		describe('Opsgenie', () => {
			beforeEach(() => {
				render(<CreateAlertChannels preType={ChannelType.Opsgenie} />);
			});

			it('Should check if the selected item in the type dropdown has text "Opsgenie"', () => {
				expect(screen.getByText('Opsgenie')).toBeInTheDocument();
			});

			it('Should check if API key label, required, and textbox are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_opsgenie_api_key',
					testId: 'opsgenie-api-key-textbox',
					required: true,
				});
			});

			it('Should check if Message label, required, info (Shows up as message in opsgenie), and text area are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_opsgenie_message',
					testId: 'opsgenie-message-textarea',
					helpText: 'help_opsgenie_message',
					required: true,
				});
			});

			it('Should check if Message contains the default template', () => {
				const messageTextArea = screen.getByTestId('opsgenie-message-textarea');

				expect(messageTextArea).toHaveValue(opsGenieMessageDefaultValue);
			});

			it('Should check if Description label, required, info (Shows up as description in opsgenie), and text area are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_opsgenie_description',
					testId: 'opsgenie-description-textarea',
					helpText: 'help_opsgenie_description',
					required: true,
				});
			});

			it('Should check if Description contains the default template', () => {
				const descriptionTextArea = screen.getByTestId(
					'opsgenie-description-textarea',
				);

				expect(descriptionTextArea).toHaveTextContent(
					opsGenieDescriptionDefaultValue,
				);
			});

			it('Should check if Priority label, required, info (help_opsgenie_priority), and text area are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_opsgenie_priority',
					testId: 'opsgenie-priority-textarea',
					helpText: 'help_opsgenie_priority',
					required: true,
				});
			});

			it('Should check if Priority contains the default template', () => {
				const priorityTextArea = screen.getByTestId('opsgenie-priority-textarea');

				expect(priorityTextArea).toHaveValue(opsGeniePriorityDefaultValue);
			});
		});
		describe('Email', () => {
			beforeEach(() => {
				render(<CreateAlertChannels preType={ChannelType.Email} />);
			});

			it('Should check if the selected item in the type dropdown has text "Email"', () => {
				expect(screen.getByText('Email')).toBeInTheDocument();
			});
			it('Should check if To label, required, info (help_email_to), and textbox are displayed properly', () => {
				testLabelInputAndHelpValue({
					labelText: 'field_email_to',
					testId: 'email-to-textbox',
					helpText: 'help_email_to',
					required: true,
				});
			});
		});
		describe('Microsoft Teams', () => {
			beforeEach(() => {
				render(<CreateAlertChannels preType={ChannelType.MsTeams} />);
			});

			it('Should check if the selected item in the type dropdown has text "Microsoft Teams (Supported in Paid Plans Only)"', () => {
				expect(
					screen.getByText('Microsoft Teams (Supported in Paid Plans Only)'),
				).toBeInTheDocument();
			});

			it('Should check if the upgrade plan message is shown', () => {
				expect(screen.getByText('Upgrade to a Paid Plan')).toBeInTheDocument();
				expect(
					screen.getByText(/This feature is available for paid plans only./),
				).toBeInTheDocument();
				const link = screen.getByRole('link', { name: 'Click here' });
				expect(link).toBeInTheDocument();
				expect(link).toHaveAttribute('href', SIGNOZ_UPGRADE_PLAN_URL);
				expect(screen.getByText(/to Upgrade/)).toBeInTheDocument();
			});
			it('Should check if the form buttons are displayed properly (Save, Test, Back)', () => {
				expect(
					screen.getByRole('button', { name: 'button_save_channel' }),
				).toBeInTheDocument();
				expect(
					screen.getByRole('button', { name: 'button_test_channel' }),
				).toBeInTheDocument();
				expect(
					screen.getByRole('button', { name: 'button_return' }),
				).toBeInTheDocument();
			});
			it('Should check if save and test buttons are disabled', () => {
				expect(
					screen.getByRole('button', { name: 'button_save_channel' }),
				).toBeDisabled();
				expect(
					screen.getByRole('button', { name: 'button_test_channel' }),
				).toBeDisabled();
			});
		});
	});
});
```
@@ -0,0 +1,118 @@

```tsx
import EditAlertChannels from 'container/EditAlertChannels';
import {
	editAlertChannelInitialValue,
	editSlackDescriptionDefaultValue,
	slackTitleDefaultValue,
} from 'mocks-server/__mockdata__/alerts';
import { render, screen } from 'tests/test-utils';

import { testLabelInputAndHelpValue } from './testUtils';

const successNotification = jest.fn();
const errorNotification = jest.fn();
jest.mock('hooks/useNotifications', () => ({
	__esModule: true,
	useNotifications: jest.fn(() => ({
		notifications: {
			success: successNotification,
			error: errorNotification,
		},
	})),
}));

jest.mock('hooks/useFeatureFlag', () => ({
	__esModule: true,
	default: jest.fn().mockImplementation(() => ({
		active: true,
	})),
}));

describe('Should check if the edit alert channel is properly displayed', () => {
	beforeEach(() => {
		render(<EditAlertChannels initialValue={editAlertChannelInitialValue} />);
	});
	afterEach(() => {
		jest.clearAllMocks();
	});
	it('Should check if the title is "Edit Notification Channels"', () => {
		expect(screen.getByText('page_title_edit')).toBeInTheDocument();
	});

	it('Should check if the name label and textbox are displayed properly', () => {
		testLabelInputAndHelpValue({
			labelText: 'field_channel_name',
			testId: 'channel-name-textbox',
			value: 'Dummy-Channel',
		});
	});
	it('Should check if Send resolved alerts label and checkbox are displayed properly and the checkbox is checked', () => {
		testLabelInputAndHelpValue({
			labelText: 'field_send_resolved',
			testId: 'field-send-resolved-checkbox',
		});
		expect(screen.getByTestId('field-send-resolved-checkbox')).toBeChecked();
	});

	it('Should check if channel type label and dropdown are displayed properly', () => {
		testLabelInputAndHelpValue({
			labelText: 'field_channel_type',
			testId: 'channel-type-select',
		});
	});

	it('Should check if the selected item in the type dropdown has text "Slack"', () => {
		expect(screen.getByText('Slack')).toBeInTheDocument();
	});

	it('Should check if Webhook URL label and input are displayed properly', () => {
		testLabelInputAndHelpValue({
			labelText: 'field_webhook_url',
			testId: 'webhook-url-textbox',
			value:
				'https://discord.com/api/webhooks/dummy_webhook_id/dummy_webhook_token/slack',
		});
	});

	it('Should check if Recipient label, input, and help text are displayed properly', () => {
		testLabelInputAndHelpValue({
			labelText: 'field_slack_recipient',
			testId: 'slack-channel-textbox',
			helpText: 'slack_channel_help',
			value: '#dummy_channel',
		});
	});

	it('Should check if Title label and text area are displayed properly', () => {
		testLabelInputAndHelpValue({
			labelText: 'field_slack_title',
			testId: 'title-textarea',
		});
	});

	it('Should check if Title contains template', () => {
		const titleTextArea = screen.getByTestId('title-textarea');

		expect(titleTextArea).toHaveTextContent(slackTitleDefaultValue);
	});

	it('Should check if Description label and text area are displayed properly', () => {
		testLabelInputAndHelpValue({
			labelText: 'field_slack_description',
			testId: 'description-textarea',
		});
	});

	it('Should check if Description contains template', () => {
		const descriptionTextArea = screen.getByTestId('description-textarea');

		expect(descriptionTextArea).toHaveTextContent(
			editSlackDescriptionDefaultValue,
		);
	});

	it('Should check if the form buttons are displayed properly (Save, Test, Back)', () => {
		expect(screen.getByText('button_save_channel')).toBeInTheDocument();
		expect(screen.getByText('button_test_channel')).toBeInTheDocument();
		expect(screen.getByText('button_return')).toBeInTheDocument();
	});
});
```
@@ -0,0 +1,31 @@

```tsx
import { screen } from 'tests/test-utils';

export const testLabelInputAndHelpValue = ({
	labelText,
	testId,
	helpText,
	required = false,
	value,
}: {
	labelText: string;
	testId: string;
	helpText?: string;
	required?: boolean;
	value?: string;
}): void => {
	const label = screen.getByText(labelText);
	expect(label).toBeInTheDocument();

	const input = screen.getByTestId(testId);
	expect(input).toBeInTheDocument();

	if (helpText !== undefined) {
		expect(screen.getByText(helpText)).toBeInTheDocument();
	}
	if (required) {
		expect(input).toBeRequired();
	}
	if (value) {
		expect(input).toHaveValue(value);
	}
};
```
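This helper bundles the label/input/help-text assertions that the suites above repeat for every form field; the `required` and `value` checks run only when those arguments are passed. For example, the Slack recipient check from the create-channel suite and the pre-filled name check from the edit suite reduce to:

```tsx
// Inside a test that has already rendered the form under test:
testLabelInputAndHelpValue({
	labelText: 'field_slack_recipient',
	testId: 'slack-channel-textbox',
	helpText: 'slack_channel_help',
});

testLabelInputAndHelpValue({
	labelText: 'field_channel_name',
	testId: 'channel-name-textbox',
	value: 'Dummy-Channel', // asserts the input is pre-filled (edit flow)
});
```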
```diff
@@ -27,6 +27,7 @@ function EmailForm({ setSelectedConfig }: EmailFormProps): JSX.Element {
 			<Input
 				onChange={handleInputChange('to')}
 				placeholder={t('placeholder_email_to')}
+				data-testid="email-to-textbox"
 			/>
 		</Form.Item>
```
```diff
@@ -17,6 +17,7 @@ function MsTeams({ setSelectedConfig }: MsTeamsProps): JSX.Element {
 						webhook_url: event.target.value,
 					}));
 				}}
+				data-testid="webhook-url-textbox"
 			/>
 		</Form.Item>

@@ -30,6 +31,7 @@ function MsTeams({ setSelectedConfig }: MsTeamsProps): JSX.Element {
 						title: event.target.value,
 					}))
 				}
+				data-testid="title-textarea"
 			/>
 		</Form.Item>

@@ -41,6 +43,7 @@ function MsTeams({ setSelectedConfig }: MsTeamsProps): JSX.Element {
 						text: event.target.value,
 					}))
 				}
+				data-testid="description-textarea"
 				placeholder={t('placeholder_slack_description')}
 			/>
 		</Form.Item>
```
```diff
@@ -20,7 +20,10 @@ function OpsgenieForm({ setSelectedConfig }: OpsgenieFormProps): JSX.Element {
 	return (
 		<>
 			<Form.Item name="api_key" label={t('field_opsgenie_api_key')} required>
-				<Input onChange={handleInputChange('api_key')} />
+				<Input
+					onChange={handleInputChange('api_key')}
+					data-testid="opsgenie-api-key-textbox"
+				/>
 			</Form.Item>

 			<Form.Item

@@ -33,6 +36,7 @@ function OpsgenieForm({ setSelectedConfig }: OpsgenieFormProps): JSX.Element {
 					rows={4}
 					onChange={handleInputChange('message')}
 					placeholder={t('placeholder_opsgenie_message')}
+					data-testid="opsgenie-message-textarea"
 				/>
 			</Form.Item>

@@ -46,6 +50,7 @@ function OpsgenieForm({ setSelectedConfig }: OpsgenieFormProps): JSX.Element {
 					rows={4}
 					onChange={handleInputChange('description')}
 					placeholder={t('placeholder_opsgenie_description')}
+					data-testid="opsgenie-description-textarea"
 				/>
 			</Form.Item>

@@ -59,6 +64,7 @@ function OpsgenieForm({ setSelectedConfig }: OpsgenieFormProps): JSX.Element {
 					rows={4}
 					onChange={handleInputChange('priority')}
 					placeholder={t('placeholder_opsgenie_priority')}
+					data-testid="opsgenie-priority-textarea"
 				/>
 			</Form.Item>
 		</>
```
```diff
@@ -18,6 +18,7 @@ function PagerForm({ setSelectedConfig }: PagerFormProps): JSX.Element {
 					routing_key: event.target.value,
 				}));
 			}}
+			data-testid="pager-routing-key-textbox"
 		/>
 	</Form.Item>

@@ -36,6 +37,7 @@ function PagerForm({ setSelectedConfig }: PagerFormProps): JSX.Element {
 				}))
 			}
 			placeholder={t('placeholder_pager_description')}
+			data-testid="pager-description-textarea"
 		/>
 	</Form.Item>

@@ -51,6 +53,7 @@ function PagerForm({ setSelectedConfig }: PagerFormProps): JSX.Element {
 					severity: event.target.value,
 				}))
 			}
+			data-testid="pager-severity-textbox"
 		/>
 	</Form.Item>

@@ -67,6 +70,7 @@ function PagerForm({ setSelectedConfig }: PagerFormProps): JSX.Element {
 					details: event.target.value,
 				}))
 			}
+			data-testid="pager-additional-details-textarea"
 		/>
 	</Form.Item>

@@ -97,6 +101,7 @@ function PagerForm({ setSelectedConfig }: PagerFormProps): JSX.Element {
 					group: event.target.value,
 				}))
 			}
+			data-testid="pager-group-textarea"
 		/>
 	</Form.Item>

@@ -112,6 +117,7 @@ function PagerForm({ setSelectedConfig }: PagerFormProps): JSX.Element {
 					class: event.target.value,
 				}))
 			}
+			data-testid="pager-class-textarea"
 		/>
 	</Form.Item>
 	<Form.Item

@@ -126,6 +132,7 @@ function PagerForm({ setSelectedConfig }: PagerFormProps): JSX.Element {
 					client: event.target.value,
 				}))
 			}
+			data-testid="pager-client-textarea"
 		/>
 	</Form.Item>

@@ -141,6 +148,7 @@ function PagerForm({ setSelectedConfig }: PagerFormProps): JSX.Element {
 					client_url: event.target.value,
 				}))
 			}
+			data-testid="pager-client-url-textarea"
 		/>
 	</Form.Item>
 	</>
```
```diff
@@ -19,6 +19,7 @@ function Slack({ setSelectedConfig }: SlackProps): JSX.Element {
 					api_url: event.target.value,
 				}));
 			}}
+			data-testid="webhook-url-textbox"
 		/>
 	</Form.Item>

@@ -34,11 +35,13 @@ function Slack({ setSelectedConfig }: SlackProps): JSX.Element {
 					channel: event.target.value,
 				}))
 			}
+			data-testid="slack-channel-textbox"
 		/>
 	</Form.Item>

 	<Form.Item name="title" label={t('field_slack_title')}>
 		<TextArea
+			data-testid="title-textarea"
 			rows={4}
 			// value={`[{{ .Status | toUpper }}{{ if eq .Status \"firing\" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}\n{{- if gt (len .CommonLabels) (len .GroupLabels) -}}\n{{\" \"}}(\n{{- with .CommonLabels.Remove .GroupLabels.Names }}\n {{- range $index, $label := .SortedPairs -}}\n {{ if $index }}, {{ end }}\n {{- $label.Name }}=\"{{ $label.Value -}}\"\n {{- end }}\n{{- end -}}\n)\n{{- end }}`}
 			onChange={(event): void =>

@@ -59,6 +62,7 @@ function Slack({ setSelectedConfig }: SlackProps): JSX.Element {
 				}))
 			}
 			placeholder={t('placeholder_slack_description')}
+			data-testid="description-textarea"
 		/>
 	</Form.Item>
 	</>
```
```diff
@@ -17,6 +17,7 @@ function WebhookSettings({ setSelectedConfig }: WebhookProps): JSX.Element {
 					api_url: event.target.value,
 				}));
 			}}
+			data-testid="webhook-url-textbox"
 		/>
 	</Form.Item>
 	<Form.Item

@@ -31,6 +32,7 @@ function WebhookSettings({ setSelectedConfig }: WebhookProps): JSX.Element {
 					username: event.target.value,
 				}));
 			}}
+			data-testid="webhook-username-textbox"
 		/>
 	</Form.Item>
 	<Form.Item

@@ -46,6 +48,7 @@ function WebhookSettings({ setSelectedConfig }: WebhookProps): JSX.Element {
 					password: event.target.value,
 				}));
 			}}
+			data-testid="webhook-password-textbox"
 		/>
 	</Form.Item>
 	</>
```
```diff
@@ -85,6 +85,7 @@ function FormAlertChannels({
 		<Form initialValues={initialValue} layout="vertical" form={formInstance}>
 			<Form.Item label={t('field_channel_name')} labelAlign="left" name="name">
 				<Input
+					data-testid="channel-name-textbox"
 					disabled={editing}
 					onChange={(event): void => {
 						setSelectedConfig((state) => ({

@@ -102,6 +103,7 @@ function FormAlertChannels({
 			>
 				<Switch
 					defaultChecked={initialValue?.send_resolved}
+					data-testid="field-send-resolved-checkbox"
 					onChange={(value): void => {
 						setSelectedConfig((state) => ({
 							...state,

@@ -112,24 +114,37 @@ function FormAlertChannels({
 			</Form.Item>

 			<Form.Item label={t('field_channel_type')} labelAlign="left" name="type">
-				<Select disabled={editing} onChange={onTypeChangeHandler} value={type}>
-					<Select.Option value="slack" key="slack">
+				<Select
+					disabled={editing}
+					onChange={onTypeChangeHandler}
+					value={type}
+					data-testid="channel-type-select"
+				>
+					<Select.Option value="slack" key="slack" data-testid="select-option">
 						Slack
 					</Select.Option>
-					<Select.Option value="webhook" key="webhook">
+					<Select.Option value="webhook" key="webhook" data-testid="select-option">
 						Webhook
 					</Select.Option>
-					<Select.Option value="pagerduty" key="pagerduty">
+					<Select.Option
+						value="pagerduty"
+						key="pagerduty"
+						data-testid="select-option"
+					>
 						Pagerduty
 					</Select.Option>
-					<Select.Option value="opsgenie" key="opsgenie">
+					<Select.Option
+						value="opsgenie"
+						key="opsgenie"
+						data-testid="select-option"
+					>
 						Opsgenie
 					</Select.Option>
-					<Select.Option value="email" key="email">
+					<Select.Option value="email" key="email" data-testid="select-option">
 						Email
 					</Select.Option>
 					{!isOssFeature?.active && (
-						<Select.Option value="msteams" key="msteams">
+						<Select.Option value="msteams" key="msteams" data-testid="select-option">
 							<div>
 								Microsoft Teams {!isUserOnEEPlan && '(Supported in Paid Plans Only)'}{' '}
 							</div>
```
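Note that every `Select.Option` reuses the same `data-testid="select-option"`, so a test has to query the options as a list and disambiguate by visible text. A hypothetical assertion in that style (not part of this PR's test files; antd mounts dropdown options only once the select is opened, which the `mouseDown` simulates):

```tsx
import { fireEvent, screen } from 'tests/test-utils';

it('lists every channel type once the dropdown is open', () => {
	// Open the antd select so its options are mounted.
	fireEvent.mouseDown(screen.getByTestId('channel-type-select'));

	// All options share one testid, so query them together...
	const options = screen.getAllByTestId('select-option');

	// ...and pin down individual entries by their visible text.
	expect(options.map((option) => option.textContent)).toEqual(
		expect.arrayContaining(['Slack', 'Webhook', 'Pagerduty', 'Opsgenie', 'Email']),
	);
});
```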
```diff
@@ -43,6 +43,10 @@
 			background: var(--bg-ink-400);
 			cursor: pointer;

+			.dashboard-title {
+				color: var(--bg-vanilla-100);
+			}
+
 			.title-with-action {
 				display: flex;
 				justify-content: space-between;

@@ -1048,6 +1052,10 @@
 			border: 1px solid var(--bg-vanilla-200);
 			background: var(--bg-vanilla-100);

+			.dashboard-title {
+				color: var(--bg-slate-300);
+			}
+
 			.title-with-action {
 				.dashboard-title {
 					.ant-typography {
```
```diff
@@ -66,7 +66,7 @@ import {
 } from 'react';
 import { useTranslation } from 'react-i18next';
 import { useSelector } from 'react-redux';
-import { generatePath } from 'react-router-dom';
+import { generatePath, Link } from 'react-router-dom';
 import { useCopyToClipboard } from 'react-use';
 import { AppState } from 'store/reducers';
 import { Dashboard } from 'types/api/dashboard/getAll';

@@ -455,7 +455,9 @@ function DashboardsList(): JSX.Element {
 					alt="dashboard-image"
 				/>
 				<Typography.Text data-testid={`dashboard-title-${index}`}>
-					{dashboard.name}
+					<Link to={getLink()} className="dashboard-title">
+						{dashboard.name}
+					</Link>
 				</Typography.Text>
 			</div>
```
frontend/src/container/Login/__tests__/Login.test.tsx (new file, 112 lines)
@@ -0,0 +1,112 @@

```tsx
import Login from 'container/Login';
import { act, fireEvent, render, screen, waitFor } from 'tests/test-utils';

const errorNotification = jest.fn();
jest.mock('hooks/useNotifications', () => ({
	__esModule: true,
	useNotifications: jest.fn(() => ({
		notifications: {
			error: errorNotification,
		},
	})),
}));

describe('Login Flow', () => {
	test('Login form is rendered correctly', async () => {
		render(<Login ssoerror="" jwt="" refreshjwt="" userId="" withPassword="" />);

		const headingElement = screen.getByRole('heading', {
			name: 'login_page_title',
		});
		expect(headingElement).toBeInTheDocument();

		const textboxElement = screen.getByRole('textbox');
		expect(textboxElement).toBeInTheDocument();

		const buttonElement = screen.getByRole('button', {
			name: 'button_initiate_login',
		});
		expect(buttonElement).toBeInTheDocument();

		const noAccountPromptElement = screen.getByText('prompt_no_account');
		expect(noAccountPromptElement).toBeInTheDocument();
	});

	test(`Display "invalid_email" if email is not provided`, async () => {
		render(<Login ssoerror="" jwt="" refreshjwt="" userId="" withPassword="" />);

		const buttonElement = screen.getByText('button_initiate_login');
		fireEvent.click(buttonElement);

		await waitFor(() =>
			expect(errorNotification).toHaveBeenCalledWith({
				message: 'invalid_email',
			}),
		);
	});

	test('Display invalid_config if invalid email is provided and next clicked', async () => {
		render(<Login ssoerror="" jwt="" refreshjwt="" userId="" withPassword="" />);

		const textboxElement = screen.getByRole('textbox');
		fireEvent.change(textboxElement, {
			target: { value: 'failEmail@signoz.io' },
		});

		const buttonElement = screen.getByRole('button', {
			name: 'button_initiate_login',
		});
		fireEvent.click(buttonElement);

		await waitFor(() =>
			expect(errorNotification).toHaveBeenCalledWith({
				message: 'invalid_config',
			}),
		);
	});

	test('providing shaheer@signoz.io as email and pressing next, should make the login_with_sso button visible', async () => {
		render(<Login ssoerror="" jwt="" refreshjwt="" userId="" withPassword="" />);
		act(() => {
			fireEvent.change(screen.getByTestId('email'), {
				target: { value: 'shaheer@signoz.io' },
			});

			fireEvent.click(screen.getByTestId('initiate_login'));
		});

		await waitFor(() => {
			expect(screen.getByText('login_with_sso')).toBeInTheDocument();
		});
	});

	test('Display email, password, forgot password if password=Y', () => {
		render(<Login ssoerror="" jwt="" refreshjwt="" userId="" withPassword="Y" />);

		const emailTextBox = screen.getByTestId('email');
		expect(emailTextBox).toBeInTheDocument();

		const passwordTextBox = screen.getByTestId('password');
		expect(passwordTextBox).toBeInTheDocument();

		const forgotPasswordLink = screen.getByText('forgot_password');
		expect(forgotPasswordLink).toBeInTheDocument();
	});

	test('Display tooltip with "prompt_forgot_password" if forgot password is clicked while password=Y', async () => {
		render(<Login ssoerror="" jwt="" refreshjwt="" userId="" withPassword="Y" />);
		const forgotPasswordLink = screen.getByText('forgot_password');

		act(() => {
			fireEvent.mouseOver(forgotPasswordLink);
		});

		await waitFor(() => {
			const forgotPasswordTooltip = screen.getByRole('tooltip', {
				name: 'prompt_forgot_password',
			});
			expect(forgotPasswordLink).toBeInTheDocument();
			expect(forgotPasswordTooltip).toBeInTheDocument();
		});
	});
});
```
@@ -220,6 +220,7 @@ function Login({
	<Input
		type="email"
		id="loginEmail"
+		data-testid="email"
		required
		placeholder={t('placeholder_email')}
		autoFocus
@@ -231,7 +232,12 @@ function Login({
	<ParentContainer>
		<Label htmlFor="Password">{t('label_password')}</Label>
		<FormContainer.Item name="password">
-			<Input.Password required id="currentPassword" disabled={isLoading} />
+			<Input.Password
+				required
+				id="currentPassword"
+				data-testid="password"
+				disabled={isLoading}
+			/>
		</FormContainer.Item>
		<Tooltip title={t('prompt_forgot_password')}>
			<Typography.Link>{t('forgot_password')}</Typography.Link>
@@ -250,6 +256,7 @@ function Login({
		loading={precheckInProcess}
		type="primary"
		onClick={onNextHandler}
+		data-testid="initiate_login"
	>
		{t('button_initiate_login')}
	</Button>
@@ -90,18 +90,23 @@ function PasswordContainer(): JSX.Element {
	return (
		<Card>
			<Space direction="vertical" size="small">
-				<Typography.Title level={4} style={{ marginTop: 0 }}>
+				<Typography.Title
+					level={4}
+					style={{ marginTop: 0 }}
+					data-testid="change-password-header"
+				>
					{t('change_password', {
						ns: 'settings',
					})}
				</Typography.Title>
				<Space direction="vertical">
-					<Typography>
+					<Typography data-testid="current-password-label">
						{t('current_password', {
							ns: 'settings',
						})}
					</Typography>
					<Password
+						data-testid="current-password-textbox"
						disabled={isLoading}
						placeholder={defaultPlaceHolder}
						onChange={(event): void => {
@@ -111,12 +116,13 @@ function PasswordContainer(): JSX.Element {
					/>
				</Space>
				<Space direction="vertical">
-					<Typography>
+					<Typography data-testid="new-password-label">
						{t('new_password', {
							ns: 'settings',
						})}
					</Typography>
					<Password
+						data-testid="new-password-textbox"
						disabled={isLoading}
						placeholder={defaultPlaceHolder}
						onChange={(event): void => {
@@ -129,6 +135,7 @@ function PasswordContainer(): JSX.Element {
				<Space>
					{isPasswordPolicyError && (
						<Typography.Paragraph
+							data-testid="validation-message"
							style={{
								color: '#D89614',
								marginTop: '0.50rem',
@@ -143,8 +150,13 @@ function PasswordContainer(): JSX.Element {
						loading={isLoading}
						onClick={onChangePasswordClickHandler}
						type="primary"
+						data-testid="update-password-button"
					>
-						<Save size={12} style={{ marginRight: '8px' }} />{' '}
+						<Save
+							size={12}
+							style={{ marginRight: '8px' }}
+							data-testid="update-password-icon"
+						/>{' '}
						{t('change_password', {
							ns: 'settings',
						})}
@@ -86,8 +86,11 @@ function UserInfo(): JSX.Element {

	<Flex gap={16}>
		<Space>
-			<Typography className="userInfo-label">Name</Typography>
+			<Typography className="userInfo-label" data-testid="name-label">
+				Name
+			</Typography>
			<NameInput
+				data-testid="name-textbox"
				placeholder="Your Name"
				onChange={(event): void => {
					setChangedName(event.target.value);
@@ -102,6 +105,7 @@ function UserInfo(): JSX.Element {
				loading={loading}
				disabled={loading}
				onClick={onClickUpdateHandler}
+				data-testid="update-name-button"
				type="primary"
			>
				<PencilIcon size={12} /> Update
@@ -109,13 +113,29 @@ function UserInfo(): JSX.Element {
	</Flex>

	<Space>
-		<Typography className="userInfo-label"> Email </Typography>
-		<Input className="userInfo-value" value={user.email} disabled />
+		<Typography className="userInfo-label" data-testid="email-label">
+			{' '}
+			Email{' '}
+		</Typography>
+		<Input
+			className="userInfo-value"
+			data-testid="email-textbox"
+			value={user.email}
+			disabled
+		/>
	</Space>

	<Space>
-		<Typography className="userInfo-label"> Role </Typography>
-		<Input className="userInfo-value" value={role || ''} disabled />
+		<Typography className="userInfo-label" data-testid="role-label">
+			{' '}
+			Role{' '}
+		</Typography>
+		<Input
+			className="userInfo-value"
+			value={role || ''}
+			disabled
+			data-testid="role-textbox"
+		/>
	</Space>
	</Space>
</Card>
frontend/src/container/MySettings/__tests__/MySettings.test.tsx (new file, 219 lines)
@@ -0,0 +1,219 @@
import MySettingsContainer from 'container/MySettings';
import { act, fireEvent, render, screen, waitFor } from 'tests/test-utils';

const toggleThemeFunction = jest.fn();

jest.mock('hooks/useDarkMode', () => ({
	__esModule: true,
	useIsDarkMode: jest.fn(() => ({
		toggleTheme: toggleThemeFunction,
	})),
	default: jest.fn(() => ({
		toggleTheme: toggleThemeFunction,
	})),
}));

const errorNotification = jest.fn();
const successNotification = jest.fn();
jest.mock('hooks/useNotifications', () => ({
	__esModule: true,
	useNotifications: jest.fn(() => ({
		notifications: {
			error: errorNotification,
			success: successNotification,
		},
	})),
}));

enum ThemeOptions {
	Dark = 'Dark',
	Light = 'Light',
}

describe('MySettings Flows', () => {
	beforeEach(() => {
		jest.clearAllMocks();

		render(<MySettingsContainer />);
	});

	describe('Dark/Light Theme Switch', () => {
		it('Should display Dark and Light theme buttons properly', async () => {
			expect(screen.getByText('Dark')).toBeInTheDocument();

			const darkThemeIcon = screen.getByTestId('dark-theme-icon');
			expect(darkThemeIcon).toBeInTheDocument();
			expect(darkThemeIcon.tagName).toBe('svg');

			expect(screen.getByText('Light')).toBeInTheDocument();
			const lightThemeIcon = screen.getByTestId('light-theme-icon');
			expect(lightThemeIcon).toBeInTheDocument();
			expect(lightThemeIcon.tagName).toBe('svg');
		});

		it('Should activate Dark and Light buttons on click', async () => {
			const initialSelectedOption = screen.getByRole('radio', {
				name: ThemeOptions.Dark,
			});
			expect(initialSelectedOption).toBeChecked();

			const newThemeOption = screen.getByRole('radio', {
				name: ThemeOptions.Light,
			});
			fireEvent.click(newThemeOption);

			expect(newThemeOption).toBeChecked();
		});

		it('Should switch the theme on clicking Light theme', async () => {
			const lightThemeOption = screen.getByRole('radio', {
				name: /light/i,
			});
			fireEvent.click(lightThemeOption);

			await waitFor(() => {
				expect(toggleThemeFunction).toBeCalled();
			});
		});
	});

	describe('User Details Form', () => {
		it('Should properly display the User Details Form', () => {
			const userDetailsHeader = screen.getByRole('heading', {
				name: /user details/i,
			});
			const nameLabel = screen.getByTestId('name-label');
			const nameTextbox = screen.getByTestId('name-textbox');
			const updateNameButton = screen.getByTestId('update-name-button');
			const emailLabel = screen.getByTestId('email-label');
			const emailTextbox = screen.getByTestId('email-textbox');
			const roleLabel = screen.getByTestId('role-label');
			const roleTextbox = screen.getByTestId('role-textbox');

			expect(userDetailsHeader).toBeInTheDocument();
			expect(nameLabel).toBeInTheDocument();
			expect(nameTextbox).toBeInTheDocument();
			expect(updateNameButton).toBeInTheDocument();
			expect(emailLabel).toBeInTheDocument();
			expect(emailTextbox).toBeInTheDocument();
			expect(roleLabel).toBeInTheDocument();
			expect(roleTextbox).toBeInTheDocument();
		});

		it('Should update the name on clicking Update button', async () => {
			const nameTextbox = screen.getByTestId('name-textbox');
			const updateNameButton = screen.getByTestId('update-name-button');

			act(() => {
				fireEvent.change(nameTextbox, { target: { value: 'New Name' } });
			});

			fireEvent.click(updateNameButton);

			await waitFor(() =>
				expect(successNotification).toHaveBeenCalledWith({
					message: 'success',
				}),
			);
		});
	});

	describe('Reset password', () => {
		let currentPasswordTextbox: Node | Window;
		let newPasswordTextbox: Node | Window;
		let submitButtonElement: HTMLElement;

		beforeEach(() => {
			currentPasswordTextbox = screen.getByTestId('current-password-textbox');
			newPasswordTextbox = screen.getByTestId('new-password-textbox');
			submitButtonElement = screen.getByTestId('update-password-button');
		});

		it('Should properly display the Password Reset Form', () => {
			const passwordResetHeader = screen.getByTestId('change-password-header');
			expect(passwordResetHeader).toBeInTheDocument();

			const currentPasswordLabel = screen.getByTestId('current-password-label');
			expect(currentPasswordLabel).toBeInTheDocument();

			expect(currentPasswordTextbox).toBeInTheDocument();

			const newPasswordLabel = screen.getByTestId('new-password-label');
			expect(newPasswordLabel).toBeInTheDocument();

			expect(newPasswordTextbox).toBeInTheDocument();
			expect(submitButtonElement).toBeInTheDocument();

			const savePasswordIcon = screen.getByTestId('update-password-icon');
			expect(savePasswordIcon).toBeInTheDocument();
			expect(savePasswordIcon.tagName).toBe('svg');
		});

		it('Should display validation error if password is less than 8 characters', async () => {
			const currentPasswordTextbox = screen.getByTestId(
				'current-password-textbox',
			);
			act(() => {
				fireEvent.change(currentPasswordTextbox, { target: { value: '123' } });
			});
			const validationMessage = await screen.findByTestId('validation-message');

			await waitFor(() => {
				expect(validationMessage).toHaveTextContent(
					'Password must a have minimum of 8 characters',
				);
			});
		});

		test("Should display 'invalid credentials' error if different current and new passwords are provided", async () => {
			act(() => {
				fireEvent.change(currentPasswordTextbox, {
					target: { value: '123456879' },
				});

				fireEvent.change(newPasswordTextbox, { target: { value: '123456789' } });
			});

			fireEvent.click(submitButtonElement);

			await waitFor(() => expect(errorNotification).toHaveBeenCalled());
		});

		it('Should check if the "Change Password" button is disabled in case current / new password is less than 8 characters', () => {
			act(() => {
				fireEvent.change(currentPasswordTextbox, {
					target: { value: '123' },
				});
				fireEvent.change(newPasswordTextbox, { target: { value: '123' } });
			});

			expect(submitButtonElement).toBeDisabled();
		});

		test("Should check if 'Change Password' button is enabled when password is at least 8 characters", async () => {
			expect(submitButtonElement).toBeDisabled();

			act(() => {
				fireEvent.change(currentPasswordTextbox, {
					target: { value: '123456789' },
				});
				fireEvent.change(newPasswordTextbox, { target: { value: '1234567890' } });
			});

			expect(submitButtonElement).toBeEnabled();
		});

		test("Should check if 'Change Password' button is disabled when current and new passwords are the same", async () => {
			expect(submitButtonElement).toBeDisabled();

			act(() => {
				fireEvent.change(currentPasswordTextbox, {
					target: { value: '123456789' },
				});
				fireEvent.change(newPasswordTextbox, { target: { value: '123456789' } });
			});

			expect(submitButtonElement).toBeDisabled();
		});
	});
});
@@ -17,7 +17,7 @@ function MySettings(): JSX.Element {
	{
		label: (
			<div className="theme-option">
-				<Moon size={12} /> Dark{' '}
+				<Moon data-testid="dark-theme-icon" size={12} /> Dark{' '}
			</div>
		),
		value: 'dark',
@@ -25,7 +25,7 @@ function MySettings(): JSX.Element {
	{
		label: (
			<div className="theme-option">
-				<Sun size={12} /> Light{' '}
+				<Sun size={12} data-testid="light-theme-icon" /> Light{' '}
			</div>
		),
		value: 'light',
@@ -63,6 +63,7 @@ function MySettings(): JSX.Element {
		value={theme}
		optionType="button"
		buttonStyle="solid"
+		data-testid="theme-selector"
	/>
</div>
@@ -74,7 +75,12 @@ function MySettings(): JSX.Element {
		<Password />
	</div>

-	<Button className="flexBtn" onClick={(): void => Logout()} type="primary">
+	<Button
+		className="flexBtn"
+		onClick={(): void => Logout()}
+		type="primary"
+		data-testid="logout-button"
+	>
		<LogOut size={12} /> Logout
	</Button>
</Space>
@@ -268,6 +268,7 @@ export const panelTypeDataSourceFormValuesMap: Record<
	'aggregateOperator',
	'aggregateAttribute',
	'groupBy',
	'reduceTo',
	'limit',
	'having',
	'orderBy',
@@ -286,6 +287,7 @@ export const panelTypeDataSourceFormValuesMap: Record<
	'aggregateOperator',
	'aggregateAttribute',
	'groupBy',
	'reduceTo',
	'limit',
	'having',
	'orderBy',
@@ -305,6 +307,7 @@ export const panelTypeDataSourceFormValuesMap: Record<
	'aggregateOperator',
	'aggregateAttribute',
	'groupBy',
	'reduceTo',
	'limit',
	'having',
	'orderBy',
@@ -1,24 +1,43 @@
-## Install otel-collector in your Kubernetes infra
+### Install otel-collector in your Kubernetes infra

Add the SigNoz Helm Chart repository
```bash
helm repo add signoz https://charts.signoz.io
```

If the chart is already present, update the chart to the latest using:
```bash
helm repo update
```

Install the Kubernetes Infrastructure chart provided by SigNoz
```bash
helm install my-release signoz/k8s-infra \
--set otelCollectorEndpoint=ingest.{{REGION}}.signoz.cloud:443 \
--set otelInsecure=false \
--set signozApiKey={{SIGNOZ_INGESTION_KEY}} \
--set global.clusterName=<CLUSTER_NAME>
```

For generic Kubernetes clusters, you can create *override-values.yaml* with the following configuration:

```yaml
global:
  cloud: others
  clusterName: <CLUSTER_NAME>
  deploymentEnvironment: <DEPLOYMENT_ENVIRONMENT>
otelCollectorEndpoint: ingest.{{REGION}}.signoz.cloud:443
otelInsecure: false
signozApiKey: {{SIGNOZ_INGESTION_KEY}}
presets:
  otlpExporter:
    enabled: true
  loggingExporter:
    enabled: false
```

- Replace `<CLUSTER_NAME>` with the name of the Kubernetes cluster or a unique identifier of the cluster.
- Replace `<DEPLOYMENT_ENVIRONMENT>` with the deployment environment of your application. Example: **"staging"**, **"production"**, etc.

To install the k8s-infra chart with the above configuration, run the following command:

```bash
helm install my-release signoz/k8s-infra -f override-values.yaml
```
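After the install completes, it is worth confirming that the collector pods actually came up before expecting data in SigNoz. A minimal verification sketch, assuming the release name `my-release` from the commands above, the default namespace, and that the chart applies the standard `app.kubernetes.io/instance` Helm label (all assumptions; adjust to your install):

```bash
# Confirm the Helm release deployed successfully
helm status my-release

# List the pods created by the release; the k8s-infra chart runs an
# otel-agent DaemonSet (one pod per node) plus a deployment-mode
# collector pod, and all of them should reach the Running state.
# (label selector is an assumption based on standard Helm chart labels)
kubectl get pods -l app.kubernetes.io/instance=my-release
```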
@@ -1,24 +1,47 @@
-## Install otel-collector in your Kubernetes infra
+### Install otel-collector in your Kubernetes infra

Add the SigNoz Helm Chart repository
```bash
helm repo add signoz https://charts.signoz.io
```

If the chart is already present, update the chart to the latest using:
```bash
helm repo update
```

Install the Kubernetes Infrastructure chart provided by SigNoz
```bash
helm install my-release signoz/k8s-infra \
--set otelCollectorEndpoint=ingest.{{REGION}}.signoz.cloud:443 \
--set otelInsecure=false \
--set signozApiKey={{SIGNOZ_INGESTION_KEY}} \
--set global.clusterName=<CLUSTER_NAME>
```

For AWS EKS clusters, you can create *override-values.yaml* with the following configuration:

```yaml
global:
  cloud: aws
  clusterName: <CLUSTER_NAME>
  deploymentEnvironment: <DEPLOYMENT_ENVIRONMENT>
otelCollectorEndpoint: ingest.{region}.signoz.cloud:443
otelInsecure: false
signozApiKey: <SIGNOZ_INGESTION_KEY>
presets:
  otlpExporter:
    enabled: true
  loggingExporter:
    enabled: false
  resourceDetection:
    detectors:
      - eks
      - system
```

- Replace `<CLUSTER_NAME>` with the name of the Kubernetes cluster or a unique identifier of the cluster.
- Replace `<DEPLOYMENT_ENVIRONMENT>` with the deployment environment of your application. Example: **"staging"**, **"production"**, etc.

To install the k8s-infra chart with the above configuration, run the following command:

```bash
helm install my-release signoz/k8s-infra -f override-values.yaml
```
@@ -5,17 +5,39 @@ Add the SigNoz Helm Chart repository
helm repo add signoz https://charts.signoz.io
```

If the chart is already present, update the chart to the latest using:

```bash
helm repo update
```

Install the Kubernetes Infrastructure chart provided by SigNoz

```bash
helm install my-release signoz/k8s-infra \
--set otelCollectorEndpoint=ingest.{{REGION}}.signoz.cloud:443 \
--set otelInsecure=false \
--set signozApiKey={{SIGNOZ_INGESTION_KEY}} \
--set global.clusterName=<CLUSTER_NAME>
```

For generic Kubernetes clusters, you can create *override-values.yaml* with the following configuration:

```yaml
global:
  cloud: others
  clusterName: <CLUSTER_NAME>
  deploymentEnvironment: <DEPLOYMENT_ENVIRONMENT>
otelCollectorEndpoint: ingest.{{REGION}}.signoz.cloud:443
otelInsecure: false
signozApiKey: {{SIGNOZ_INGESTION_KEY}}
presets:
  otlpExporter:
    enabled: true
  loggingExporter:
    enabled: false
```

- Replace `<CLUSTER_NAME>` with the name of the Kubernetes cluster or a unique identifier of the cluster.
- Replace `<DEPLOYMENT_ENVIRONMENT>` with the deployment environment of your application. Example: **"staging"**, **"production"**, etc.

To install the k8s-infra chart with the above configuration, run the following command:

```bash
helm install my-release signoz/k8s-infra -f override-values.yaml
```
@@ -41,3 +41,37 @@ div[class*='-setup-instructions-container'] {
.service-name-container {
  width: 100%;
}

.intgeration-page-container {
  background-color: var(--bg-ink-400);
  border-color: var(--bg-slate-500);
}

.intgeration-page-container-text {
  color: var(--bg-vanilla-400);
}

.navigate-integrations-page-btn {
  display: flex;
  align-items: center;
  color: var(--bg-vanilla-100);
  background-color: var(--bg-slate-300);
  box-shadow: none;
  border: none;
}

.dataSourceName {
  color: var(--bg-vanilla-100);
  font-weight: 600;
  cursor: pointer;

  display: flex;
  align-items: center;
  gap: 8px;
}

.lightMode {
  .dataSourceName {
    color: var(--bg-slate-500);
  }
}
@@ -6,6 +6,7 @@ import { LoadingOutlined } from '@ant-design/icons';
import { Button, Card, Form, Input, Select, Space, Typography } from 'antd';
import logEvent from 'api/common/logEvent';
import cx from 'classnames';
import ROUTES from 'constants/routes';
import { useOnboardingContext } from 'container/OnboardingContainer/context/OnboardingContext';
import { useCases } from 'container/OnboardingContainer/OnboardingContainer';
import {
@@ -14,9 +15,10 @@ import {
    hasFrameworks,
} from 'container/OnboardingContainer/utils/dataSourceUtils';
import { useNotifications } from 'hooks/useNotifications';
import { Check } from 'lucide-react';
import { Blocks, Check } from 'lucide-react';
import { useEffect, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useHistory } from 'react-router-dom';
import { popupContainer } from 'utils/selectPopupContainer';

export interface DataSourceType {
@@ -29,6 +31,7 @@ export interface DataSourceType {
export default function DataSource(): JSX.Element {
    const [form] = Form.useForm();
    const { t } = useTranslation(['common']);
    const history = useHistory();

    const {
        serviceName,
@@ -127,6 +130,10 @@ export default function DataSource(): JSX.Element {
        }
    };

    const goToIntegrationsPage = (): void => {
        history.push(ROUTES.INTEGRATIONS);
    };

    return (
        <div className="module-container">
            <Typography.Text className="data-source-title">
@@ -156,7 +163,7 @@ export default function DataSource(): JSX.Element {
            </div>

            <div>
                <Typography.Text className="serviceName">
                <Typography.Text className="dataSourceName">
                    {dataSource.name}
                </Typography.Text>
            </div>
@@ -214,6 +221,20 @@ export default function DataSource(): JSX.Element {
                </>
            )}

            <div className="request-entity-container intgeration-page-container">
                <Typography.Text className="intgeration-page-container-text">
                    Not able to find datasources you are looking for, check our Integrations
                    page which allows more sources of sending data
                </Typography.Text>
                <Button
                    onClick={goToIntegrationsPage}
                    icon={<Blocks size={14} />}
                    className="navigate-integrations-page-btn"
                >
                    Go to integrations
                </Button>
            </div>

            <div className="request-entity-container">
                <Typography.Text>
                    Cannot find what you’re looking for? Request a data source
@@ -544,19 +544,21 @@ export const Query = memo(function Query({
            )}
        </Col>

        {isVersionV4 && isMetricsDataSource && panelType === PANEL_TYPES.TABLE && (
            <Col flex="1 1 12.5rem">
                <Row>
                    <Col span={6}>
                        <FilterLabel label="Reduce to" />
                    </Col>
        {isVersionV4 &&
            isMetricsDataSource &&
            (panelType === PANEL_TYPES.TABLE || panelType === PANEL_TYPES.PIE) && (
                <Col flex="1 1 12.5rem">
                    <Row>
                        <Col span={6}>
                            <FilterLabel label="Reduce to" />
                        </Col>

                        <Col span={18}>
                            <ReduceToFilter query={query} onChange={handleChangeReduceTo} />
                        </Col>
                    </Row>
                </Col>
            )}
                    <Col span={18}>
                        <ReduceToFilter query={query} onChange={handleChangeReduceTo} />
                    </Col>
                </Row>
            </Col>
        )}
    </Row>
</Col>
)}
frontend/src/mocks-server/__mockdata__/alerts.ts (new file, 49 lines)
@@ -0,0 +1,49 @@
export const allAlertChannels = [
    {
        id: '3',
        created_at: '2023-08-09T04:45:19.239344617Z',
        updated_at: '2024-06-27T11:37:14.841184399Z',
        name: 'Dummy-Channel',
        type: 'slack',
        data:
            '{"name":"Dummy-Channel","slack_configs":[{"api_url":"https://discord.com/api/webhooks/dummy_webhook_id/dummy_webhook_token/slack","channel":"#dummy_channel","send_resolved":true,"text":"{{ range .Alerts -}}\\n *Alert:* {{ .Labels.alertname }}{{ if .Labels.severity }} - {{ .Labels.severity }}{{ end }} dummy_summary\\n\\n *Summary:* {{ .Annotations.summary }}\\n *Description:* {{ .Annotations.description }}\\n\\n *Details:*\\n {{ range .Labels.SortedPairs }} • *{{ .Name }}:* {{ .Value }}\\n {{ end }}\\n {{ end }}","title":"[{{ .Status | toUpper }}{{ if eq .Status \\"firing\\" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}\\n {{- if gt (len .CommonLabels) (len .GroupLabels) -}}\\n {{\\" \\"}}(\\n {{- with .CommonLabels.Remove .GroupLabels.Names }}\\n {{- range $index, $label := .SortedPairs -}}\\n {{ if $index }}, {{ end }}\\n {{- $label.Name }}=\\"{{ $label.Value -}}\\"\\n {{- end }}\\n {{- end -}}\\n )\\n {{- end }}"}]}',
    },
];

export const editAlertChannelInitialValue = {
    api_url:
        'https://discord.com/api/webhooks/dummy_webhook_id/dummy_webhook_token/slack',
    channel: '#dummy_channel',
    send_resolved: true,
    text:
        '{{ range .Alerts -}}\n *Alert:* {{ .Labels.alertname }}{{ if .Labels.severity }} - {{ .Labels.severity }}{{ end }} dummy_summary\n\n *Summary:* {{ .Annotations.summary }}\n *Description:* {{ .Annotations.description }}\n\n *Details:*\n {{ range .Labels.SortedPairs }} • *{{ .Name }}:* {{ .Value }}\n {{ end }}\n {{ end }}',
    title:
        '[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}\n {{- if gt (len .CommonLabels) (len .GroupLabels) -}}\n {{" "}}(\n {{- with .CommonLabels.Remove .GroupLabels.Names }}\n {{- range $index, $label := .SortedPairs -}}\n {{ if $index }}, {{ end }}\n {{- $label.Name }}="{{ $label.Value -}}"\n {{- end }}\n {{- end -}}\n )\n {{- end }}',
    type: 'slack',
    name: 'Dummy-Channel',
};

export const slackTitleDefaultValue = `[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }} {{- if gt (len .CommonLabels) (len .GroupLabels) -}} {{" "}}( {{- with .CommonLabels.Remove .GroupLabels.Names }} {{- range $index, $label := .SortedPairs -}} {{ if $index }}, {{ end }} {{- $label.Name }}="{{ $label.Value -}}" {{- end }} {{- end -}} ) {{- end }}`;

export const slackDescriptionDefaultValue = `{{ range .Alerts -}} *Alert:* {{ .Labels.alertname }}{{ if .Labels.severity }} - {{ .Labels.severity }}{{ end }} *Summary:* {{ .Annotations.summary }} *Description:* {{ .Annotations.description }} *RelatedLogs:* {{ if gt (len .Annotations.related_logs) 0 -}} View in <{{ .Annotations.related_logs }}|logs explorer> {{- end}} *RelatedTraces:* {{ if gt (len .Annotations.related_traces) 0 -}} View in <{{ .Annotations.related_traces }}|traces explorer> {{- end}} *Details:* {{ range .Labels.SortedPairs }} • *{{ .Name }}:* {{ .Value }} {{ end }} {{ end }}`;

export const editSlackDescriptionDefaultValue = `{{ range .Alerts -}} *Alert:* {{ .Labels.alertname }}{{ if .Labels.severity }} - {{ .Labels.severity }}{{ end }} dummy_summary *Summary:* {{ .Annotations.summary }} *Description:* {{ .Annotations.description }} *Details:* {{ range .Labels.SortedPairs }} • *{{ .Name }}:* {{ .Value }} {{ end }} {{ end }}`;

export const pagerDutyDescriptionDefaultVaule = `{{ if gt (len .Alerts.Firing) 0 -}} Alerts Firing: {{ range .Alerts.Firing }} - Message: {{ .Annotations.description }} Labels: {{ range .Labels.SortedPairs }} - {{ .Name }} = {{ .Value }} {{ end }} Annotations: {{ range .Annotations.SortedPairs }} - {{ .Name }} = {{ .Value }} {{ end }} Source: {{ .GeneratorURL }} {{ end }} {{- end }} {{ if gt (len .Alerts.Resolved) 0 -}} Alerts Resolved: {{ range .Alerts.Resolved }} - Message: {{ .Annotations.description }} Labels: {{ range .Labels.SortedPairs }} - {{ .Name }} = {{ .Value }} {{ end }} Annotations: {{ range .Annotations.SortedPairs }} - {{ .Name }} = {{ .Value }} {{ end }} Source: {{ .GeneratorURL }} {{ end }} {{- end }}`;

export const pagerDutyAdditionalDetailsDefaultValue = JSON.stringify({
    firing: `{{ template "pagerduty.default.instances" .Alerts.Firing }}`,
    resolved: `{{ template "pagerduty.default.instances" .Alerts.Resolved }}`,
    num_firing: '{{ .Alerts.Firing | len }}',
    num_resolved: '{{ .Alerts.Resolved | len }}',
});

export const opsGenieMessageDefaultValue = `{{ .CommonLabels.alertname }}`;

export const opsGenieDescriptionDefaultValue = `{{ if gt (len .Alerts.Firing) 0 -}} Alerts Firing: {{ range .Alerts.Firing }} - Message: {{ .Annotations.description }} Labels: {{ range .Labels.SortedPairs }} - {{ .Name }} = {{ .Value }} {{ end }} Annotations: {{ range .Annotations.SortedPairs }} - {{ .Name }} = {{ .Value }} {{ end }} Source: {{ .GeneratorURL }} {{ end }} {{- end }} {{ if gt (len .Alerts.Resolved) 0 -}} Alerts Resolved: {{ range .Alerts.Resolved }} - Message: {{ .Annotations.description }} Labels: {{ range .Labels.SortedPairs }} - {{ .Name }} = {{ .Value }} {{ end }} Annotations: {{ range .Annotations.SortedPairs }} - {{ .Name }} = {{ .Value }} {{ end }} Source: {{ .GeneratorURL }} {{ end }} {{- end }}`;

export const opsGeniePriorityDefaultValue =
    '{{ if eq (index .Alerts 0).Labels.severity "critical" }}P1{{ else if eq (index .Alerts 0).Labels.severity "warning" }}P2{{ else if eq (index .Alerts 0).Labels.severity "info" }}P3{{ else }}P4{{ end }}';

export const pagerDutySeverityTextDefaultValue =
    '{{ (index .Alerts 0).Labels.severity }}';
@@ -1,5 +1,6 @@
import { rest } from 'msw';

import { allAlertChannels } from './__mockdata__/alerts';
import { billingSuccessResponse } from './__mockdata__/billing';
import {
    dashboardSuccessResponse,
@@ -131,6 +132,26 @@ export const handlers = [
            return res(ctx.status(500));
        },
    ),
    rest.get('http://localhost/api/v1/loginPrecheck', (req, res, ctx) => {
        const email = req.url.searchParams.get('email');
        if (email === 'failEmail@signoz.io') {
            return res(ctx.status(500));
        }

        return res(
            ctx.status(200),
            ctx.json({
                status: 'success',
                data: {
                    sso: true,
                    ssoUrl: '',
                    canSelfRegister: false,
                    isUser: true,
                    ssoError: '',
                },
            }),
        );
    }),

    rest.get('http://localhost/api/v2/licenses', (req, res, ctx) =>
        res(ctx.status(200), ctx.json(licensesSuccessResponse)),
@@ -154,6 +175,24 @@ export const handlers = [
    rest.post('http://localhost/api/v1/invite', (_, res, ctx) =>
        res(ctx.status(200), ctx.json(inviteUser)),
    ),
    rest.put('http://localhost/api/v1/user/:id', (_, res, ctx) =>
        res(
            ctx.status(200),
            ctx.json({
                data: 'user updated successfully',
            }),
        ),
    ),
    rest.post('http://localhost/api/v1/changePassword', (_, res, ctx) =>
        res(
            ctx.status(403),
            ctx.json({
                status: 'error',
                errorType: 'forbidden',
                error: 'invalid credentials',
            }),
        ),
    ),

    rest.get(
        'http://localhost/api/v3/autocomplete/aggregate_attributes',
@@ -181,4 +220,16 @@ export const handlers = [
            }),
        ),
    ),
    rest.post('http://localhost/api/v1//channels', (_, res, ctx) =>
        res(ctx.status(200), ctx.json(allAlertChannels)),
    ),
    rest.delete('http://localhost/api/v1/channels/:id', (_, res, ctx) =>
        res(
            ctx.status(200),
            ctx.json({
                status: 'success',
                data: 'notification channel successfully deleted',
            }),
        ),
    ),
];
frontend/src/pages/EditRules/EditRules.styles.scss (new file, 32 lines)
@@ -0,0 +1,32 @@
.edit-rules-container {
  display: flex;
  justify-content: center;
  align-items: center;
  margin-top: 5rem;
}

.edit-rules-card {
  width: 20rem;
  padding: 1rem;
}

.content {
  font-style: normal;
  font-weight: 300;
  font-size: 18px;
  line-height: 20px;
  display: flex;
  align-items: center;
  justify-content: center;
  text-align: center;
  margin: 0;
}

.btn-container {
  display: flex;
  justify-content: center;
  align-items: center;
  margin-top: 2rem;
}
frontend/src/pages/EditRules/constants.ts (new file, 6 lines)
@@ -0,0 +1,6 @@
export const returnToAlertsPage = 'Return to Alerts Page';

export const errorMessageReceivedFromBackend = 'sql: no rows in result set';

export const improvedErrorMessage =
    'The Alert that you are trying to access does not exist.';
@@ -1,19 +1,27 @@
import './EditRules.styles.scss';

import { Button, Card } from 'antd';
import get from 'api/alerts/get';
import Spinner from 'components/Spinner';
import { QueryParams } from 'constants/query';
import ROUTES from 'constants/routes';
import EditRulesContainer from 'container/EditRules';
import { useNotifications } from 'hooks/useNotifications';
import useUrlQuery from 'hooks/useUrlQuery';
import history from 'lib/history';
import { useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { useQuery } from 'react-query';
import { useLocation } from 'react-router-dom';

import {
    errorMessageReceivedFromBackend,
    improvedErrorMessage,
    returnToAlertsPage,
} from './constants';

function EditRules(): JSX.Element {
    const { search } = useLocation();
    const params = new URLSearchParams(search);
    const params = useUrlQuery();
    const ruleId = params.get('ruleId');

    const { t } = useTranslation('common');

    const isValidRuleId = ruleId !== null && String(ruleId).length !== 0;
@@ -31,6 +39,14 @@ function EditRules(): JSX.Element {

    const { notifications } = useNotifications();

    const clickHandler = (): void => {
        params.delete(QueryParams.compositeQuery);
        params.delete(QueryParams.panelTypes);
        params.delete(QueryParams.ruleId);
        params.delete(QueryParams.relativeTime);
        history.push(`${ROUTES.LIST_ALL_ALERT}?${params.toString()}`);
    };

    useEffect(() => {
        if (!isValidRuleId) {
            notifications.error({
@@ -45,7 +61,22 @@ function EditRules(): JSX.Element {
        ruleId == null ||
        (data?.payload?.data === undefined && !isLoading)
    ) {
        return <div>{data?.error || t('something_went_wrong')}</div>;
        return (
            <div className="edit-rules-container">
                <Card size="small" className="edit-rules-card">
                    <p className="content">
                        {data?.message === errorMessageReceivedFromBackend
                            ? improvedErrorMessage
                            : data?.error || t('something_went_wrong')}
                    </p>
                    <div className="btn-container">
                        <Button type="default" size="large" onClick={clickHandler}>
                            {returnToAlertsPage}
                        </Button>
                    </div>
                </Card>
            </div>
        );
    }

    if (isLoading || isRefetching || !data?.payload) {
@@ -153,7 +153,7 @@ export const GetStartedContent = (): TGetStartedContentSection[] => {
    href="https://signoz-community.slack.com/archives/C01HWQ1R0BC"
    target="_blank"
>
    #general
    #dummy_channel
</Typography.Link>
</>
),
@@ -158,6 +158,7 @@ export enum QueryFunctionsTypes {
    CLAMP_MIN = 'clampMin',
    CLAMP_MAX = 'clampMax',
    ABSOLUTE = 'absolute',
    RUNNING_DIFF = 'runningDiff',
    LOG_2 = 'log2',
    LOG_10 = 'log10',
    CUMULATIVE_SUM = 'cumSum',
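For context on the new `runningDiff` entry above: a running diff maps each point in a series to its difference from the previous point, so an n-point series yields n-1 diffs. A minimal standalone sketch of the transform, independent of the SigNoz query pipeline (dropping the first point is this sketch's choice, not necessarily the backend's):

```go
package main

import "fmt"

// runningDiff returns v[i] - v[i-1] for each i >= 1.
// The first point has no predecessor, so it produces no output here.
func runningDiff(values []float64) []float64 {
	if len(values) < 2 {
		return nil
	}
	out := make([]float64, 0, len(values)-1)
	for i := 1; i < len(values); i++ {
		out = append(out, values[i]-values[i-1])
	}
	return out
}

func main() {
	fmt.Println(runningDiff([]float64{10, 12, 11, 15})) // [2 -1 4]
}
```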
go.mod (8 lines changed)
@@ -3,7 +3,7 @@ module go.signoz.io/signoz
go 1.21.3

require (
	github.com/ClickHouse/clickhouse-go/v2 v2.20.0
	github.com/ClickHouse/clickhouse-go/v2 v2.23.2
	github.com/DATA-DOG/go-sqlmock v1.5.2
	github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
	github.com/SigNoz/signoz-otel-collector v0.102.2
@@ -46,7 +46,7 @@ require (
	github.com/sethvargo/go-password v0.2.0
	github.com/smartystreets/goconvey v1.8.1
	github.com/soheilhy/cmux v0.1.5
	github.com/srikanthccv/ClickHouse-go-mock v0.7.0
	github.com/srikanthccv/ClickHouse-go-mock v0.8.0
	github.com/stretchr/testify v1.9.0
	go.opentelemetry.io/collector/component v0.102.1
	go.opentelemetry.io/collector/confmap v0.102.1
@@ -83,7 +83,7 @@ require (
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
	github.com/ClickHouse/ch-go v0.61.3 // indirect
	github.com/ClickHouse/ch-go v0.61.5 // indirect
	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
	github.com/andybalholm/brotli v1.1.0 // indirect
	github.com/aws/aws-sdk-go v1.53.16 // indirect
@@ -156,7 +156,7 @@ require (
	github.com/segmentio/backo-go v1.0.1 // indirect
	github.com/shirou/gopsutil/v3 v3.24.4 // indirect
	github.com/shoenig/go-m1cpu v0.1.6 // indirect
	github.com/shopspring/decimal v1.3.1 // indirect
	github.com/shopspring/decimal v1.4.0 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/smarty/assertions v1.15.0 // indirect
	github.com/spf13/cobra v1.8.0 // indirect
go.sum (16 lines changed)
@@ -48,10 +48,10 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mx
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/ch-go v0.61.3 h1:MmBwUhXrAOBZK7n/sWBzq6FdIQ01cuF2SaaO8KlDRzI=
github.com/ClickHouse/ch-go v0.61.3/go.mod h1:1PqXjMz/7S1ZUaKvwPA3i35W2bz2mAMFeCi6DIXgGwQ=
github.com/ClickHouse/clickhouse-go/v2 v2.20.0 h1:bvlLQ31XJfl7MxIqAq2l1G6JhHYzqEXdvfpMeU6bkKc=
github.com/ClickHouse/clickhouse-go/v2 v2.20.0/go.mod h1:VQfyA+tCwCRw2G7ogfY8V0fq/r0yJWzy8UDrjiP/Lbs=
github.com/ClickHouse/ch-go v0.61.5 h1:zwR8QbYI0tsMiEcze/uIMK+Tz1D3XZXLdNrlaOpeEI4=
github.com/ClickHouse/ch-go v0.61.5/go.mod h1:s1LJW/F/LcFs5HJnuogFMta50kKDO0lf9zzfrbl0RQg=
github.com/ClickHouse/clickhouse-go/v2 v2.23.2 h1:+DAKPMnxLS7pduQZsrJc8OhdLS2L9MfDEJ2TS+hpYDM=
github.com/ClickHouse/clickhouse-go/v2 v2.23.2/go.mod h1:aNap51J1OM3yxQJRgM+AlP/MPkGBCL8A74uQThoQhR0=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
@@ -691,8 +691,8 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
@@ -716,8 +716,8 @@ github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/srikanthccv/ClickHouse-go-mock v0.7.0 h1:XhRMX2663xkDGq3DYavw8m75O94s9u76hOIjo9QBl8c=
github.com/srikanthccv/ClickHouse-go-mock v0.7.0/go.mod h1:IJZ/eL1h4cOy/Jo3PzNKXSPmqRus15BC2MbduYPpA/g=
github.com/srikanthccv/ClickHouse-go-mock v0.8.0 h1:DeeM8XLbTFl6sjYPPwazPEXx7kmRV8TgPFVkt1SqT0Y=
github.com/srikanthccv/ClickHouse-go-mock v0.8.0/go.mod h1:pgJm+apjvi7FHxEdgw1Bt4MRbUYpVxyhKQ/59Wkig24=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -13,6 +13,7 @@ import (
	"os"
	"reflect"
	"regexp"
	"slices"
	"sort"
	"strconv"
	"strings"
@@ -61,6 +62,8 @@ const (
	primaryNamespace          = "clickhouse"
	archiveNamespace          = "clickhouse-archive"
	signozTraceDBName         = "signoz_traces"
	signozHistoryDBName       = "signoz_analytics"
	ruleStateHistoryTableName = "distributed_rule_state_history"
	signozDurationMVTable     = "distributed_durationSort"
	signozUsageExplorerTable  = "distributed_usage_explorer"
	signozSpansTable          = "distributed_signoz_spans"
@@ -4357,6 +4360,128 @@ func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.Fi

}

func (r *ClickHouseReader) GetQBFilterSuggestionsForLogs(
	ctx context.Context,
	req *v3.QBFilterSuggestionsRequest,
) (*v3.QBFilterSuggestionsResponse, *model.ApiError) {
	suggestions := v3.QBFilterSuggestionsResponse{
		AttributeKeys:  []v3.AttributeKey{},
		ExampleQueries: []v3.FilterSet{},
	}

	// Use existing autocomplete logic for generating attribute suggestions
	attribKeysResp, err := r.GetLogAttributeKeys(
		ctx, &v3.FilterAttributeKeyRequest{
			SearchText: req.SearchText,
			DataSource: v3.DataSourceLogs,
			Limit:      req.Limit,
		})
	if err != nil {
		return nil, model.InternalError(fmt.Errorf("couldn't get attribute keys: %w", err))
	}

	suggestions.AttributeKeys = attribKeysResp.AttributeKeys

	// Rank suggested attributes
	slices.SortFunc(suggestions.AttributeKeys, func(a v3.AttributeKey, b v3.AttributeKey) int {

		// Higher score => higher rank
		attribKeyScore := func(a v3.AttributeKey) int {

			// Scoring criteria is expected to get more sophisticated in follow up changes
			if a.Type == v3.AttributeKeyTypeResource {
				return 2
			}

			if a.Type == v3.AttributeKeyTypeTag {
				return 1
			}

			return 0
		}

		// To sort in descending order of score the return value must be negative when a > b
		return attribKeyScore(b) - attribKeyScore(a)
	})

	// Put together suggested example queries.

	newExampleQuery := func() v3.FilterSet {
		// Include existing filter in example query if specified.
		if req.ExistingFilter != nil {
			return *req.ExistingFilter
		}

		return v3.FilterSet{
			Operator: "AND",
			Items:    []v3.FilterItem{},
		}
	}

	// Suggest example query for top suggested attribute using existing
	// autocomplete logic for recommending attrib values
	//
	// Example queries for multiple top attributes using a batch version of
	// GetLogAttributeValues is expected to come in a follow up change
	if len(suggestions.AttributeKeys) > 0 {
		topAttrib := suggestions.AttributeKeys[0]

		resp, err := r.GetLogAttributeValues(ctx, &v3.FilterAttributeValueRequest{
			DataSource:                 v3.DataSourceLogs,
			FilterAttributeKey:         topAttrib.Key,
			FilterAttributeKeyDataType: topAttrib.DataType,
			TagType:                    v3.TagType(topAttrib.Type),
			Limit:                      1,
		})

		if err != nil {
			// Do not fail the entire request if only example query generation fails
			zap.L().Error("could not find attribute values for creating example query", zap.Error(err))

		} else {
			addExampleQuerySuggestion := func(value any) {
				exampleQuery := newExampleQuery()

				exampleQuery.Items = append(exampleQuery.Items, v3.FilterItem{
					Key:      topAttrib,
					Operator: "=",
					Value:    value,
				})

				suggestions.ExampleQueries = append(
					suggestions.ExampleQueries, exampleQuery,
				)
			}

			if len(resp.StringAttributeValues) > 0 {
				addExampleQuerySuggestion(resp.StringAttributeValues[0])
			} else if len(resp.NumberAttributeValues) > 0 {
				addExampleQuerySuggestion(resp.NumberAttributeValues[0])
			} else if len(resp.BoolAttributeValues) > 0 {
				addExampleQuerySuggestion(resp.BoolAttributeValues[0])
			}
		}
	}

	// Suggest static example queries for standard log attributes if needed.
	if len(suggestions.ExampleQueries) < req.Limit {
		exampleQuery := newExampleQuery()
		exampleQuery.Items = append(exampleQuery.Items, v3.FilterItem{
			Key: v3.AttributeKey{
				Key:      "body",
				DataType: v3.AttributeKeyDataTypeString,
				Type:     v3.AttributeKeyTypeUnspecified,
				IsColumn: true,
			},
			Operator: "contains",
			Value:    "error",
		})
		suggestions.ExampleQueries = append(suggestions.ExampleQueries, exampleQuery)
	}

	return &suggestions, nil
}
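A note on the ranking above: `slices.SortFunc` orders ascending by the comparator's sign, so returning `attribKeyScore(b) - attribKeyScore(a)` ranks higher-scoring keys first, exactly as the inline comment says. A standalone sketch of the same descending-by-score sort (the `key` type here is a hypothetical stand-in, not the actual `v3.AttributeKey`):

```go
package main

import (
	"fmt"
	"slices"
)

// key is a stand-in for an attribute key in this sketch.
type key struct {
	name string
	typ  string // "resource" | "tag" | ""
}

// score mirrors the scoring in the diff: resource > tag > everything else.
func score(k key) int {
	switch k.typ {
	case "resource":
		return 2
	case "tag":
		return 1
	}
	return 0
}

func main() {
	keys := []key{{"body", ""}, {"http.method", "tag"}, {"service.name", "resource"}}
	// A negative result sorts a before b, so score(b)-score(a) is descending by score.
	slices.SortFunc(keys, func(a, b key) int { return score(b) - score(a) })
	fmt.Println(keys) // [{service.name resource} {http.method tag} {body }]
}
```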
func readRow(vars []interface{}, columnNames []string, countOfNumberCols int) ([]string, map[string]string, []map[string]string, *v3.Point) {
	// Each row will have a value and a timestamp, and an optional list of label values
	// example: {Timestamp: ..., Value: ...}
@@ -5002,6 +5127,319 @@ func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, tim
	}
}

func (r *ClickHouseReader) AddRuleStateHistory(ctx context.Context, ruleStateHistory []v3.RuleStateHistory) error {
	var statement driver.Batch
	var err error

	defer func() {
		if statement != nil {
			statement.Abort()
		}
	}()

	statement, err = r.db.PrepareBatch(ctx, fmt.Sprintf("INSERT INTO %s.%s (rule_id, rule_name, overall_state, overall_state_changed, state, state_changed, unix_milli, labels, fingerprint, value) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
		signozHistoryDBName, ruleStateHistoryTableName))

	if err != nil {
		return err
	}

	for _, history := range ruleStateHistory {
		err = statement.Append(history.RuleID, history.RuleName, history.OverallState, history.OverallStateChanged, history.State, history.StateChanged, history.UnixMilli, history.Labels, history.Fingerprint, history.Value)
		if err != nil {
			return err
		}
	}

	err = statement.Send()
	if err != nil {
		return err
	}
	return nil
}
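AddRuleStateHistory follows the usual clickhouse-go batching idiom: prepare, append row by row, send, with a deferred Abort covering every early-return path. (My reading is that aborting an already-sent batch merely returns an error, which the defer discards; verify this against the clickhouse-go version pinned in go.mod.) A minimal sketch of the same pattern with an illustrative table name:

```go
package batchexample

import (
	"context"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// insertBatch sketches the prepare/append/send pattern above.
// "my_db.my_table" and the column list are illustrative only.
func insertBatch(ctx context.Context, conn driver.Conn, rows [][]any) error {
	batch, err := conn.PrepareBatch(ctx, "INSERT INTO my_db.my_table (a, b)")
	if err != nil {
		return err
	}
	// The deferred Abort cleans up on every early return; on a batch
	// that was already sent it just reports an error, ignored here.
	defer batch.Abort()

	for _, row := range rows {
		if err := batch.Append(row...); err != nil {
			return err
		}
	}
	return batch.Send()
}
```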
func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID(
	ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateHistory, error) {

	var conditions []string

	conditions = append(conditions, fmt.Sprintf("rule_id = '%s'", ruleID))

	conditions = append(conditions, fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", params.Start, params.End))

	if params.Filters != nil && len(params.Filters.Items) != 0 {
		for _, item := range params.Filters.Items {
			toFormat := item.Value
			op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
			if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {
				toFormat = fmt.Sprintf("%%%s%%", toFormat)
			}
			fmtVal := utils.ClickHouseFormattedValue(toFormat)
			switch op {
			case v3.FilterOperatorEqual:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') = %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotEqual:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') != %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorIn:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') IN %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotIn:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') NOT IN %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorLike:
				conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotLike:
				conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorRegex:
				conditions = append(conditions, fmt.Sprintf("match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotRegex:
				conditions = append(conditions, fmt.Sprintf("not match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorGreaterThan:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') > %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorGreaterThanOrEq:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') >= %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorLessThan:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') < %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorLessThanOrEq:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') <= %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorContains:
				conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotContains:
				conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorExists:
				conditions = append(conditions, fmt.Sprintf("has(JSONExtractKeys(labels), '%s')", item.Key.Key))
			case v3.FilterOperatorNotExists:
				conditions = append(conditions, fmt.Sprintf("not has(JSONExtractKeys(labels), '%s')", item.Key.Key))
			default:
				return nil, fmt.Errorf("unsupported filter operator")
			}
		}
	}
	whereClause := strings.Join(conditions, " AND ")

	query := fmt.Sprintf("SELECT * FROM %s.%s WHERE %s ORDER BY unix_milli %s LIMIT %d OFFSET %d",
		signozHistoryDBName, ruleStateHistoryTableName, whereClause, params.Order, params.Limit, params.Offset)

	history := []v3.RuleStateHistory{}
	err := r.db.Select(ctx, &history, query)
	if err != nil {
		zap.L().Error("Error while reading rule state history", zap.Error(err))
		return nil, err
	}

	return history, nil
}

func (r *ClickHouseReader) ReadRuleStateHistoryTopContributorsByRuleID(
	ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateHistoryContributor, error) {
	query := fmt.Sprintf(`SELECT
		fingerprint,
		any(labels) as labels,
		count(*) as count
	FROM %s.%s
	WHERE rule_id = '%s' AND (state_changed = true) AND (state = 'firing') AND unix_milli >= %d AND unix_milli <= %d
	GROUP BY fingerprint
	ORDER BY count DESC`,
		signozHistoryDBName, ruleStateHistoryTableName, ruleID, params.Start, params.End)

	contributors := []v3.RuleStateHistoryContributor{}
	err := r.db.Select(ctx, &contributors, query)
	if err != nil {
		zap.L().Error("Error while reading rule state history", zap.Error(err))
		return nil, err
	}

	return contributors, nil
}

func (r *ClickHouseReader) GetOverallStateTransitions(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateTransition, error) {

	tmpl := `WITH firing_events AS (
	SELECT
		rule_id,
		state,
		unix_milli AS firing_time
	FROM %s.%s
	WHERE overall_state = 'firing'
	AND overall_state_changed = true
	AND rule_id IN ('%s')
	AND unix_milli >= %d AND unix_milli <= %d
),
resolution_events AS (
	SELECT
		rule_id,
		state,
		unix_milli AS resolution_time
	FROM %s.%s
	WHERE overall_state = 'normal'
	AND overall_state_changed = true
	AND rule_id IN ('%s')
	AND unix_milli >= %d AND unix_milli <= %d
),
matched_events AS (
	SELECT
		f.rule_id,
		f.state,
		f.firing_time,
		MIN(r.resolution_time) AS resolution_time
	FROM firing_events f
	LEFT JOIN resolution_events r
	ON f.rule_id = r.rule_id
	WHERE r.resolution_time > f.firing_time
	GROUP BY f.rule_id, f.state, f.firing_time
)
SELECT *
FROM matched_events
ORDER BY firing_time ASC;`

	query := fmt.Sprintf(tmpl,
		signozHistoryDBName, ruleStateHistoryTableName, ruleID, params.Start, params.End,
		signozHistoryDBName, ruleStateHistoryTableName, ruleID, params.Start, params.End)

	transitions := []v3.RuleStateTransition{}
	err := r.db.Select(ctx, &transitions, query)
	if err != nil {
		return nil, err
	}

	return transitions, nil
}

func (r *ClickHouseReader) GetAvgResolutionTime(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (float64, error) {

	tmpl := `
WITH firing_events AS (
	SELECT
		rule_id,
		state,
		unix_milli AS firing_time
	FROM %s.%s
	WHERE overall_state = 'firing'
	AND overall_state_changed = true
	AND rule_id IN ('%s')
	AND unix_milli >= %d AND unix_milli <= %d
),
resolution_events AS (
	SELECT
		rule_id,
		state,
		unix_milli AS resolution_time
	FROM %s.%s
	WHERE overall_state = 'normal'
	AND overall_state_changed = true
	AND rule_id IN ('%s')
	AND unix_milli >= %d AND unix_milli <= %d
),
matched_events AS (
	SELECT
		f.rule_id,
		f.state,
		f.firing_time,
		MIN(r.resolution_time) AS resolution_time
	FROM firing_events f
	LEFT JOIN resolution_events r
	ON f.rule_id = r.rule_id
	WHERE r.resolution_time > f.firing_time
	GROUP BY f.rule_id, f.state, f.firing_time
)
SELECT AVG(resolution_time - firing_time) / 1000 AS avg_resolution_time
FROM matched_events;
`

	query := fmt.Sprintf(tmpl,
		signozHistoryDBName, ruleStateHistoryTableName, ruleID, params.Start, params.End,
		signozHistoryDBName, ruleStateHistoryTableName, ruleID, params.Start, params.End)

	var avgResolutionTime float64
	err := r.db.QueryRow(ctx, query).Scan(&avgResolutionTime)
	if err != nil {
		return 0, err
	}

	return avgResolutionTime, nil
}

func (r *ClickHouseReader) GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error) {

	step := common.MinAllowedStepInterval(params.Start, params.End)

	tmpl := `
WITH firing_events AS (
	SELECT
		rule_id,
		state,
		unix_milli AS firing_time
	FROM %s.%s
	WHERE overall_state = 'firing'
	AND overall_state_changed = true
	AND rule_id IN ('%s')
	AND unix_milli >= %d AND unix_milli <= %d
),
resolution_events AS (
	SELECT
		rule_id,
		state,
		unix_milli AS resolution_time
	FROM %s.%s
	WHERE overall_state = 'normal'
	AND overall_state_changed = true
	AND rule_id IN ('%s')
	AND unix_milli >= %d AND unix_milli <= %d
),
matched_events AS (
	SELECT
		f.rule_id,
		f.state,
		f.firing_time,
		MIN(r.resolution_time) AS resolution_time
	FROM firing_events f
	LEFT JOIN resolution_events r
	ON f.rule_id = r.rule_id
	WHERE r.resolution_time > f.firing_time
	GROUP BY f.rule_id, f.state, f.firing_time
)
SELECT toStartOfInterval(toDateTime(firing_time / 1000), INTERVAL %d SECOND) AS ts, AVG(resolution_time - firing_time) / 1000 AS avg_resolution_time
FROM matched_events
GROUP BY ts
ORDER BY ts ASC;`

	query := fmt.Sprintf(tmpl,
		signozHistoryDBName, ruleStateHistoryTableName, ruleID, params.Start, params.End,
		signozHistoryDBName, ruleStateHistoryTableName, ruleID, params.Start, params.End, step)

	result, err := r.GetTimeSeriesResultV3(ctx, query)
	if err != nil || len(result) == 0 {
		return nil, err
	}

	return result[0], nil
}

func (r *ClickHouseReader) GetTotalTriggers(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (uint64, error) {
	query := fmt.Sprintf("SELECT count(*) FROM %s.%s WHERE rule_id = '%s' AND (state_changed = true) AND (state = 'firing') AND unix_milli >= %d AND unix_milli <= %d",
		signozHistoryDBName, ruleStateHistoryTableName, ruleID, params.Start, params.End)

	var totalTriggers uint64
	err := r.db.QueryRow(ctx, query).Scan(&totalTriggers)
	if err != nil {
		return 0, err
	}

	return totalTriggers, nil
}

func (r *ClickHouseReader) GetTriggersByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error) {
	step := common.MinAllowedStepInterval(params.Start, params.End)

	query := fmt.Sprintf("SELECT count(*), toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts FROM %s.%s WHERE rule_id = '%s' AND (state_changed = true) AND (state = 'firing') AND unix_milli >= %d AND unix_milli <= %d GROUP BY ts ORDER BY ts ASC",
		step, signozHistoryDBName, ruleStateHistoryTableName, ruleID, params.Start, params.End)

	result, err := r.GetTimeSeriesResultV3(ctx, query)
	if err != nil || len(result) == 0 {
		return nil, err
	}

	return result[0], nil
}

func (r *ClickHouseReader) GetMinAndMaxTimestampForTraceID(ctx context.Context, traceID []string) (int64, int64, error) {
	var minTime, maxTime time.Time

@@ -5016,9 +5454,10 @@ func (r *ClickHouseReader) GetMinAndMaxTimestampForTraceID(ctx context.Context,
		return 0, 0, err
	}

	// return current time if traceID not found
	if minTime.IsZero() || maxTime.IsZero() {
		zap.L().Debug("minTime or maxTime is zero")
		return 0, 0, nil
		zap.L().Debug("minTime or maxTime is zero, traceID not found")
		return time.Now().UnixNano(), time.Now().UnixNano(), nil
	}

	zap.L().Debug("GetMinAndMaxTimestampForTraceID", zap.Any("minTime", minTime), zap.Any("maxTime", maxTime))
@@ -7,6 +7,7 @@ import (
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"regexp"
	"slices"
@@ -43,6 +44,7 @@ import (

	"go.uber.org/zap"

	mq "go.signoz.io/signoz/pkg/query-service/app/integrations/messagingQueues/kafka"
	"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
	"go.signoz.io/signoz/pkg/query-service/dao"
	am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
@@ -301,6 +303,8 @@ func (aH *APIHandler) RegisterQueryRangeV3Routes(router *mux.Router, am *AuthMid
	subRouter.HandleFunc("/query_range", am.ViewAccess(aH.QueryRangeV3)).Methods(http.MethodPost)
	subRouter.HandleFunc("/query_range/format", am.ViewAccess(aH.QueryRangeV3Format)).Methods(http.MethodPost)

	subRouter.HandleFunc("/filter_suggestions", am.ViewAccess(aH.getQueryBuilderSuggestions)).Methods(http.MethodGet)

	// live logs
	subRouter.HandleFunc("/logs/livetail", am.ViewAccess(aH.liveTailLogs)).Methods(http.MethodGet)
}
@@ -340,6 +344,10 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
	router.HandleFunc("/api/v1/rules/{id}", am.EditAccess(aH.deleteRule)).Methods(http.MethodDelete)
	router.HandleFunc("/api/v1/rules/{id}", am.EditAccess(aH.patchRule)).Methods(http.MethodPatch)
	router.HandleFunc("/api/v1/testRule", am.EditAccess(aH.testRule)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/rules/{id}/history/stats", am.ViewAccess(aH.getRuleStats)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/rules/{id}/history/timeline", am.ViewAccess(aH.getRuleStateHistory)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/rules/{id}/history/top_contributors", am.ViewAccess(aH.getRuleStateHistoryTopContributors)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/rules/{id}/history/overall_status", am.ViewAccess(aH.getOverallStateTransitions)).Methods(http.MethodPost)

	router.HandleFunc("/api/v1/downtime_schedules", am.OpenAccess(aH.listDowntimeSchedules)).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/downtime_schedules/{id}", am.OpenAccess(aH.getDowntimeSchedule)).Methods(http.MethodGet)
@@ -622,6 +630,159 @@ func (aH *APIHandler) deleteDowntimeSchedule(w http.ResponseWriter, r *http.Requ
	aH.Respond(w, nil)
}

func (aH *APIHandler) getRuleStats(w http.ResponseWriter, r *http.Request) {
	ruleID := mux.Vars(r)["id"]
	params := v3.QueryRuleStateHistory{}
	err := json.NewDecoder(r.Body).Decode(&params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
		return
	}

	totalCurrentTriggers, err := aH.reader.GetTotalTriggers(r.Context(), ruleID, &params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}
	currentTriggersSeries, err := aH.reader.GetTriggersByInterval(r.Context(), ruleID, &params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}

	currentAvgResolutionTime, err := aH.reader.GetAvgResolutionTime(r.Context(), ruleID, &params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}
	currentAvgResolutionTimeSeries, err := aH.reader.GetAvgResolutionTimeByInterval(r.Context(), ruleID, &params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}

	if params.End-params.Start >= 86400000 {
		days := int64(math.Ceil(float64(params.End-params.Start) / 86400000))
		params.Start -= days * 86400000
		params.End -= days * 86400000
	} else {
		params.Start -= 86400000
		params.End -= 86400000
	}

	totalPastTriggers, err := aH.reader.GetTotalTriggers(r.Context(), ruleID, &params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}
	pastTriggersSeries, err := aH.reader.GetTriggersByInterval(r.Context(), ruleID, &params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}

	pastAvgResolutionTime, err := aH.reader.GetAvgResolutionTime(r.Context(), ruleID, &params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}
	pastAvgResolutionTimeSeries, err := aH.reader.GetAvgResolutionTimeByInterval(r.Context(), ruleID, &params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}
	stats := v3.Stats{
		TotalCurrentTriggers:           totalCurrentTriggers,
		TotalPastTriggers:              totalPastTriggers,
		CurrentTriggersSeries:          currentTriggersSeries,
		PastTriggersSeries:             pastTriggersSeries,
		CurrentAvgResolutionTime:       strconv.FormatFloat(currentAvgResolutionTime, 'f', -1, 64),
		PastAvgResolutionTime:          strconv.FormatFloat(pastAvgResolutionTime, 'f', -1, 64),
		CurrentAvgResolutionTimeSeries: currentAvgResolutionTimeSeries,
		PastAvgResolutionTimeSeries:    pastAvgResolutionTimeSeries,
	}

	aH.Respond(w, stats)
}
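getRuleStats compares the current window against an equally sized previous window: ranges of a day or longer are shifted back by a whole number of days (so the shifted window never overlaps the current one), shorter ranges by exactly 24 hours (86400000 ms). A standalone sketch of that arithmetic, mirroring the handler logic above (`previousWindow` is a hypothetical helper name):

```go
package main

import (
	"fmt"
	"math"
)

const dayMs = 86400000

// previousWindow mirrors the window shift in getRuleStats above.
func previousWindow(start, end int64) (int64, int64) {
	if end-start >= dayMs {
		days := int64(math.Ceil(float64(end-start) / dayMs))
		return start - days*dayMs, end - days*dayMs
	}
	return start - dayMs, end - dayMs
}

func main() {
	// A 36h window steps back ceil(1.5) = 2 days, keeping the windows disjoint.
	s, e := previousWindow(0, 36*3600*1000)
	fmt.Println(s, e) // -172800000 -43200000
}
```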
func (aH *APIHandler) getOverallStateTransitions(w http.ResponseWriter, r *http.Request) {
	ruleID := mux.Vars(r)["id"]
	params := v3.QueryRuleStateHistory{}
	err := json.NewDecoder(r.Body).Decode(&params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
		return
	}

	res, err := aH.reader.GetOverallStateTransitions(r.Context(), ruleID, &params)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
		return
	}

	stateItems := []v3.ReleStateItem{}

	for idx, item := range res {
		start := item.FiringTime
		end := item.ResolutionTime
		stateItems = append(stateItems, v3.ReleStateItem{
			State: item.State,
			Start: start,
			End:   end,
		})
		if idx < len(res)-1 {
			nextStart := res[idx+1].FiringTime
			if nextStart > end {
				stateItems = append(stateItems, v3.ReleStateItem{
					State: "normal",
					Start: end,
					End:   nextStart,
				})
			}
		}
	}

	aH.Respond(w, stateItems)
}
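The timeline loop above emits each firing interval and, whenever the next firing interval starts after the current one resolves, an explicit "normal" segment in between. A compact sketch of that gap-filling with a hypothetical interval type:

```go
package main

import "fmt"

type interval struct{ start, end int64 } // a resolved firing interval

// withNormalGaps mirrors the loop above: any gap between consecutive
// firing intervals is reported as an explicit "normal" segment.
func withNormalGaps(firing []interval) []string {
	out := []string{}
	for i, iv := range firing {
		out = append(out, fmt.Sprintf("firing %d-%d", iv.start, iv.end))
		if i < len(firing)-1 && firing[i+1].start > iv.end {
			out = append(out, fmt.Sprintf("normal %d-%d", iv.end, firing[i+1].start))
		}
	}
	return out
}

func main() {
	fmt.Println(withNormalGaps([]interval{{0, 10}, {20, 30}}))
	// [firing 0-10 normal 10-20 firing 20-30]
}
```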
|
||||
|
||||
func (aH *APIHandler) getRuleStateHistory(w http.ResponseWriter, r *http.Request) {
|
||||
ruleID := mux.Vars(r)["id"]
|
||||
params := v3.QueryRuleStateHistory{}
|
||||
err := json.NewDecoder(r.Body).Decode(¶ms)
|
||||
if err != nil {
|
||||
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
|
||||
return
|
||||
}
|
||||
if err := params.Validate(); err != nil {
|
||||
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
|
||||
return
|
||||
}
|
||||
|
||||
res, err := aH.reader.ReadRuleStateHistoryByRuleID(r.Context(), ruleID, ¶ms)
|
||||
if err != nil {
|
||||
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
|
||||
return
|
||||
}
|
||||
aH.Respond(w, res)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) getRuleStateHistoryTopContributors(w http.ResponseWriter, r *http.Request) {
|
||||
ruleID := mux.Vars(r)["id"]
|
||||
params := v3.QueryRuleStateHistory{}
|
||||
err := json.NewDecoder(r.Body).Decode(¶ms)
|
||||
if err != nil {
|
||||
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
|
||||
return
|
||||
}
|
||||
|
||||
res, err := aH.reader.ReadRuleStateHistoryTopContributorsByRuleID(r.Context(), ruleID, ¶ms)
|
||||
if err != nil {
|
||||
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
|
||||
return
|
||||
}
|
||||
aH.Respond(w, res)
|
||||
}
|
||||
|
||||
func (aH *APIHandler) listRules(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
rules, err := aH.ruleManager.ListRuleStates(r.Context())
|
||||
@@ -2246,9 +2407,112 @@ func (aH *APIHandler) WriteJSON(w http.ResponseWriter, r *http.Request, response
	w.Write(resp)
}

// RegisterMessagingQueuesRoutes adds messaging-queues routes
func (aH *APIHandler) RegisterMessagingQueuesRoutes(router *mux.Router, am *AuthMiddleware) {
	// SubRouter for kafka
	kafkaSubRouter := router.PathPrefix("/api/v1/messaging-queues/kafka/consumer-lag").Subrouter()

	kafkaSubRouter.HandleFunc("/producer-details", am.ViewAccess(aH.getProducerData)).Methods(http.MethodPost)
	kafkaSubRouter.HandleFunc("/consumer-details", am.ViewAccess(aH.getConsumerData)).Methods(http.MethodPost)

	// for other messaging queues, add SubRouters here
}

func (aH *APIHandler) getProducerData(
	w http.ResponseWriter, r *http.Request,
) {
	// parse the request body to retrieve the messaging queue struct
	messagingQueue, apiErr := ParseMessagingQueueBody(r)

	if apiErr != nil {
		zap.L().Error(apiErr.Err.Error())
		RespondError(w, apiErr, nil)
		return
	}

	queryRangeParams, err := mq.BuildQueryRangeParams(messagingQueue, "producer")
	if err != nil {
		zap.L().Error(err.Error())
		// apiErr is nil here, so build a proper error from err
		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
		return
	}

	if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
		zap.L().Error(err.Error())
		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
		return
	}

	var result []*v3.Result
	var errQueriesByName map[string]error

	result, errQueriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil)
	if err != nil {
		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
		RespondError(w, apiErrObj, errQueriesByName)
		return
	}
	result = postprocess.TransformToTableForClickHouseQueries(result)

	resp := v3.QueryRangeResponse{
		Result: result,
	}
	aH.Respond(w, resp)
}

func (aH *APIHandler) getConsumerData(
	w http.ResponseWriter, r *http.Request,
) {
	messagingQueue, apiErr := ParseMessagingQueueBody(r)

	if apiErr != nil {
		zap.L().Error(apiErr.Err.Error())
		RespondError(w, apiErr, nil)
		return
	}

	queryRangeParams, err := mq.BuildQueryRangeParams(messagingQueue, "consumer")
	if err != nil {
		zap.L().Error(err.Error())
		// apiErr is nil here, so build a proper error from err
		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
		return
	}

	if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
		zap.L().Error(err.Error())
		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
		return
	}

	var result []*v3.Result
	var errQueriesByName map[string]error

	result, errQueriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil)
	if err != nil {
		apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
		RespondError(w, apiErrObj, errQueriesByName)
		return
	}
	result = postprocess.TransformToTableForClickHouseQueries(result)

	resp := v3.QueryRangeResponse{
		Result: result,
	}
	aH.Respond(w, resp)
}

// ParseMessagingQueueBody parses the messaging queue params from the request body
func ParseMessagingQueueBody(r *http.Request) (*mq.MessagingQueue, *model.ApiError) {
	messagingQueue := new(mq.MessagingQueue)
	if err := json.NewDecoder(r.Body).Decode(messagingQueue); err != nil {
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("cannot parse the request body: %v", err)}
	}
	return messagingQueue, nil
}

// Preferences

func (ah *APIHandler) getUserPreference(
func (aH *APIHandler) getUserPreference(
	w http.ResponseWriter, r *http.Request,
) {
	preferenceId := mux.Vars(r)["preferenceId"]
@@ -2262,10 +2526,10 @@ func (ah *APIHandler) getUserPreference(
		return
	}

	ah.Respond(w, preference)
	aH.Respond(w, preference)
}

func (ah *APIHandler) updateUserPreference(
func (aH *APIHandler) updateUserPreference(
	w http.ResponseWriter, r *http.Request,
) {
	preferenceId := mux.Vars(r)["preferenceId"]
@@ -2284,10 +2548,10 @@ func (ah *APIHandler) updateUserPreference(
		return
	}

	ah.Respond(w, preference)
	aH.Respond(w, preference)
}

func (ah *APIHandler) getAllUserPreferences(
func (aH *APIHandler) getAllUserPreferences(
	w http.ResponseWriter, r *http.Request,
) {
	user := common.GetUserFromContext(r.Context())
@@ -2299,10 +2563,10 @@ func (ah *APIHandler) getAllUserPreferences(
		return
	}

	ah.Respond(w, preference)
	aH.Respond(w, preference)
}

func (ah *APIHandler) getOrgPreference(
func (aH *APIHandler) getOrgPreference(
	w http.ResponseWriter, r *http.Request,
) {
	preferenceId := mux.Vars(r)["preferenceId"]
@@ -2315,10 +2579,10 @@ func (ah *APIHandler) getOrgPreference(
		return
	}

	ah.Respond(w, preference)
	aH.Respond(w, preference)
}

func (ah *APIHandler) updateOrgPreference(
func (aH *APIHandler) updateOrgPreference(
	w http.ResponseWriter, r *http.Request,
) {
	preferenceId := mux.Vars(r)["preferenceId"]
@@ -2337,10 +2601,10 @@ func (ah *APIHandler) updateOrgPreference(
		return
	}

	ah.Respond(w, preference)
	aH.Respond(w, preference)
}

func (ah *APIHandler) getAllOrgPreferences(
func (aH *APIHandler) getAllOrgPreferences(
	w http.ResponseWriter, r *http.Request,
) {
	user := common.GetUserFromContext(r.Context())
@@ -2352,36 +2616,36 @@ func (ah *APIHandler) getAllOrgPreferences(
		return
	}

	ah.Respond(w, preference)
	aH.Respond(w, preference)
}

// Integrations
func (ah *APIHandler) RegisterIntegrationRoutes(router *mux.Router, am *AuthMiddleware) {
// RegisterIntegrationRoutes Registers all Integrations
func (aH *APIHandler) RegisterIntegrationRoutes(router *mux.Router, am *AuthMiddleware) {
	subRouter := router.PathPrefix("/api/v1/integrations").Subrouter()

	subRouter.HandleFunc(
		"/install", am.ViewAccess(ah.InstallIntegration),
		"/install", am.ViewAccess(aH.InstallIntegration),
	).Methods(http.MethodPost)

	subRouter.HandleFunc(
		"/uninstall", am.ViewAccess(ah.UninstallIntegration),
		"/uninstall", am.ViewAccess(aH.UninstallIntegration),
	).Methods(http.MethodPost)

	// Used for polling for status in v0
	subRouter.HandleFunc(
		"/{integrationId}/connection_status", am.ViewAccess(ah.GetIntegrationConnectionStatus),
		"/{integrationId}/connection_status", am.ViewAccess(aH.GetIntegrationConnectionStatus),
	).Methods(http.MethodGet)

	subRouter.HandleFunc(
		"/{integrationId}", am.ViewAccess(ah.GetIntegration),
		"/{integrationId}", am.ViewAccess(aH.GetIntegration),
	).Methods(http.MethodGet)

	subRouter.HandleFunc(
		"", am.ViewAccess(ah.ListIntegrations),
		"", am.ViewAccess(aH.ListIntegrations),
	).Methods(http.MethodGet)
}

func (ah *APIHandler) ListIntegrations(
func (aH *APIHandler) ListIntegrations(
	w http.ResponseWriter, r *http.Request,
) {
	params := map[string]string{}
@@ -2389,21 +2653,21 @@ func (ah *APIHandler) ListIntegrations(
		params[k] = values[0]
	}

	resp, apiErr := ah.IntegrationsController.ListIntegrations(
	resp, apiErr := aH.IntegrationsController.ListIntegrations(
		r.Context(), params,
	)
	if apiErr != nil {
		RespondError(w, apiErr, "Failed to fetch integrations")
		return
	}
	ah.Respond(w, resp)
	aH.Respond(w, resp)
}

func (ah *APIHandler) GetIntegration(
func (aH *APIHandler) GetIntegration(
	w http.ResponseWriter, r *http.Request,
) {
	integrationId := mux.Vars(r)["integrationId"]
	integration, apiErr := ah.IntegrationsController.GetIntegration(
	integration, apiErr := aH.IntegrationsController.GetIntegration(
		r.Context(), integrationId,
	)
	if apiErr != nil {
@@ -2411,14 +2675,14 @@ func (ah *APIHandler) GetIntegration(
		return
	}

	ah.Respond(w, integration)
	aH.Respond(w, integration)
}

func (ah *APIHandler) GetIntegrationConnectionStatus(
func (aH *APIHandler) GetIntegrationConnectionStatus(
	w http.ResponseWriter, r *http.Request,
) {
	integrationId := mux.Vars(r)["integrationId"]
	isInstalled, apiErr := ah.IntegrationsController.IsIntegrationInstalled(
	isInstalled, apiErr := aH.IntegrationsController.IsIntegrationInstalled(
		r.Context(), integrationId,
	)
	if apiErr != nil {
@@ -2428,11 +2692,11 @@ func (ah *APIHandler) GetIntegrationConnectionStatus(

	// Do not spend resources calculating connection status unless installed.
	if !isInstalled {
		ah.Respond(w, &integrations.IntegrationConnectionStatus{})
		aH.Respond(w, &integrations.IntegrationConnectionStatus{})
		return
	}

	connectionTests, apiErr := ah.IntegrationsController.GetIntegrationConnectionTests(
	connectionTests, apiErr := aH.IntegrationsController.GetIntegrationConnectionTests(
		r.Context(), integrationId,
	)
	if apiErr != nil {
@@ -2446,7 +2710,7 @@ func (ah *APIHandler) GetIntegrationConnectionStatus(
		lookbackSeconds = 15 * 60
	}

	connectionStatus, apiErr := ah.calculateConnectionStatus(
	connectionStatus, apiErr := aH.calculateConnectionStatus(
		r.Context(), connectionTests, lookbackSeconds,
	)
	if apiErr != nil {
@@ -2454,10 +2718,10 @@ func (ah *APIHandler) GetIntegrationConnectionStatus(
		return
	}

	ah.Respond(w, connectionStatus)
	aH.Respond(w, connectionStatus)
}

func (ah *APIHandler) calculateConnectionStatus(
func (aH *APIHandler) calculateConnectionStatus(
	ctx context.Context,
	connectionTests *integrations.IntegrationConnectionTests,
	lookbackSeconds int64,
@@ -2475,7 +2739,7 @@ func (ah *APIHandler) calculateConnectionStatus(
	go func() {
		defer wg.Done()

		logsConnStatus, apiErr := ah.calculateLogsConnectionStatus(
		logsConnStatus, apiErr := aH.calculateLogsConnectionStatus(
			ctx, connectionTests.Logs, lookbackSeconds,
		)

@@ -2498,7 +2762,7 @@ func (ah *APIHandler) calculateConnectionStatus(
			return
		}

		statusForLastReceivedMetric, apiErr := ah.reader.GetLatestReceivedMetric(
		statusForLastReceivedMetric, apiErr := aH.reader.GetLatestReceivedMetric(
			ctx, connectionTests.Metrics,
		)

@@ -2542,7 +2806,7 @@ func (ah *APIHandler) calculateConnectionStatus(
	return result, nil
}

func (ah *APIHandler) calculateLogsConnectionStatus(
func (aH *APIHandler) calculateLogsConnectionStatus(
	ctx context.Context,
	logsConnectionTest *integrations.LogsConnectionTest,
	lookbackSeconds int64,
@@ -2584,7 +2848,7 @@ func (ah *APIHandler) calculateLogsConnectionStatus(
			},
		},
	}
	queryRes, _, err := ah.querier.QueryRange(
	queryRes, _, err := aH.querier.QueryRange(
		ctx, qrParams, map[string]v3.AttributeKey{},
	)
	if err != nil {
@@ -2621,7 +2885,7 @@ func (ah *APIHandler) calculateLogsConnectionStatus(
	return nil, nil
}

func (ah *APIHandler) InstallIntegration(
func (aH *APIHandler) InstallIntegration(
	w http.ResponseWriter, r *http.Request,
) {
	req := integrations.InstallIntegrationRequest{}
@@ -2632,7 +2896,7 @@ func (ah *APIHandler) InstallIntegration(
		return
	}

	integration, apiErr := ah.IntegrationsController.Install(
	integration, apiErr := aH.IntegrationsController.Install(
		r.Context(), &req,
	)
	if apiErr != nil {
@@ -2640,10 +2904,10 @@ func (ah *APIHandler) InstallIntegration(
		return
	}

	ah.Respond(w, integration)
	aH.Respond(w, integration)
}

func (ah *APIHandler) UninstallIntegration(
func (aH *APIHandler) UninstallIntegration(
	w http.ResponseWriter, r *http.Request,
) {
	req := integrations.UninstallIntegrationRequest{}
@@ -2654,13 +2918,13 @@ func (ah *APIHandler) UninstallIntegration(
		return
	}

	apiErr := ah.IntegrationsController.Uninstall(r.Context(), &req)
	apiErr := aH.IntegrationsController.Uninstall(r.Context(), &req)
	if apiErr != nil {
		RespondError(w, apiErr, nil)
		return
	}

	ah.Respond(w, map[string]interface{}{})
	aH.Respond(w, map[string]interface{}{})
}

// logs
@@ -2704,7 +2968,7 @@ func (aH *APIHandler) logFieldUpdate(w http.ResponseWriter, r *http.Request) {

	apiErr := aH.reader.UpdateLogField(r.Context(), &field)
	if apiErr != nil {
		RespondError(w, apiErr, "Failed to update filed in the DB")
		RespondError(w, apiErr, "Failed to update field in the DB")
		return
	}
	aH.WriteJSON(w, r, field)
@@ -2807,7 +3071,7 @@ func parseAgentConfigVersion(r *http.Request) (int, *model.ApiError) {
	return int(version64), nil
}

func (ah *APIHandler) PreviewLogsPipelinesHandler(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) PreviewLogsPipelinesHandler(w http.ResponseWriter, r *http.Request) {
	req := logparsingpipeline.PipelinesPreviewRequest{}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
@@ -2815,7 +3079,7 @@ func (ah *APIHandler) PreviewLogsPipelinesHandler(w http.ResponseWriter, r *http
		return
	}

	resultLogs, apiErr := ah.LogsParsingPipelineController.PreviewLogsPipelines(
	resultLogs, apiErr := aH.LogsParsingPipelineController.PreviewLogsPipelines(
		r.Context(), &req,
	)

@@ -2824,10 +3088,10 @@ func (ah *APIHandler) PreviewLogsPipelinesHandler(w http.ResponseWriter, r *http
		return
	}

	ah.Respond(w, resultLogs)
	aH.Respond(w, resultLogs)
}

func (ah *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Request) {

	version, err := parseAgentConfigVersion(r)
	if err != nil {
@@ -2839,20 +3103,20 @@ func (ah *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Re
	var apierr *model.ApiError

	if version != -1 {
		payload, apierr = ah.listLogsPipelinesByVersion(context.Background(), version)
		payload, apierr = aH.listLogsPipelinesByVersion(context.Background(), version)
	} else {
		payload, apierr = ah.listLogsPipelines(context.Background())
		payload, apierr = aH.listLogsPipelines(context.Background())
	}

	if apierr != nil {
		RespondError(w, apierr, payload)
		return
	}
	ah.Respond(w, payload)
	aH.Respond(w, payload)
}

// listLogsPipelines lists logs pipelines for the latest version
func (ah *APIHandler) listLogsPipelines(ctx context.Context) (
func (aH *APIHandler) listLogsPipelines(ctx context.Context) (
	*logparsingpipeline.PipelinesResponse, *model.ApiError,
) {
	// get latest agent config
@@ -2866,7 +3130,7 @@ func (ah *APIHandler) listLogsPipelines(ctx context.Context) (
		latestVersion = lastestConfig.Version
	}

	payload, err := ah.LogsParsingPipelineController.GetPipelinesByVersion(ctx, latestVersion)
	payload, err := aH.LogsParsingPipelineController.GetPipelinesByVersion(ctx, latestVersion)
	if err != nil {
		return nil, model.WrapApiError(err, "failed to get pipelines")
	}
@@ -2882,10 +3146,10 @@ func (ah *APIHandler) listLogsPipelines(ctx context.Context) (
}

// listLogsPipelinesByVersion lists pipelines along with config version history
func (ah *APIHandler) listLogsPipelinesByVersion(ctx context.Context, version int) (
func (aH *APIHandler) listLogsPipelinesByVersion(ctx context.Context, version int) (
	*logparsingpipeline.PipelinesResponse, *model.ApiError,
) {
	payload, err := ah.LogsParsingPipelineController.GetPipelinesByVersion(ctx, version)
	payload, err := aH.LogsParsingPipelineController.GetPipelinesByVersion(ctx, version)
	if err != nil {
		return nil, model.WrapApiError(err, "failed to get pipelines by version")
	}
@@ -2901,7 +3165,7 @@ func (ah *APIHandler) listLogsPipelinesByVersion(ctx context.Context, version in
	return payload, nil
}

func (ah *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request) {

	req := logparsingpipeline.PostablePipelines{}

@@ -2924,7 +3188,7 @@ func (ah *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
			}
		}

		return ah.LogsParsingPipelineController.ApplyPipelines(ctx, postable)
		return aH.LogsParsingPipelineController.ApplyPipelines(ctx, postable)
	}

	res, err := createPipeline(r.Context(), req.Pipelines)
@@ -2933,7 +3197,7 @@ func (ah *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
		return
	}

	ah.Respond(w, res)
	aH.Respond(w, res)
}

func (aH *APIHandler) getSavedViews(w http.ResponseWriter, r *http.Request) {
@@ -3046,6 +3310,30 @@ func (aH *APIHandler) autocompleteAggregateAttributes(w http.ResponseWriter, r *
	aH.Respond(w, response)
}

func (aH *APIHandler) getQueryBuilderSuggestions(w http.ResponseWriter, r *http.Request) {
	req, err := parseQBFilterSuggestionsRequest(r)
	if err != nil {
		RespondError(w, err, nil)
		return
	}

	if req.DataSource != v3.DataSourceLogs {
		// Support for traces and metrics might come later
		RespondError(w, model.BadRequest(
			fmt.Errorf("suggestions not supported for %s", req.DataSource),
		), nil)
		return
	}

	response, err := aH.reader.GetQBFilterSuggestionsForLogs(r.Context(), req)
	if err != nil {
		RespondError(w, err, nil)
		return
	}

	aH.Respond(w, response)
}

func (aH *APIHandler) autoCompleteAttributeKeys(w http.ResponseWriter, r *http.Request) {
	var response *v3.FilterAttributeKeyResponse
	req, err := parseFilterAttributeKeyRequest(r)

@@ -0,0 +1,197 @@
## Consumer Lag feature breakdown

### 1) Consumer Lag Graph

---

### 2) Consumer Group Details

API endpoint:

```
POST /api/v1/messaging-queues/kafka/consumer-lag/consumer-details
```

```json
{
  "start": 1720685296000000000,
  "end": 1721290096000000000,
  "variables": {
    "partition": "0",
    "topic": "topic1",
    "consumer_group": "cg1"
  }
}
```
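
A minimal sketch of calling this endpoint from Go. The base URL (`localhost:8080`) is an assumption for a local query-service; add whatever auth headers your deployment requires:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Request body mirrors the sample payload above.
	body := []byte(`{
		"start": 1720685296000000000,
		"end": 1721290096000000000,
		"variables": {"partition": "0", "topic": "topic1", "consumer_group": "cg1"}
	}`)

	// Assumed base URL for a locally running query-service.
	url := "http://localhost:8080/api/v1/messaging-queues/kafka/consumer-lag/consumer-details"
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```

The `producer-details` endpoint below works the same way, minus the `consumer_group` variable.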

response in query range format `table`
```json
{
  "status": "success",
  "data": {
    "resultType": "",
    "result": [
      {
        "table": {
          "columns": [
            {
              "name": "service_name",
              "queryName": "",
              "isValueColumn": false
            },
            {
              "name": "p99",
              "queryName": "",
              "isValueColumn": false
            },
            {
              "name": "error_rate",
              "queryName": "",
              "isValueColumn": false
            },
            {
              "name": "throughput",
              "queryName": "",
              "isValueColumn": false
            },
            {
              "name": "avg_msg_size",
              "queryName": "",
              "isValueColumn": false
            }
          ],
          "rows": [
            {
              "data": {
                "avg_msg_size": "0",
                "error_rate": "0",
                "p99": "0.2942205100000016",
                "service_name": "consumer-svc",
                "throughput": "0.00016534391534391533"
              }
            }
          ]
        }
      }
    ]
  }
}
```

### 3) Producer Details

API endpoint:

```
POST /api/v1/messaging-queues/kafka/consumer-lag/producer-details
```

```json
{
  "start": 1720685296000000000,
  "end": 1721290096000000000,
  "variables": {
    "partition": "0",
    "topic": "topic1"
  }
}
```

response in query range format `table`
```json
{
  "status": "success",
  "data": {
    "resultType": "",
    "result": [
      {
        "table": {
          "columns": [
            {
              "name": "service_name",
              "queryName": "",
              "isValueColumn": false
            },
            {
              "name": "p99_query.p99",
              "queryName": "",
              "isValueColumn": false
            },
            {
              "name": "error_rate",
              "queryName": "",
              "isValueColumn": false
            },
            {
              "name": "rps",
              "queryName": "",
              "isValueColumn": false
            }
          ],
          "rows": [
            {
              "data": {
                "error_rate": "0",
                "p99_query.p99": "150.08830908000002",
                "rps": "0.00016534391534391533",
                "service_name": "producer-svc"
              }
            }
          ]
        }
      }
    ]
  }
}
```
@@ -0,0 +1,9 @@
package kafka

const kafkaQueue = "kafka"

type MessagingQueue struct {
	Start     int64             `json:"start"`
	End       int64             `json:"end"`
	Variables map[string]string `json:"variables,omitempty"`
}
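
`Start` and `End` are Unix timestamps in nanoseconds, as the sample payloads above show. A sketch of how a client might fill the request shape in Go (the local struct mirrors `MessagingQueue` for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Mirrors the MessagingQueue request shape defined above.
type messagingQueueRequest struct {
	Start     int64             `json:"start"`
	End       int64             `json:"end"`
	Variables map[string]string `json:"variables,omitempty"`
}

func main() {
	now := time.Now()
	req := messagingQueueRequest{
		// UnixNano yields the nanosecond timestamps the API expects.
		Start:     now.Add(-7 * 24 * time.Hour).UnixNano(),
		End:       now.UnixNano(),
		Variables: map[string]string{"partition": "0", "topic": "topic1"},
	}
	body, _ := json.Marshal(req)
	fmt.Println(string(body))
}
```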
@@ -0,0 +1,76 @@
package kafka

import (
	"fmt"
)

func generateConsumerSQL(start, end int64, topic, partition, consumerGroup, queueType string) string {
	timeRange := (end - start) / 1000000000
	query := fmt.Sprintf(`
WITH consumer_query AS (
    SELECT
        serviceName,
        quantile(0.99)(durationNano) / 1000000 AS p99,
        COUNT(*) AS total_requests,
        SUM(CASE WHEN statusCode = 2 THEN 1 ELSE 0 END) AS error_count,
        avg(CASE WHEN has(numberTagMap, 'messaging.message.body.size') THEN numberTagMap['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size
    FROM signoz_traces.distributed_signoz_index_v2
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND kind = 5
        AND msgSystem = '%s'
        AND stringTagMap['messaging.destination.name'] = '%s'
        AND stringTagMap['messaging.destination.partition.id'] = '%s'
        AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
    GROUP BY serviceName
)

-- Main query to select all metrics
SELECT
    serviceName AS service_name,
    p99,
    COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
    COALESCE(total_requests / %d, 0) AS throughput, -- requests per second over the selected window
    COALESCE(avg_msg_size, 0) AS avg_msg_size
FROM
    consumer_query
ORDER BY
    serviceName;
`, start, end, queueType, topic, partition, consumerGroup, timeRange)
	return query
}

func generateProducerSQL(start, end int64, topic, partition, queueType string) string {
	timeRange := (end - start) / 1000000000
	query := fmt.Sprintf(`
WITH producer_query AS (
    SELECT
        serviceName,
        quantile(0.99)(durationNano) / 1000000 AS p99,
        count(*) AS total_count,
        SUM(CASE WHEN statusCode = 2 THEN 1 ELSE 0 END) AS error_count
    FROM signoz_traces.distributed_signoz_index_v2
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND kind = 4
        AND msgSystem = '%s'
        AND stringTagMap['messaging.destination.name'] = '%s'
        AND stringTagMap['messaging.destination.partition.id'] = '%s'
    GROUP BY serviceName
)

SELECT
    serviceName AS service_name,
    p99,
    COALESCE((error_count * 100.0) / total_count, 0) AS error_percentage,
    COALESCE(total_count / %d, 0) AS rps -- requests per second over the selected window
FROM
    producer_query
ORDER BY
    serviceName;

`, start, end, queueType, topic, partition, timeRange)
	return query
}
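
Both generators divide the aggregate count by `timeRange`, the window length in seconds, to report a per-second rate. A quick sanity check of that arithmetic against the sample payloads above (the span count of 100 is an assumed input):

```go
package main

import "fmt"

func main() {
	start := int64(1720685296000000000) // ns, from the sample request
	end := int64(1721290096000000000)   // ns, from the sample request

	// Same computation as the generators: nanoseconds -> seconds.
	timeRange := (end - start) / 1000000000 // 604800 s, i.e. a 7-day window

	totalCount := 100 // assumed number of matching spans in the window
	fmt.Println(float64(totalCount) / float64(timeRange))
	// ≈ 0.00016534, matching the rps/throughput seen in the sample responses
}
```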
@@ -0,0 +1,74 @@
package kafka

import (
	"fmt"

	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

var defaultStepInterval int64 = 60

func BuildQueryRangeParams(messagingQueue *MessagingQueue, queryContext string) (*v3.QueryRangeParamsV3, error) {

	// ToDo: propagate this through APIs when there are different handlers
	queueType := kafkaQueue

	var cq *v3.CompositeQuery

	chq, err := buildClickHouseQuery(messagingQueue, queueType, queryContext)

	if err != nil {
		return nil, err
	}

	cq, err = buildCompositeQuery(chq, queryContext)
	if err != nil {
		return nil, err
	}

	queryRangeParams := &v3.QueryRangeParamsV3{
		Start:          messagingQueue.Start,
		End:            messagingQueue.End,
		Step:           defaultStepInterval,
		CompositeQuery: cq,
		Version:        "v4",
		FormatForWeb:   true,
	}

	return queryRangeParams, nil
}

func buildClickHouseQuery(messagingQueue *MessagingQueue, queueType string, queryContext string) (*v3.ClickHouseQuery, error) {
	start := messagingQueue.Start
	end := messagingQueue.End
	topic, ok := messagingQueue.Variables["topic"]
	if !ok {
		return nil, fmt.Errorf("invalid type for Topic")
	}

	partition, ok := messagingQueue.Variables["partition"]
	if !ok {
		return nil, fmt.Errorf("invalid type for Partition")
	}

	var query string
	if queryContext == "producer" {
		query = generateProducerSQL(start, end, topic, partition, queueType)
	} else if queryContext == "consumer" {
		// consumer_group is only required for consumer queries; the
		// producer-details request (see the doc above) does not send it
		consumerGroup, ok := messagingQueue.Variables["consumer_group"]
		if !ok {
			return nil, fmt.Errorf("invalid type for consumer group")
		}
		query = generateConsumerSQL(start, end, topic, partition, consumerGroup, queueType)
	}

	return &v3.ClickHouseQuery{
		Query: query,
	}, nil
}

func buildCompositeQuery(chq *v3.ClickHouseQuery, queryContext string) (*v3.CompositeQuery, error) {
	return &v3.CompositeQuery{
		QueryType:         v3.QueryTypeClickHouseSQL,
		ClickHouseQueries: map[string]*v3.ClickHouseQuery{queryContext: chq},
		PanelType:         v3.PanelTypeTable,
	}, nil
}
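
A usage sketch tying the pieces together. The import path is inferred from this package's location and is an assumption; error handling is kept minimal:

```go
package main

import (
	"fmt"
	"log"

	// Assumed import path, inferred from the package location.
	kafka "go.signoz.io/signoz/pkg/query-service/app/integrations/messagingQueues/kafka"
)

func main() {
	mqReq := &kafka.MessagingQueue{
		Start:     1720685296000000000,
		End:       1721290096000000000,
		Variables: map[string]string{"partition": "0", "topic": "topic1"},
	}

	// "producer" selects generateProducerSQL; "consumer" would also
	// require a consumer_group variable.
	params, err := kafka.BuildQueryRangeParams(mqReq, "producer")
	if err != nil {
		log.Fatal(err)
	}

	// The resulting params carry a ClickHouse SQL query keyed by the query
	// context, with a table panel type and FormatForWeb set, ready to be
	// handed to querierV2.QueryRange.
	fmt.Println(params.CompositeQuery.ClickHouseQueries["producer"].Query)
}
```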
pkg/query-service/app/integrations/messagingQueues/readme.md
@@ -0,0 +1,10 @@
## Integration: Messaging Queue

This package contains the `api` and `translation` logic to support messaging queue features.

Currently supported queues:
1) Kafka

For detailed setup, check out our public docs for configuring:
1) Trace collection from Clients (Producer and Consumer)
2) Metrics collection from Kafka Brokers, Producers and Consumers
@@ -2,6 +2,7 @@ package app

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
@@ -837,6 +838,50 @@ func parseAggregateAttributeRequest(r *http.Request) (*v3.AggregateAttributeRequ
	return &req, nil
}

func parseQBFilterSuggestionsRequest(r *http.Request) (
	*v3.QBFilterSuggestionsRequest, *model.ApiError,
) {
	dataSource := v3.DataSource(r.URL.Query().Get("dataSource"))
	if err := dataSource.Validate(); err != nil {
		return nil, model.BadRequest(err)
	}

	limit := baseconstants.DefaultFilterSuggestionsLimit
	limitStr := r.URL.Query().Get("limit")
	if len(limitStr) > 0 {
		// parse into a separate variable to avoid shadowing the outer limit
		parsedLimit, err := strconv.Atoi(limitStr)
		if err != nil || parsedLimit < 1 {
			return nil, model.BadRequest(fmt.Errorf(
				"invalid limit: %s", limitStr,
			))
		}
		limit = parsedLimit
	}

	var existingFilter *v3.FilterSet
	existingFilterB64 := r.URL.Query().Get("existingFilter")
	if len(existingFilterB64) > 0 {
		decodedFilterJson, err := base64.RawURLEncoding.DecodeString(existingFilterB64)
		if err != nil {
			return nil, model.BadRequest(fmt.Errorf("couldn't base64 decode existingFilter: %w", err))
		}

		existingFilter = &v3.FilterSet{}
		err = json.Unmarshal(decodedFilterJson, existingFilter)
		if err != nil {
			return nil, model.BadRequest(fmt.Errorf("couldn't JSON decode existingFilter: %w", err))
		}
	}

	searchText := r.URL.Query().Get("searchText")

	return &v3.QBFilterSuggestionsRequest{
		DataSource:     dataSource,
		Limit:          limit,
		SearchText:     searchText,
		ExistingFilter: existingFilter,
	}, nil
}
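
Since the parser decodes `existingFilter` with `base64.RawURLEncoding`, a client has to encode the same way. A sketch of building that query parameter (the route shown in the output is hypothetical):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

func main() {
	// An existing filter the user has already applied in the query builder.
	filter := v3.FilterSet{
		Operator: "AND",
		Items: []v3.FilterItem{
			{
				Key:      v3.AttributeKey{Key: "service.name"},
				Operator: "=",
				Value:    "consumer-svc",
			},
		},
	}

	// JSON-encode, then base64 with RawURLEncoding to match the parser.
	filterJson, _ := json.Marshal(filter)
	param := base64.RawURLEncoding.EncodeToString(filterJson)

	// Hypothetical suggestions route, for illustration only.
	fmt.Printf("/filter_suggestions?dataSource=logs&existingFilter=%s\n", param)
}
```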

func parseFilterAttributeKeyRequest(r *http.Request) (*v3.FilterAttributeKeyRequest, error) {
	var req v3.FilterAttributeKeyRequest


@@ -251,7 +251,7 @@ func filterCachedPoints(cachedSeries []*v3.Series, start, end int64) {
	for _, c := range cachedSeries {
		points := []v3.Point{}
		for _, p := range c.Points {
			if p.Timestamp < start || p.Timestamp > end {
			if (p.Timestamp < start || p.Timestamp > end) && p.Timestamp != 0 {
				continue
			}
			points = append(points, p)

@@ -758,8 +758,8 @@ func TestQueryRangeTimeShift(t *testing.T) {
func TestQueryRangeTimeShiftWithCache(t *testing.T) {
	params := []*v3.QueryRangeParamsV3{
		{
			Start: 1675115596722 + 60*60*1000 - 86400*1000, //30, 4:23
			End:   1675115596722 + 120*60*1000 - 86400*1000, //30, 5:23
			Start: 1675115596722 + 60*60*1000 - 86400*1000, //30th Jan, 4:23
			End:   1675115596722 + 120*60*1000 - 86400*1000, //30th Jan, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
@@ -785,8 +785,8 @@ func TestQueryRangeTimeShiftWithCache(t *testing.T) {
			},
		},
		{
			Start: 1675115596722, //31, 3:23
			End:   1675115596722 + 120*60*1000, //31, 5:23
			Start: 1675115596722, //31st Jan, 3:23
			End:   1675115596722 + 120*60*1000, //31st Jan, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
@@ -824,8 +824,8 @@ func TestQueryRangeTimeShiftWithCache(t *testing.T) {
		{
			Labels: map[string]string{},
			Points: []v3.Point{
				{Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1},
				{Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2},
				{Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1}, // 30th Jan, 4:23
				{Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2}, // 30th Jan, 6:23
			},
		},
	},
@@ -835,7 +835,7 @@ func TestQueryRangeTimeShiftWithCache(t *testing.T) {

	// logs queries are generated in ns
	expectedTimeRangeInQueryString := []string{
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000),
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+60*60*1000)-86400*1000-1)*1000000),
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000),
	}

	for i, param := range params {
@@ -856,8 +856,8 @@ func TestQueryRangeTimeShiftWithCache(t *testing.T) {
func TestQueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
	params := []*v3.QueryRangeParamsV3{
		{
			Start: 1675115596722 + 60*60*1000 - 86400*1000, //30, 4:23
			End:   1675115596722 + 120*60*1000 - 86400*1000, //30, 5:23
			Start: 1675115596722 + 60*60*1000 - 86400*1000, //30th Jan, 4:23
			End:   1675115596722 + 120*60*1000 - 86400*1000, //30th Jan, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
@@ -884,8 +884,8 @@ func TestQueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
			},
		},
		{
			Start: 1675115596722, //31, 3:23
			End:   1675115596722 + 120*60*1000, //31, 5:23
			Start: 1675115596722, //31st Jan, 3:23
			End:   1675115596722 + 120*60*1000, //31st Jan, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
@@ -924,8 +924,8 @@ func TestQueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
		{
			Labels: map[string]string{},
			Points: []v3.Point{
				{Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1},
				{Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2},
				{Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1}, // 30th Jan, 4:23
				{Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2}, // 30th Jan, 6:23
			},
		},
	},
@@ -935,7 +935,7 @@ func TestQueryRangeTimeShiftWithLimitAndCache(t *testing.T) {

	// logs queries are generated in ns
	expectedTimeRangeInQueryString := []string{
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000),
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+60*60*1000)-86400*1000-1)*1000000),
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000),
	}

	for i, param := range params {

@@ -306,7 +306,7 @@ func (q *querier) runBuilderQuery(
			}

			// response doesn't need everything
			filterCachedPoints(mergedSeries, params.Start, params.End)
			filterCachedPoints(mergedSeries, start, end)

			ch <- channelResult{
				Err: nil,

@@ -89,9 +89,11 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
	}
}

// execClickHouseQuery executes the clickhouse query and returns the series list
// if testing mode is enabled, it returns the mocked series list
func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3.Series, error) {
	q.queriesExecuted = append(q.queriesExecuted, query)
	if q.testingMode && q.reader == nil {
		q.queriesExecuted = append(q.queriesExecuted, query)
		return q.returnedSeries, q.returnedErr
	}
	result, err := q.reader.GetTimeSeriesResultV3(ctx, query)
@@ -116,9 +118,11 @@ func (q *querier) execClickHouseQuery(ctx context.Context, query string) ([]*v3.
	return result, err
}

// execPromQuery executes the prom query and returns the series list
// if testing mode is enabled, it returns the mocked series list
func (q *querier) execPromQuery(ctx context.Context, params *model.QueryRangeParams) ([]*v3.Series, error) {
	q.queriesExecuted = append(q.queriesExecuted, params.Query)
	if q.testingMode && q.reader == nil {
		q.queriesExecuted = append(q.queriesExecuted, params.Query)
		q.timeRanges = append(q.timeRanges, []int{int(params.Start.UnixMilli()), int(params.End.UnixMilli())})
		return q.returnedSeries, q.returnedErr
	}
@@ -226,6 +230,9 @@ func (q *querier) findMissingTimeRanges(start, end, step int64, cachedData []byt
	return findMissingTimeRanges(start, end, step, cachedSeriesList, q.fluxInterval)
}

// labelsToString converts the labels map to a string
// sorted by key so that the string is consistent
// across different runs
func labelsToString(labels map[string]string) string {
	type label struct {
		Key string
@@ -245,11 +252,15 @@ func labelsToString(labels map[string]string) string {
	return fmt.Sprintf("{%s}", strings.Join(labelKVs, ","))
}

// filterCachedPoints filters the points in the series list
// that are outside the start and end time range
// and returns the filtered series list
// TODO(srikanthccv): is this really needed?
func filterCachedPoints(cachedSeries []*v3.Series, start, end int64) {
	for _, c := range cachedSeries {
		points := []v3.Point{}
		for _, p := range c.Points {
			if p.Timestamp < start || p.Timestamp > end {
			if (p.Timestamp < start || p.Timestamp > end) && p.Timestamp != 0 {
				continue
			}
			points = append(points, p)
@@ -258,6 +269,8 @@ func filterCachedPoints(cachedSeries []*v3.Series, start, end int64) {
	}
}

// mergeSerieses merges the cached series and the missed series
// and returns the merged series list
func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
	// Merge the missed series with the cached series by timestamp
	mergedSeries := make([]*v3.Series, 0)
@@ -275,7 +288,9 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
		}
		seriesesByLabels[labelsToString(series.Labels)].Points = append(seriesesByLabels[labelsToString(series.Labels)].Points, series.Points...)
	}

	// Sort the points in each series by timestamp
	// and remove duplicate points
	for idx := range seriesesByLabels {
		series := seriesesByLabels[idx]
		series.SortPoints()
@@ -499,6 +514,8 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
	return res, nil, nil
}

// QueryRange is the main function that runs the queries
// and returns the results
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
	var results []*v3.Result
	var err error
@@ -539,10 +556,16 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3,
	return results, errQueriesByName, err
}

// QueriesExecuted returns the list of queries executed
// in the last query range call
// used for testing
func (q *querier) QueriesExecuted() []string {
	return q.queriesExecuted
}

// TimeRanges returns the list of time ranges
// that were used to fetch the data
// used for testing
func (q *querier) TimeRanges() [][]int {
	return q.timeRanges
}

@@ -411,12 +411,13 @@ func TestV2FindMissingTimeRangesWithFluxInterval(t *testing.T) {
	}
}

func TestV2QueryRange(t *testing.T) {
func TestV2QueryRangePanelGraph(t *testing.T) {
	params := []*v3.QueryRangeParamsV3{
		{
			Start: 1675115596722,
			End:   1675115596722 + 120*60*1000,
			Step:  60,
			Start: 1675115596722, // 31st Jan, 03:23:16
			End:   1675115596722 + 120*60*1000, // 31st Jan, 05:23:16
			Step:  60,
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeGraph,
@@ -450,8 +451,8 @@ func TestV2QueryRange(t *testing.T) {
			},
		},
		{
			Start: 1675115596722 + 60*60*1000,
			End:   1675115596722 + 180*60*1000,
			Start: 1675115596722 + 60*60*1000, // 31st Jan, 04:23:16
			End:   1675115596722 + 180*60*1000, // 31st Jan, 06:23:16
			Step:  60,
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
@@ -569,19 +570,21 @@ func TestV2QueryRange(t *testing.T) {
				"__name__": "http_server_requests_seconds_count",
			},
			Points: []v3.Point{
				{Timestamp: 1675115596722, Value: 1},
				{Timestamp: 1675115596722 + 60*60*1000, Value: 2},
				{Timestamp: 1675115596722 + 120*60*1000, Value: 3},
				{Timestamp: 1675115596722, Value: 1}, // 31st Jan, 03:23:16
				{Timestamp: 1675115596722 + 60*60*1000, Value: 2}, // 31st Jan, 04:23:16
				{Timestamp: 1675115596722 + 120*60*1000, Value: 3}, // 31st Jan, 05:23:16
			},
		},
	},
	}
	q := NewQuerier(opts)
	expectedTimeRangeInQueryString := []string{
		fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000, 1675115580000+120*60*1000),
		fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000+120*60*1000, 1675115580000+180*60*1000),
		fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", 1675115580000*1000000, (1675115580000+120*60*1000)*int64(1000000)),
		fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)),
		fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000, 1675115580000+120*60*1000), // 31st Jan, 03:23:00 to 31st Jan, 05:23:00
		// second query uses the cached data from the first query
		fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000+120*60*1000, 1675115580000+180*60*1000), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00
		// No caching for traces yet
		fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", 1675115580000*1000000, (1675115580000+120*60*1000)*int64(1000000)), // 31st Jan, 03:23:00 to 31st Jan, 05:23:00
		fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)), // 31st Jan, 04:23:00 to 31st Jan, 06:23:00
	}

	for i, param := range params {
@@ -600,12 +603,12 @@ func TestV2QueryRange(t *testing.T) {
}

func TestV2QueryRangeValueType(t *testing.T) {
	// There shouldn't be any caching for value panel type
	params := []*v3.QueryRangeParamsV3{
		{
			Start: 1675115596722,
			End:   1675115596722 + 120*60*1000,
			Step:  5 * time.Minute.Milliseconds(),
			Start: 1675115596722, // 31st Jan, 03:23:16
			End:   1675115596722 + 120*60*1000, // 31st Jan, 05:23:16
			Step:  5 * time.Minute.Milliseconds(),
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeValue,
@@ -635,9 +638,43 @@ func TestV2QueryRangeValueType(t *testing.T) {
			},
		},
		{
			Start: 1675115596722 + 60*60*1000,
			End:   1675115596722 + 180*60*1000,
			Step:  5 * time.Minute.Milliseconds(),
			Start: 1675115596722 + 60*60*1000, // 31st Jan, 04:23:16
			End:   1675115596722 + 180*60*1000, // 31st Jan, 06:23:16
			Step:  60,
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeValue,
				BuilderQueries: map[string]*v3.BuilderQuery{
					"A": {
						QueryName:          "A",
						Temporality:        v3.Delta,
						StepInterval:       60,
						AggregateAttribute: v3.AttributeKey{Key: "http_server_requests_seconds_count", Type: v3.AttributeKeyTypeUnspecified, DataType: "float64", IsColumn: true},
						DataSource:         v3.DataSourceMetrics,
						Filters: &v3.FilterSet{
							Operator: "AND",
							Items: []v3.FilterItem{
								{
									Key:      v3.AttributeKey{Key: "method", IsColumn: false},
									Operator: "=",
									Value:    "GET",
								},
							},
						},
						AggregateOperator: v3.AggregateOperatorSumRate,
						TimeAggregation:   v3.TimeAggregationRate,
						SpaceAggregation:  v3.SpaceAggregationSum,
						Expression:        "A",
					},
				},
			},
		},
		{
			Start: 1675115596722 + 60*60*1000, // 31st Jan, 04:23:16
			End:   1675115596722 + 180*60*1000, // 31st Jan, 06:23:16
			Step:  5 * time.Minute.Milliseconds(),
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeValue,
@@ -681,18 +718,18 @@ func TestV2QueryRangeValueType(t *testing.T) {
				"__name__": "http_server_requests_seconds_count",
			},
			Points: []v3.Point{
				{Timestamp: 1675115596722, Value: 1},
				{Timestamp: 1675115596722 + 60*60*1000, Value: 2},
				{Timestamp: 1675115596722 + 120*60*1000, Value: 3},
				{Timestamp: 1675115596722, Value: 1}, // 31st Jan, 03:23:16
				{Timestamp: 1675115596722 + 60*60*1000, Value: 2}, // 31st Jan, 04:23:16
				{Timestamp: 1675115596722 + 120*60*1000, Value: 3}, // 31st Jan, 05:23:16
			},
		},
	},
	}
	q := NewQuerier(opts)
	// No caching
	expectedTimeRangeInQueryString := []string{
		fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000),
		fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)),
		fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000), // 31st Jan, 03:23:00 to 31st Jan, 05:23:00
		fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000+120*60*1000, 1675115580000+180*60*1000), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00
		fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675115580000+60*60*1000)*int64(1000000), (1675115580000+180*60*1000)*int64(1000000)), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00
	}

	for i, param := range params {
@@ -714,9 +751,10 @@ func TestV2QueryRangeValueType(t *testing.T) {
func TestV2QueryRangeTimeShift(t *testing.T) {
	params := []*v3.QueryRangeParamsV3{
		{
			Start: 1675115596722, //31, 3:23
			End:   1675115596722 + 120*60*1000, //31, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			Start: 1675115596722, //31, 3:23
			End:   1675115596722 + 120*60*1000, //31, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeGraph,
@@ -766,9 +804,10 @@ func TestV2QueryRangeTimeShift(t *testing.T) {
func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
	params := []*v3.QueryRangeParamsV3{
		{
			Start: 1675115596722 + 60*60*1000 - 86400*1000, //30, 4:23
			End:   1675115596722 + 120*60*1000 - 86400*1000, //30, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			Start: 1675115596722 + 60*60*1000 - 86400*1000, //30th Jan, 4:23
			End:   1675115596722 + 120*60*1000 - 86400*1000, //30th Jan, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeGraph,
@@ -793,9 +832,10 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
			},
		},
		{
			Start: 1675115596722, //31, 3:23
			End:   1675115596722 + 120*60*1000, //31, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			Start: 1675115596722, //31st Jan, 3:23
			End:   1675115596722 + 120*60*1000, //31st Jan, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeGraph,
@@ -832,8 +872,8 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
		{
			Labels: map[string]string{},
			Points: []v3.Point{
				{Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1},
				{Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2},
				{Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1}, // 30th Jan, 4:23
				{Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2}, // 30th Jan, 5:23
			},
		},
	},
@@ -842,8 +882,8 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {

	// logs queries are generated in ns
	expectedTimeRangeInQueryString := []string{
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000),
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+60*60*1000)-86400*1000-1)*1000000),
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000), // 30th Jan, 4:23 to 30th Jan, 5:23
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000), // 30th Jan, 3:23 to 30th Jan, 5:23
	}

	for i, param := range params {
@@ -864,9 +904,10 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
	params := []*v3.QueryRangeParamsV3{
		{
			Start: 1675115596722 + 60*60*1000 - 86400*1000, //30, 4:23
			End:   1675115596722 + 120*60*1000 - 86400*1000, //30, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			Start: 1675115596722 + 60*60*1000 - 86400*1000, //30th Jan, 4:23
			End:   1675115596722 + 120*60*1000 - 86400*1000, //30th Jan, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeGraph,
@@ -892,9 +933,10 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
			},
		},
		{
			Start: 1675115596722, //31, 3:23
			End:   1675115596722 + 120*60*1000, //31, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			Start: 1675115596722, //31st Jan, 3:23
			End:   1675115596722 + 120*60*1000, //31st Jan, 5:23
			Step:  5 * time.Minute.Milliseconds(),
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				PanelType: v3.PanelTypeGraph,
@@ -932,8 +974,8 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
		{
			Labels: map[string]string{},
			Points: []v3.Point{
				{Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1},
				{Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2},
				{Timestamp: 1675115596722 + 60*60*1000 - 86400*1000, Value: 1}, // 30th Jan, 4:23
				{Timestamp: 1675115596722 + 120*60*1000 - 86400*1000 + 60*60*1000, Value: 2}, // 30th Jan, 6:23
			},
		},
	},
@@ -942,8 +984,8 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {

	// logs queries are generated in ns
	expectedTimeRangeInQueryString := []string{
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000),
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+60*60*1000)-86400*1000-1)*1000000),
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722+60*60*1000-86400*1000)*1000000, (1675115596722+120*60*1000-86400*1000)*1000000), // 30th Jan, 4:23 to 30th Jan, 5:23
		fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000),
	}

	for i, param := range params {
@@ -964,9 +1006,10 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) {
	// There shouldn't be any caching for value panel type
	params := []*v3.QueryRangeParamsV3{
		{
			Start: 1675115596722,
			End:   1675115596722 + 120*60*1000,
			Step:  5 * time.Minute.Milliseconds(),
			Start: 1675115596722,
			End:   1675115596722 + 120*60*1000,
			Step:  5 * time.Minute.Milliseconds(),
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypePromQL,
				PanelType: v3.PanelTypeValue,
@@ -978,9 +1021,10 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) {
			},
		},
		{
			Start: 1675115596722 + 60*60*1000,
			End:   1675115596722 + 180*60*1000,
			Step:  5 * time.Minute.Milliseconds(),
			Start: 1675115596722 + 60*60*1000,
			End:   1675115596722 + 180*60*1000,
			Step:  5 * time.Minute.Milliseconds(),
			Version: "v4",
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypePromQL,
				PanelType: v3.PanelTypeValue,
@@ -70,6 +70,22 @@ func funcAbsolute(result *v3.Result) *v3.Result {
	return result
}

// funcRunningDiff returns the running difference of each point
func funcRunningDiff(result *v3.Result) *v3.Result {
	for _, series := range result.Series {
		// iterate over the points in reverse order
		for idx := len(series.Points) - 1; idx >= 0; idx-- {
			if idx > 0 {
				series.Points[idx].Value = series.Points[idx].Value - series.Points[idx-1].Value
			}
		}
		// remove the first point; the time range is already adjusted in the query range
		series.Points = series.Points[1:]
	}
	return result
}
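
In effect each point becomes `value[i] - value[i-1]` and the first point is dropped, so a cumulative counter turns into per-interval increments. A minimal fragment illustrating the behaviour (usable inside this package, e.g. in a test; the types are the `v3` model types already imported here):

```go
// Input:  values 1, 2, 3 at timestamps 1, 2, 3
// Output: values 1, 1    at timestamps 2, 3 (first point removed)
res := funcRunningDiff(&v3.Result{
	Series: []*v3.Series{
		{Points: []v3.Point{{Timestamp: 1, Value: 1}, {Timestamp: 2, Value: 2}, {Timestamp: 3, Value: 3}}},
	},
})
_ = res
```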

// funcLog2 returns the log2 of each point
func funcLog2(result *v3.Result) *v3.Result {
	for _, series := range result.Series {
@@ -256,6 +272,8 @@ func ApplyFunction(fn v3.Function, result *v3.Result) *v3.Result {
		}
	case v3.FunctionNameAbsolute:
		return funcAbsolute(result)
	case v3.FunctionNameRunningDiff:
		return funcRunningDiff(result)
	case v3.FunctionNameLog2:
		return funcLog2(result)
	case v3.FunctionNameLog10:

@@ -602,3 +602,70 @@ func TestFuncMedian5(t *testing.T) {
		}
	}
}

func TestFuncRunningDiff(t *testing.T) {
	type args struct {
		result *v3.Result
	}

	tests := []struct {
		name string
		args args
		want *v3.Result
	}{
		{
			name: "test funcRunningDiff",
			args: args{
				result: &v3.Result{
					Series: []*v3.Series{
						{
							Points: []v3.Point{{Timestamp: 1, Value: 1}, {Timestamp: 2, Value: 2}, {Timestamp: 3, Value: 3}},
						},
					},
				},
			},
			want: &v3.Result{
				Series: []*v3.Series{
					{
						Points: []v3.Point{{Timestamp: 2, Value: 1}, {Timestamp: 3, Value: 1}},
					},
				},
			},
		},
		{
			name: "test funcRunningDiff with start number as 8",
			args: args{
				result: &v3.Result{
					Series: []*v3.Series{
						{
							Points: []v3.Point{{Timestamp: 1, Value: 8}, {Timestamp: 2, Value: 8}, {Timestamp: 3, Value: 8}},
						},
					},
				},
			},
			want: &v3.Result{
				Series: []*v3.Series{
					{
						Points: []v3.Point{{Timestamp: 2, Value: 0}, {Timestamp: 3, Value: 0}},
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := funcRunningDiff(tt.args.result)
			for j, series := range got.Series {
				if len(series.Points) != len(tt.want.Series[j].Points) {
					t.Errorf("funcRunningDiff() = len(series.Points) %v, len(tt.want.Series[j].Points) %v", len(series.Points), len(tt.want.Series[j].Points))
				}
				for k, point := range series.Points {
					if point.Value != tt.want.Series[j].Points[k].Value {
						t.Errorf("funcRunningDiff() = %v, want %v", point.Value, tt.want.Series[j].Points[k].Value)
					}
				}
			}
		})
	}
}
@@ -322,13 +322,12 @@ func isLogExpression(expression *govaluate.EvaluableExpression, params *v3.Query
func (c *cacheKeyGenerator) GenerateKeys(params *v3.QueryRangeParamsV3) map[string]string {
	keys := make(map[string]string)

	// For non-graph panels, we don't support caching
	if params.CompositeQuery.PanelType != v3.PanelTypeGraph {
		return keys
	}

	// Use query as the cache key for PromQL queries
	if params.CompositeQuery.QueryType == v3.QueryTypePromQL {
		if params.CompositeQuery.PanelType != v3.PanelTypeGraph {
			return keys
		}

		for name, query := range params.CompositeQuery.PromQueries {
			keys[name] = query.Query
		}
@@ -338,6 +337,11 @@ func (c *cacheKeyGenerator) GenerateKeys(params *v3.QueryRangeParamsV3) map[stri
	// Build keys for each builder query
	for queryName, query := range params.CompositeQuery.BuilderQueries {
		if query.Expression == queryName && query.DataSource == v3.DataSourceLogs {

			if params.CompositeQuery.PanelType != v3.PanelTypeGraph {
				continue
			}

			var parts []string

			// We need to build a unique cache query for each BuilderQuery
@@ -346,6 +350,10 @@ func (c *cacheKeyGenerator) GenerateKeys(params *v3.QueryRangeParamsV3) map[stri
			parts = append(parts, fmt.Sprintf("aggregate=%s", query.AggregateOperator))
			parts = append(parts, fmt.Sprintf("limit=%d", query.Limit))

			if query.ShiftBy != 0 {
				parts = append(parts, fmt.Sprintf("shiftBy=%d", query.ShiftBy))
			}

			if query.AggregateAttribute.Key != "" {
				parts = append(parts, fmt.Sprintf("aggregateAttribute=%s", query.AggregateAttribute.CacheKey()))
			}
@@ -379,6 +387,22 @@ func (c *cacheKeyGenerator) GenerateKeys(params *v3.QueryRangeParamsV3) map[stri
		} else if query.Expression == queryName && query.DataSource == v3.DataSourceMetrics {
			var parts []string

			// what is this condition checking?
			// there are two versions of the metric query builder, v3 and v4,
			// and the way the query is built differs between them.
			// in v3, only the time series panel type returns time series data;
			// every other panel type returns just a single value,
			// which means we can't use the previous results for caching.
			// in v4, however, the result of every panel type is time series data
			// that gets aggregated in the query service and then converted to a single value,
			// so we can use the previous results for caching.

			// if version is not v4 (it can be empty or v3) and panel type is not graph,
			// then we can't use the previous results for caching
			if params.Version != "v4" && params.CompositeQuery.PanelType != v3.PanelTypeGraph {
				continue
			}

			// We need to build a unique cache query for each BuilderQuery

			parts = append(parts, fmt.Sprintf("source=%s", query.DataSource))
@@ -387,6 +411,10 @@ func (c *cacheKeyGenerator) GenerateKeys(params *v3.QueryRangeParamsV3) map[stri
			parts = append(parts, fmt.Sprintf("timeAggregation=%s", query.TimeAggregation))
			parts = append(parts, fmt.Sprintf("spaceAggregation=%s", query.SpaceAggregation))

			if query.ShiftBy != 0 {
				parts = append(parts, fmt.Sprintf("shiftBy=%d", query.ShiftBy))
			}

			if query.AggregateAttribute.Key != "" {
				parts = append(parts, fmt.Sprintf("aggregateAttribute=%s", query.AggregateAttribute.CacheKey()))
			}

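The long comment in the hunk above boils down to a single predicate. A hypothetical helper capturing it (canCacheMetricsQuery is not part of the diff, just a restatement of the rule):

	// v4 shapes every panel type as time series data before reducing it,
	// so all panel types can reuse cached results; v3 (or an empty
	// version) can only cache graph panels.
	func canCacheMetricsQuery(version string, panelType v3.PanelType) bool {
		return version == "v4" || panelType == v3.PanelTypeGraph
	}
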
@@ -7,6 +7,7 @@ import (
	"github.com/stretchr/testify/require"
	logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
	metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
	"go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/featureManager"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
@@ -583,3 +584,662 @@ func TestLogsQueryWithFormula(t *testing.T) {
	}

}

func TestGenerateCacheKeysMetricsBuilder(t *testing.T) {
	testCases := []struct {
		name              string
		query             *v3.QueryRangeParamsV3
		expectedCacheKeys map[string]string
	}{
		// v3 - only the graph builder queries can be cached
		{
			name: "version=v3;panelType=graph;dataSource=metrics;queryType=builder",
			query: &v3.QueryRangeParamsV3{
				Version: "v3",
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeGraph,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceMetrics,
							AggregateOperator:  v3.AggregateOperatorSumRate,
							AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
							Temporality:        v3.Delta,
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "le"},
							},
							Expression: "A",
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
			},
		},
		{
			name: "version=v3;panelType=graph;dataSource=metrics;queryType=builder with limit", // limit should not be part of the cache key
			query: &v3.QueryRangeParamsV3{
				Version: "v3",
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeGraph,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceMetrics,
							AggregateOperator:  v3.AggregateOperatorSumRate,
							AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
							Temporality:        v3.Delta,
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "le"},
							},
							Expression: "A",
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
							Limit: 10,
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
			},
		},
		{
			name: "version=v3;panelType=graph;dataSource=metrics;queryType=builder with shiftBy", // shiftBy should be part of the cache key
			query: &v3.QueryRangeParamsV3{
				Version: "v3",
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeGraph,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceMetrics,
							AggregateOperator:  v3.AggregateOperatorSumRate,
							AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
							Temporality:        v3.Delta,
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "le"},
							},
							Expression: "A",
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
							Limit:   10,
							ShiftBy: 86400,
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&shiftBy=86400&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
			},
		},
		{
			name: "version=v3;panelType=value;dataSource=metrics;queryType=builder",
			query: &v3.QueryRangeParamsV3{
				Version: "v3",
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeValue,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceMetrics,
							AggregateOperator:  v3.AggregateOperatorSumRate,
							Expression:         "A",
							AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
							Temporality:        v3.Delta,
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "le"},
							},
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
							ReduceTo: v3.ReduceToOperatorAvg,
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{},
		},
		{
			name: "version=v3;panelType=table;dataSource=metrics;queryType=builder",
			query: &v3.QueryRangeParamsV3{
				Version: "v3",
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeTable,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceMetrics,
							AggregateOperator:  v3.AggregateOperatorSumRate,
							Expression:         "A",
							AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
							Temporality:        v3.Delta,
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "le"},
							},
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{},
		},

		// v4 - everything can be cached
		{
			name: "version=v4;panelType=graph;dataSource=metrics;queryType=builder",
			query: &v3.QueryRangeParamsV3{
				Version: "v4",
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeGraph,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceMetrics,
							AggregateOperator:  v3.AggregateOperatorSumRate,
							AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
							Temporality:        v3.Delta,
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "le"},
							},
							Expression: "A",
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
			},
		},
		{
			name: "version=v4;panelType=graph;dataSource=metrics;queryType=builder with limit", // limit should not be part of the cache key
			query: &v3.QueryRangeParamsV3{
				Version: "v4",
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeGraph,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceMetrics,
							AggregateOperator:  v3.AggregateOperatorSumRate,
							AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
							Temporality:        v3.Delta,
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "le"},
							},
							Expression: "A",
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
							Limit: 10,
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
			},
		},
		{
			name: "version=v4;panelType=graph;dataSource=metrics;queryType=builder with shiftBy", // shiftBy should be part of the cache key
			query: &v3.QueryRangeParamsV3{
				Version: "v4",
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeGraph,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceMetrics,
							AggregateOperator:  v3.AggregateOperatorSumRate,
							AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
							Temporality:        v3.Delta,
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "le"},
							},
							Expression: "A",
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
							Limit:   10,
							ShiftBy: 86400,
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&shiftBy=86400&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
			},
		},
		{
			name: "version=v4;panelType=value;dataSource=metrics;queryType=builder",
			query: &v3.QueryRangeParamsV3{
				Version: "v4",
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeValue,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceMetrics,
							AggregateOperator:  v3.AggregateOperatorSumRate,
							Expression:         "A",
							AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
							Temporality:        v3.Delta,
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "le"},
							},
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
							ReduceTo: v3.ReduceToOperatorAvg,
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
			},
		},
		{
			name: "version=v4;panelType=table;dataSource=metrics;queryType=builder",
			query: &v3.QueryRangeParamsV3{
				Version: "v4",
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeTable,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceMetrics,
							AggregateOperator:  v3.AggregateOperatorSumRate,
							Expression:         "A",
							AggregateAttribute: v3.AttributeKey{Key: "signoz_latency_bucket"},
							Temporality:        v3.Delta,
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "le"},
							},
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100",
			},
		},
	}

	keyGen := NewKeyGenerator()

	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			cacheKeys := keyGen.GenerateKeys(test.query)
			require.Equal(t, test.expectedCacheKeys, cacheKeys)
		})
	}
}

func TestGenerateCacheKeysLogs(t *testing.T) {
	testCases := []struct {
		name              string
		query             *v3.QueryRangeParamsV3
		expectedCacheKeys map[string]string
	}{
		{
			name: "panelType=graph;dataSource=logs;queryType=builder",
			query: &v3.QueryRangeParamsV3{
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeGraph,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceLogs,
							AggregateOperator:  v3.AggregateOperatorCount,
							AggregateAttribute: v3.AttributeKey{Key: "log_level"},
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "log_level"},
							},
							Expression: "A",
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "source=logs&step=60&aggregate=count&limit=0&aggregateAttribute=log_level---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=log_level---false&orderBy-0=#SIGNOZ_VALUE-desc&having-0=column:value,op:>,value:100",
			},
		},
		{
			name: "panelType=table;dataSource=logs;queryType=builder",
			query: &v3.QueryRangeParamsV3{
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeTable,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceLogs,
							AggregateOperator:  v3.AggregateOperatorCount,
							AggregateAttribute: v3.AttributeKey{Key: "log_level"},
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							GroupBy: []v3.AttributeKey{
								{Key: "service_name"},
								{Key: "log_level"},
							},
							Expression: "A",
							Having: []v3.Having{
								{
									ColumnName: "value",
									Operator:   v3.HavingOperatorGreaterThan,
									Value:      100,
								},
							},
							OrderBy: []v3.OrderBy{
								{ColumnName: constants.SigNozOrderByValue, Order: "desc"},
							},
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{},
		},
		{
			name: "panelType=value;dataSource=logs;queryType=builder",
			query: &v3.QueryRangeParamsV3{
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeValue,
					QueryType: v3.QueryTypeBuilder,
					BuilderQueries: map[string]*v3.BuilderQuery{
						"A": {
							QueryName:          "A",
							StepInterval:       60,
							DataSource:         v3.DataSourceLogs,
							AggregateOperator:  v3.AggregateOperatorCount,
							AggregateAttribute: v3.AttributeKey{Key: "log_level"},
							Filters: &v3.FilterSet{
								Operator: "AND",
								Items: []v3.FilterItem{
									{Key: v3.AttributeKey{Key: "service_name"}, Value: "A", Operator: v3.FilterOperatorEqual},
								},
							},
							Expression: "A",
							Limit:      10,
							ReduceTo:   v3.ReduceToOperatorAvg,
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{},
		},
	}

	keyGen := NewKeyGenerator()
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			cacheKeys := keyGen.GenerateKeys(test.query)
			require.Equal(t, test.expectedCacheKeys, cacheKeys)
		})
	}
}

func TestGenerateCacheKeysMetricsPromQL(t *testing.T) {
	// there is no version difference between v3 and v4 for promql
	testCases := []struct {
		name              string
		query             *v3.QueryRangeParamsV3
		expectedCacheKeys map[string]string
	}{
		{
			name: "panelType=graph;dataSource=metrics;queryType=promql",
			query: &v3.QueryRangeParamsV3{
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeGraph,
					QueryType: v3.QueryTypePromQL,
					PromQueries: map[string]*v3.PromQuery{
						"A": {
							Query: "signoz_latency_bucket",
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "signoz_latency_bucket",
			},
		},
		{
			name: "panelType=graph;dataSource=metrics;queryType=promql",
			query: &v3.QueryRangeParamsV3{
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeGraph,
					QueryType: v3.QueryTypePromQL,
					PromQueries: map[string]*v3.PromQuery{
						"A": {
							Query: "histogram_quantile(0.9, sum(rate(signoz_latency_bucket[1m])) by (le))",
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{
				"A": "histogram_quantile(0.9, sum(rate(signoz_latency_bucket[1m])) by (le))",
			},
		},
		{
			name: "panelType=value;dataSource=metrics;queryType=promql",
			query: &v3.QueryRangeParamsV3{
				CompositeQuery: &v3.CompositeQuery{
					PanelType: v3.PanelTypeValue,
					QueryType: v3.QueryTypePromQL,
					PromQueries: map[string]*v3.PromQuery{
						"A": {
							Query: "histogram_quantile(0.9, sum(rate(signoz_latency_bucket[1m])) by (le))",
						},
					},
				},
			},
			expectedCacheKeys: map[string]string{},
		},
	}

	keyGen := NewKeyGenerator()
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			cacheKeys := keyGen.GenerateKeys(test.query)
			require.Equal(t, test.expectedCacheKeys, cacheKeys)
		})
	}
}

@@ -30,6 +30,7 @@ import (
	opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
	"go.signoz.io/signoz/pkg/query-service/app/preferences"
	"go.signoz.io/signoz/pkg/query-service/common"
	"go.signoz.io/signoz/pkg/query-service/migrate"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

	"go.signoz.io/signoz/pkg/query-service/app/explorer"
@@ -147,6 +148,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		return nil, err
	}

	go func() {
		err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
		if err != nil {
			zap.L().Error("error while running clickhouse migrations", zap.Error(err))
		}
	}()

	var c cache.Cache
	if serverOptions.CacheConfigPath != "" {
		cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
@@ -295,6 +303,7 @@ func (s *Server) createPublicServer(api *APIHandler) (*http.Server, error) {
	api.RegisterIntegrationRoutes(r, am)
	api.RegisterQueryRangeV3Routes(r, am)
	api.RegisterQueryRangeV4Routes(r, am)
	api.RegisterMessagingQueuesRoutes(r, am)

	c := cors.New(cors.Options{
		AllowedOrigins: []string{"*"},
@@ -713,6 +722,7 @@ func makeRulesManager(
		DisableRules: disableRules,
		FeatureFlags: fm,
		Reader:       ch,
		EvalDelay:    constants.GetEvalDelay(),
	}

	// create Manager

@@ -127,7 +127,7 @@ func AddTimestampFilters(minTime int64, maxTime int64, params *v3.QueryRangePara
	if compositeQuery == nil {
		return
	}
	// Build queries for each builder query
	// Build queries for each builder query and apply timestamp filter only if TraceID is present
	for queryName, query := range compositeQuery.BuilderQueries {
		if query.Expression != queryName && query.DataSource != v3.DataSourceTraces {
			continue

@@ -13,10 +13,20 @@ func AdjustedMetricTimeRange(start, end, step int64, mq v3.BuilderQuery) (int64,
	start = start - (start % (step * 1000))
	// if the query is a rate query, we adjust the start time by one more step
	// so that we can calculate the rate for the first data point
	hasRunningDiff := false
	for _, fn := range mq.Functions {
		if fn.Name == v3.FunctionNameRunningDiff {
			hasRunningDiff = true
			break
		}
	}
	if (mq.AggregateOperator.IsRateOperator() || mq.TimeAggregation.IsRateOperator()) &&
		mq.Temporality != v3.Delta {
		start -= step * 1000
	}
	if hasRunningDiff {
		start -= step * 1000
	}
	// align the end to the nearest minute
	adjustStep := int64(math.Min(float64(step), 60))
	end = end - (end % (adjustStep * 1000))

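A worked example of the start adjustment above, using the same millisecond arithmetic (the timestamp is illustrative):

	step := int64(60)                       // 60s step
	start := int64(1717205987000)           // 01:39:47 UTC, in ms
	start = start - (start % (step * 1000)) // floor to the step: 01:39:00
	start -= step * 1000                    // runningDiff fetches one extra step: 01:38:00
	// funcRunningDiff later drops the first point, which this extra step provides
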
@@ -152,6 +152,15 @@ func GetContextTimeoutMaxAllowed() time.Duration {
	return contextTimeoutDuration
}

func GetEvalDelay() time.Duration {
	evalDelayStr := GetOrDefaultEnv("RULES_EVAL_DELAY", "2m")
	evalDelayDuration, err := time.ParseDuration(evalDelayStr)
	if err != nil {
		return 0
	}
	return evalDelayDuration
}

var ContextTimeoutMaxAllowed = GetContextTimeoutMaxAllowed()

const (
@@ -407,3 +416,5 @@ var TracesListViewDefaultSelectedColumns = []v3.AttributeKey{
		IsColumn: true,
	},
}

const DefaultFilterSuggestionsLimit = 100

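A minimal usage sketch for the GetEvalDelay helper above, assuming an operator wants a longer delay than the 2m default (the 5m value is hypothetical):

	package main

	import (
		"os"

		"go.signoz.io/signoz/pkg/query-service/constants"
	)

	func main() {
		// Delay all rule evaluations by 5 minutes; note that an
		// unparseable value silently falls back to 0 (no delay).
		os.Setenv("RULES_EVAL_DELAY", "5m")
		_ = constants.GetEvalDelay() // 5 * time.Minute
	}
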
@@ -93,6 +93,10 @@ type Reader interface {
	GetLogAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
	GetLogAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error)
	GetUsers(ctx context.Context) ([]model.UserPayload, error)
	GetQBFilterSuggestionsForLogs(
		ctx context.Context,
		req *v3.QBFilterSuggestionsRequest,
	) (*v3.QBFilterSuggestionsResponse, *model.ApiError)

	// Connection needed for rules, not ideal but required
	GetConn() clickhouse.Conn
@@ -104,6 +108,14 @@ type Reader interface {

	GetMetricMetadata(context.Context, string, string) (*v3.MetricMetadataResponse, error)

	AddRuleStateHistory(ctx context.Context, ruleStateHistory []v3.RuleStateHistory) error
	GetOverallStateTransitions(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateTransition, error)
	ReadRuleStateHistoryByRuleID(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateHistory, error)
	GetTotalTriggers(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (uint64, error)
	GetTriggersByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error)
	GetAvgResolutionTime(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (float64, error)
	GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error)
	ReadRuleStateHistoryTopContributorsByRuleID(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateHistoryContributor, error)
	GetMinAndMaxTimestampForTraceID(ctx context.Context, traceID []string) (int64, int64, error)
}

@@ -1,8 +1,11 @@
package migrate

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
	"github.com/jmoiron/sqlx"
	alertstov4 "go.signoz.io/signoz/pkg/query-service/migrate/0_45_alerts_to_v4"
	alertscustomstep "go.signoz.io/signoz/pkg/query-service/migrate/0_47_alerts_custom_step"
@@ -77,3 +80,90 @@ func Migrate(dsn string) error {

	return nil
}

func ClickHouseMigrate(conn driver.Conn, cluster string) error {

	database := "CREATE DATABASE IF NOT EXISTS signoz_analytics ON CLUSTER %s"

	localTable := `CREATE TABLE IF NOT EXISTS signoz_analytics.rule_state_history ON CLUSTER %s
	(
		_retention_days UInt32 DEFAULT 180,
		rule_id LowCardinality(String),
		rule_name LowCardinality(String),
		overall_state LowCardinality(String),
		overall_state_changed Bool,
		state LowCardinality(String),
		state_changed Bool,
		unix_milli Int64 CODEC(Delta(8), ZSTD(1)),
		fingerprint UInt64 CODEC(ZSTD(1)),
		value Float64 CODEC(Gorilla, ZSTD(1)),
		labels String CODEC(ZSTD(5)),
	)
	ENGINE = MergeTree
	PARTITION BY toDate(unix_milli / 1000)
	ORDER BY (rule_id, unix_milli)
	TTL toDateTime(unix_milli / 1000) + toIntervalDay(_retention_days)
	SETTINGS ttl_only_drop_parts = 1, index_granularity = 8192`

	distributedTable := `CREATE TABLE IF NOT EXISTS signoz_analytics.distributed_rule_state_history ON CLUSTER %s
	(
		rule_id LowCardinality(String),
		rule_name LowCardinality(String),
		overall_state LowCardinality(String),
		overall_state_changed Bool,
		state LowCardinality(String),
		state_changed Bool,
		unix_milli Int64 CODEC(Delta(8), ZSTD(1)),
		fingerprint UInt64 CODEC(ZSTD(1)),
		value Float64 CODEC(Gorilla, ZSTD(1)),
		labels String CODEC(ZSTD(5)),
	)
	ENGINE = Distributed(%s, signoz_analytics, rule_state_history, cityHash64(rule_id, rule_name, fingerprint))`

	// check if db exists
	dbExists := `SELECT count(*) FROM system.databases WHERE name = 'signoz_analytics'`
	var count uint64
	err := conn.QueryRow(context.Background(), dbExists).Scan(&count)
	if err != nil {
		return err
	}

	if count == 0 {
		err = conn.Exec(context.Background(), fmt.Sprintf(database, cluster))
		if err != nil {
			return err
		}
	}

	// check if table exists
	tableExists := `SELECT count(*) FROM system.tables WHERE name = 'rule_state_history' AND database = 'signoz_analytics'`
	var tableCount uint64
	err = conn.QueryRow(context.Background(), tableExists).Scan(&tableCount)
	if err != nil {
		return err
	}

	if tableCount == 0 {
		err = conn.Exec(context.Background(), fmt.Sprintf(localTable, cluster))
		if err != nil {
			return err
		}
	}

	// check if distributed table exists
	distributedTableExists := `SELECT count(*) FROM system.tables WHERE name = 'distributed_rule_state_history' AND database = 'signoz_analytics'`
	var distributedTableCount uint64
	err = conn.QueryRow(context.Background(), distributedTableExists).Scan(&distributedTableCount)
	if err != nil {
		return err
	}

	if distributedTableCount == 0 {
		err = conn.Exec(context.Background(), fmt.Sprintf(distributedTable, cluster, cluster))
		if err != nil {
			return err
		}
	}

	return nil
}

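For illustration only, reusing the format strings from ClickHouseMigrate above with a hypothetical cluster name (note the distributed table takes the cluster name twice: once for ON CLUSTER and once as the first Distributed() engine argument):

	cluster := "cluster" // hypothetical value of serverOptions.Cluster
	_ = fmt.Sprintf(database, cluster)
	// -> CREATE DATABASE IF NOT EXISTS signoz_analytics ON CLUSTER cluster
	_ = fmt.Sprintf(distributedTable, cluster, cluster)
	// -> ... ON CLUSTER cluster ... ENGINE = Distributed(cluster, signoz_analytics, rule_state_history, ...)
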
@@ -252,6 +252,18 @@ type FilterAttributeKeyRequest struct {
	Limit int `json:"limit"`
}

type QBFilterSuggestionsRequest struct {
	DataSource     DataSource `json:"dataSource"`
	SearchText     string     `json:"searchText"`
	Limit          int        `json:"limit"`
	ExistingFilter *FilterSet `json:"existing_filter"`
}

type QBFilterSuggestionsResponse struct {
	AttributeKeys  []AttributeKey `json:"attributes"`
	ExampleQueries []FilterSet    `json:"example_queries"`
}

type AttributeKeyDataType string

const (
@@ -615,21 +627,22 @@ func GetPercentileFromOperator(operator SpaceAggregation) float64 {
type FunctionName string

const (
	FunctionNameCutOffMin FunctionName = "cutOffMin"
	FunctionNameCutOffMax FunctionName = "cutOffMax"
	FunctionNameClampMin  FunctionName = "clampMin"
	FunctionNameClampMax  FunctionName = "clampMax"
	FunctionNameAbsolute  FunctionName = "absolute"
	FunctionNameLog2      FunctionName = "log2"
	FunctionNameLog10     FunctionName = "log10"
	FunctionNameCumSum    FunctionName = "cumSum"
	FunctionNameEWMA3     FunctionName = "ewma3"
	FunctionNameEWMA5     FunctionName = "ewma5"
	FunctionNameEWMA7     FunctionName = "ewma7"
	FunctionNameMedian3   FunctionName = "median3"
	FunctionNameMedian5   FunctionName = "median5"
	FunctionNameMedian7   FunctionName = "median7"
	FunctionNameTimeShift FunctionName = "timeShift"
	FunctionNameCutOffMin   FunctionName = "cutOffMin"
	FunctionNameCutOffMax   FunctionName = "cutOffMax"
	FunctionNameClampMin    FunctionName = "clampMin"
	FunctionNameClampMax    FunctionName = "clampMax"
	FunctionNameAbsolute    FunctionName = "absolute"
	FunctionNameRunningDiff FunctionName = "runningDiff"
	FunctionNameLog2        FunctionName = "log2"
	FunctionNameLog10       FunctionName = "log10"
	FunctionNameCumSum      FunctionName = "cumSum"
	FunctionNameEWMA3       FunctionName = "ewma3"
	FunctionNameEWMA5       FunctionName = "ewma5"
	FunctionNameEWMA7       FunctionName = "ewma7"
	FunctionNameMedian3     FunctionName = "median3"
	FunctionNameMedian5     FunctionName = "median5"
	FunctionNameMedian7     FunctionName = "median7"
	FunctionNameTimeShift   FunctionName = "timeShift"
)

func (f FunctionName) Validate() error {
@@ -639,6 +652,7 @@ func (f FunctionName) Validate() error {
		FunctionNameClampMin,
		FunctionNameClampMax,
		FunctionNameAbsolute,
		FunctionNameRunningDiff,
		FunctionNameLog2,
		FunctionNameLog10,
		FunctionNameCumSum,
@@ -1026,8 +1040,8 @@ type LogsLiveTailClient struct {
}

type Series struct {
	Labels      map[string]string   `json:"labels"`
	LabelsArray []map[string]string `json:"labelsArray"`
	Labels      map[string]string   `json:"labels,omitempty"`
	LabelsArray []map[string]string `json:"labelsArray,omitempty"`
	Points      []Point             `json:"values"`
}

@@ -1142,3 +1156,92 @@ type MetricMetadataResponse struct {
	IsMonotonic bool   `json:"isMonotonic"`
	Temporality string `json:"temporality"`
}

type LabelsString string

func (l *LabelsString) MarshalJSON() ([]byte, error) {
	lbls := make(map[string]string)
	err := json.Unmarshal([]byte(*l), &lbls)
	if err != nil {
		return nil, err
	}
	return json.Marshal(lbls)
}

func (l *LabelsString) Scan(src interface{}) error {
	if data, ok := src.(string); ok {
		*l = LabelsString(data)
	}
	return nil
}

func (l LabelsString) String() string {
	return string(l)
}

type RuleStateHistory struct {
	RuleID   string `json:"ruleID" ch:"rule_id"`
	RuleName string `json:"ruleName" ch:"rule_name"`
	// One of ["normal", "firing"]
	OverallState        string `json:"overallState" ch:"overall_state"`
	OverallStateChanged bool   `json:"overallStateChanged" ch:"overall_state_changed"`
	// One of ["normal", "firing", "no_data", "muted"]
	State        string       `json:"state" ch:"state"`
	StateChanged bool         `json:"stateChanged" ch:"state_changed"`
	UnixMilli    int64        `json:"unixMilli" ch:"unix_milli"`
	Labels       LabelsString `json:"labels" ch:"labels"`
	Fingerprint  uint64       `json:"fingerprint" ch:"fingerprint"`
	Value        float64      `json:"value" ch:"value"`
}

type QueryRuleStateHistory struct {
	Start   int64      `json:"start"`
	End     int64      `json:"end"`
	Filters *FilterSet `json:"filters"`
	Offset  int64      `json:"offset"`
	Limit   int64      `json:"limit"`
	Order   string     `json:"order"`
}

func (r *QueryRuleStateHistory) Validate() error {
	if r.Start == 0 || r.End == 0 {
		return fmt.Errorf("start and end are required")
	}
	if r.Offset < 0 || r.Limit < 0 {
		return fmt.Errorf("offset and limit must not be negative")
	}
	if r.Order != "asc" && r.Order != "desc" {
		return fmt.Errorf("order must be asc or desc")
	}
	return nil
}

type RuleStateHistoryContributor struct {
	Fingerprint uint64       `json:"fingerprint" ch:"fingerprint"`
	Labels      LabelsString `json:"labels" ch:"labels"`
	Count       uint64       `json:"count" ch:"count"`
}

type RuleStateTransition struct {
	RuleID         string `json:"ruleID" ch:"rule_id"`
	State          string `json:"state" ch:"state"`
	FiringTime     int64  `json:"firingTime" ch:"firing_time"`
	ResolutionTime int64  `json:"resolutionTime" ch:"resolution_time"`
}

type ReleStateItem struct {
	State string `json:"state"`
	Start int64  `json:"start"`
	End   int64  `json:"end"`
}

type Stats struct {
	TotalCurrentTriggers           uint64  `json:"totalCurrentTriggers"`
	TotalPastTriggers              uint64  `json:"totalPastTriggers"`
	CurrentTriggersSeries          *Series `json:"currentTriggersSeries"`
	PastTriggersSeries             *Series `json:"pastTriggersSeries"`
	CurrentAvgResolutionTime       string  `json:"currentAvgResolutionTime"`
	PastAvgResolutionTime          string  `json:"pastAvgResolutionTime"`
	CurrentAvgResolutionTimeSeries *Series `json:"currentAvgResolutionTimeSeries"`
	PastAvgResolutionTimeSeries    *Series `json:"pastAvgResolutionTimeSeries"`
}

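A round-trip sketch for LabelsString above (the values are hypothetical): ClickHouse hands labels back as a JSON string, Scan stores it verbatim, and MarshalJSON re-emits it as a JSON object rather than a quoted string.

	var l v3.LabelsString
	// Scan is what database drivers call when populating the field
	_ = l.Scan(`{"service_name":"frontend","severity":"warn"}`)
	out, _ := json.Marshal(&l)
	fmt.Println(string(out)) // {"service_name":"frontend","severity":"warn"}
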
@@ -78,6 +78,8 @@ type Alert struct {
	ResolvedAt time.Time
	LastSentAt time.Time
	ValidUntil time.Time

	Missing bool
}

func (a *Alert) needsSending(ts time.Time, resendDelay time.Duration) bool {

@@ -63,6 +63,8 @@ type ManagerOptions struct {
	DisableRules bool
	FeatureFlags interfaces.FeatureLookup
	Reader       interfaces.Reader

	EvalDelay time.Duration
}

// The Manager manages recording and alerting rules.
@@ -524,7 +526,9 @@ func (m *Manager) prepareTask(acquireLock bool, r *PostableRule, taskName string
		tr, err := NewThresholdRule(
			ruleId,
			r,
			ThresholdRuleOpts{},
			ThresholdRuleOpts{
				EvalDelay: m.opts.EvalDelay,
			},
			m.featureFlags,
			m.reader,
		)
@@ -549,6 +553,7 @@ func (m *Manager) prepareTask(acquireLock bool, r *PostableRule, taskName string
			r,
			log.With(m.logger, "alert", r.AlertName),
			PromRuleOpts{},
			m.reader,
		)

		if err != nil {
@@ -908,6 +913,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
			PromRuleOpts{
				SendAlways: true,
			},
			m.reader,
		)

		if err != nil {

@@ -2,6 +2,7 @@ package rules

import (
	"context"
	"encoding/json"
	"fmt"
	"math"
	"sync"
@@ -15,6 +16,7 @@ import (
	pql "github.com/prometheus/prometheus/promql"
	"go.signoz.io/signoz/pkg/query-service/converter"
	"go.signoz.io/signoz/pkg/query-service/formatter"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	qslabels "go.signoz.io/signoz/pkg/query-service/utils/labels"
	"go.signoz.io/signoz/pkg/query-service/utils/times"
@@ -54,6 +56,8 @@ type PromRule struct {

	logger log.Logger
	opts   PromRuleOpts

	reader interfaces.Reader
}

func NewPromRule(
@@ -61,6 +65,7 @@ func NewPromRule(
	postableRule *PostableRule,
	logger log.Logger,
	opts PromRuleOpts,
	reader interfaces.Reader,
) (*PromRule, error) {

	if postableRule.RuleCondition == nil {
@@ -83,6 +88,7 @@ func NewPromRule(
		logger:            logger,
		opts:              opts,
	}
	p.reader = reader

	if int64(p.evalWindow) == 0 {
		p.evalWindow = 5 * time.Minute
@@ -215,8 +221,6 @@ func (r *PromRule) GetEvaluationTimestamp() time.Time {
// State returns the maximum state of alert instances for this rule.
// StateFiring > StatePending > StateInactive
func (r *PromRule) State() AlertState {
	r.mtx.Lock()
	defer r.mtx.Unlock()

	maxState := StateInactive
	for _, a := range r.active {
@@ -338,6 +342,8 @@ func (r *PromRule) compareOp() CompareOp {

func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (interface{}, error) {

	prevState := r.State()

	start := ts.Add(-r.evalWindow)
	end := ts
	interval := 60 * time.Second // TODO(srikanthccv): this should be configurable
@@ -459,8 +465,14 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (

	}

	itemsToAdd := []v3.RuleStateHistory{}

	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, a := range r.active {
		labelsJSON, err := json.Marshal(a.Labels)
		if err != nil {
			zap.L().Error("error marshaling labels", zap.Error(err), zap.String("name", r.Name()))
		}
		if _, ok := resultFPs[fp]; !ok {
			// If the alert was previously firing, keep it around for a given
			// retention time so it is reported as resolved to the AlertManager.
@@ -470,6 +482,15 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
			if a.State != StateInactive {
				a.State = StateInactive
				a.ResolvedAt = ts
				itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
					RuleID:       r.ID(),
					RuleName:     r.Name(),
					State:        "normal",
					StateChanged: true,
					UnixMilli:    ts.UnixMilli(),
					Labels:       v3.LabelsString(labelsJSON),
					Fingerprint:  a.Labels.Hash(),
				})
			}
			continue
		}
@@ -477,12 +498,46 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (
		if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration {
			a.State = StateFiring
			a.FiredAt = ts
			state := "firing"
			if a.Missing {
				state = "no_data"
			}
			itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
				RuleID:       r.ID(),
				RuleName:     r.Name(),
				State:        state,
				StateChanged: true,
				UnixMilli:    ts.UnixMilli(),
				Labels:       v3.LabelsString(labelsJSON),
				Fingerprint:  a.Labels.Hash(),
				Value:        a.Value,
			})
		}

	}
	r.health = HealthGood
	r.lastError = err

	currentState := r.State()

	if currentState != prevState {
		for idx := range itemsToAdd {
			if currentState == StateInactive {
				itemsToAdd[idx].OverallState = "normal"
			} else {
				itemsToAdd[idx].OverallState = currentState.String()
			}
			itemsToAdd[idx].OverallStateChanged = true
		}
	}

	if len(itemsToAdd) > 0 && r.reader != nil {
		err := r.reader.AddRuleStateHistory(ctx, itemsToAdd)
		if err != nil {
			zap.L().Error("error while inserting rule state history", zap.Error(err), zap.Any("itemsToAdd", itemsToAdd))
		}
	}

	return len(r.active), nil
}

@@ -611,7 +611,7 @@ func TestPromRuleShouldAlert(t *testing.T) {
		postableRule.RuleCondition.MatchType = MatchType(c.matchType)
		postableRule.RuleCondition.Target = &c.target

		rule, err := NewPromRule("69", &postableRule, testLogger{t}, PromRuleOpts{})
		rule, err := NewPromRule("69", &postableRule, testLogger{t}, PromRuleOpts{}, nil)
		if err != nil {
			assert.NoError(t, err)
		}

@@ -75,6 +75,9 @@ type ThresholdRule struct {

	querier   interfaces.Querier
	querierV2 interfaces.Querier

	reader    interfaces.Reader
	evalDelay time.Duration
}

type ThresholdRuleOpts struct {
@@ -86,6 +89,12 @@ type ThresholdRuleOpts struct {
	// sendAlways will send alert irrespective of resendDelay
	// or other params
	SendAlways bool

	// EvalDelay is the time to wait for data to be available
	// before evaluating the rule. This is useful in scenarios
	// where data might not be available in the system immediately
	// after the timestamp.
	EvalDelay time.Duration
}

func NewThresholdRule(
@@ -96,6 +105,8 @@ func NewThresholdRule(
	reader interfaces.Reader,
) (*ThresholdRule, error) {

	zap.L().Info("creating new ThresholdRule", zap.String("id", id), zap.Any("opts", opts))

	if p.RuleCondition == nil {
		return nil, fmt.Errorf("no rule condition")
	} else if !p.RuleCondition.IsValid() {
@@ -117,6 +128,7 @@ func NewThresholdRule(
		typ:            p.AlertType,
		version:        p.Version,
		temporalityMap: make(map[string]map[v3.Temporality]bool),
		evalDelay:      opts.EvalDelay,
	}

	if int64(t.evalWindow) == 0 {
@@ -139,6 +151,7 @@ func NewThresholdRule(

	t.querier = querier.NewQuerier(querierOption)
	t.querierV2 = querierV2.NewQuerier(querierOptsV2)
	t.reader = reader

	zap.L().Info("creating new ThresholdRule", zap.String("name", t.name), zap.String("id", t.id))

@@ -276,8 +289,6 @@ func (r *ThresholdRule) GetEvaluationTimestamp() time.Time {
// StateFiring > StatePending > StateInactive
func (r *ThresholdRule) State() AlertState {

	r.mtx.Lock()
	defer r.mtx.Unlock()
	maxState := StateInactive
	for _, a := range r.active {
		if a.State > maxState {
@@ -365,9 +376,14 @@ func (r *ThresholdRule) populateTemporality(ctx context.Context, qp *v3.QueryRan
		}
	}

	nameToTemporality, err := r.FetchTemporality(ctx, missingTemporality, ch)
	if err != nil {
		return err
	var nameToTemporality map[string]map[v3.Temporality]bool
	var err error

	if len(missingTemporality) > 0 {
		nameToTemporality, err = r.FetchTemporality(ctx, missingTemporality, ch)
		if err != nil {
			return err
		}
	}

	if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 {
@@ -402,7 +418,6 @@ func (r *ThresholdRule) ForEachActiveAlert(f func(*Alert)) {
}

func (r *ThresholdRule) SendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) {
	zap.L().Info("sending alerts", zap.String("rule", r.Name()))
	alerts := []*Alert{}
	r.ForEachActiveAlert(func(alert *Alert) {
		if r.opts.SendAlways || alert.needsSending(ts, resendDelay) {
@@ -431,11 +446,14 @@ func (r *ThresholdRule) Unit() string {

func (r *ThresholdRule) prepareQueryRange(ts time.Time) *v3.QueryRangeParamsV3 {

	// todo(srikanthccv): make this configurable
	// 2 minutes is reasonable time to wait for data to be available
	// 60 seconds (SDK) + 10 seconds (batch) + rest for n/w + serialization + write to disk etc..
	start := ts.Add(-time.Duration(r.evalWindow)).UnixMilli() - 2*60*1000
	end := ts.UnixMilli() - 2*60*1000
	zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.evalWindow.Milliseconds()), zap.Int64("evalDelay", r.evalDelay.Milliseconds()))

	start := ts.Add(-time.Duration(r.evalWindow)).UnixMilli()
	end := ts.UnixMilli()
	if r.evalDelay > 0 {
		start = start - int64(r.evalDelay.Milliseconds())
		end = end - int64(r.evalDelay.Milliseconds())
	}
	// round to minute otherwise we could potentially miss data
	start = start - (start % (60 * 1000))
	end = end - (end % (60 * 1000))
@@ -860,6 +878,8 @@ func normalizeLabelName(name string) string {

func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Queriers) (interface{}, error) {

	prevState := r.State()

	valueFormatter := formatter.FromUnit(r.Unit())
	res, err := r.buildAndRunQuery(ctx, ts, queriers.Ch)

@@ -967,6 +987,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
			Value:        smpl.V,
			GeneratorURL: r.GeneratorURL(),
			Receivers:    r.preferredChannels,
			Missing:      smpl.IsMissing,
		}
	}

@@ -988,8 +1009,14 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie

	}

	itemsToAdd := []v3.RuleStateHistory{}

	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, a := range r.active {
		labelsJSON, err := json.Marshal(a.Labels)
		if err != nil {
			zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels))
		}
		if _, ok := resultFPs[fp]; !ok {
			// If the alert was previously firing, keep it around for a given
			// retention time so it is reported as resolved to the AlertManager.
@@ -999,6 +1026,15 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
			if a.State != StateInactive {
				a.State = StateInactive
				a.ResolvedAt = ts
				itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
					RuleID:       r.ID(),
					RuleName:     r.Name(),
					State:        "normal",
					StateChanged: true,
					UnixMilli:    ts.UnixMilli(),
					Labels:       v3.LabelsString(labelsJSON),
					Fingerprint:  a.Labels.Hash(),
				})
			}
			continue
		}
@@ -1006,8 +1042,46 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time, queriers *Querie
		if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration {
			a.State = StateFiring
			a.FiredAt = ts
			state := "firing"
			if a.Missing {
				state = "no_data"
			}
			itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
				RuleID:       r.ID(),
				RuleName:     r.Name(),
				State:        state,
				StateChanged: true,
				UnixMilli:    ts.UnixMilli(),
				Labels:       v3.LabelsString(labelsJSON),
				Fingerprint:  a.Labels.Hash(),
				Value:        a.Value,
			})
		}
	}

	currentState := r.State()

	if currentState != prevState {
		for idx := range itemsToAdd {
			if currentState == StateInactive {
				itemsToAdd[idx].OverallState = "normal"
			} else {
				itemsToAdd[idx].OverallState = currentState.String()
			}
			itemsToAdd[idx].OverallStateChanged = true
		}
	} else {
		for idx := range itemsToAdd {
			itemsToAdd[idx].OverallState = currentState.String()
			itemsToAdd[idx].OverallStateChanged = false
		}
	}

	if len(itemsToAdd) > 0 && r.reader != nil {
		err := r.reader.AddRuleStateHistory(ctx, itemsToAdd)
		if err != nil {
			zap.L().Error("error while inserting rule state history", zap.Error(err), zap.Any("itemsToAdd", itemsToAdd))
		}
	}
	r.health = HealthGood
	r.lastError = err

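To make the new prepareQueryRange window concrete, here is the arithmetic for the default 2m delay (the timestamp matches the eval-delay test below; all values in milliseconds):

	// ts = 01:39:47, evalWindow = 5m, evalDelay = 2m
	end := int64(1717205987000)     // 01:39:47
	start := end - 5*60*1000        // 01:34:47
	start -= 2 * 60 * 1000          // shift both edges back by evalDelay: 01:32:47
	end -= 2 * 60 * 1000            //                                     01:37:47
	start = start - (start % 60000) // round to the minute: 01:32:00
	end = end - (end % 60000)       //                      01:37:00
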
@@ -1,13 +1,18 @@
|
||||
package rules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
|
||||
"go.signoz.io/signoz/pkg/query-service/featureManager"
|
||||
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
|
||||
"go.signoz.io/signoz/pkg/query-service/utils/labels"
|
||||
|
||||
cmock "github.com/srikanthccv/ClickHouse-go-mock"
|
||||
)
|
||||
|
||||
func TestThresholdRuleShouldAlert(t *testing.T) {
|
||||
@@ -611,7 +616,7 @@ func TestThresholdRuleShouldAlert(t *testing.T) {
		postableRule.RuleCondition.MatchType = MatchType(c.matchType)
		postableRule.RuleCondition.Target = &c.target

-		rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{}, fm, nil)
+		rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{EvalDelay: 2 * time.Minute}, fm, nil)
		if err != nil {
			assert.NoError(t, err)
		}

@@ -697,7 +702,7 @@ func TestPrepareLinksToLogs(t *testing.T) {
	}
	fm := featureManager.StartManager()

-	rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{}, fm, nil)
+	rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{EvalDelay: 2 * time.Minute}, fm, nil)
	if err != nil {
		assert.NoError(t, err)
	}

@@ -739,7 +744,7 @@ func TestPrepareLinksToTraces(t *testing.T) {
	}
	fm := featureManager.StartManager()

-	rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{}, fm, nil)
+	rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{EvalDelay: 2 * time.Minute}, fm, nil)
	if err != nil {
		assert.NoError(t, err)
	}

@@ -815,7 +820,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
		postableRule.RuleCondition.MatchType = MatchType(c.matchType)
		postableRule.RuleCondition.Target = &c.target

-		rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{}, fm, nil)
+		rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{EvalDelay: 2 * time.Minute}, fm, nil)
		if err != nil {
			assert.NoError(t, err)
		}
@@ -834,6 +839,55 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
	}
}

func TestThresholdRuleEvalDelay(t *testing.T) {
	postableRule := PostableRule{
		AlertName:  "Test Eval Delay",
		AlertType:  "METRIC_BASED_ALERT",
		RuleType:   RuleTypeThreshold,
		EvalWindow: Duration(5 * time.Minute),
		Frequency:  Duration(1 * time.Minute),
		RuleCondition: &RuleCondition{
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeClickHouseSQL,
				ClickHouseQueries: map[string]*v3.ClickHouseQuery{
					"A": {
						Query: "SELECT 1 >= {{.start_timestamp_ms}} AND 1 <= {{.end_timestamp_ms}}",
					},
				},
			},
		},
	}

	// 01:39:47
	ts := time.Unix(1717205987, 0)

	cases := []struct {
		expectedQuery string
	}{
		// Test cases for Equals Always
		{
			// 01:34:00 - 01:39:00
			expectedQuery: "SELECT 1 >= 1717205640000 AND 1 <= 1717205940000",
		},
	}

	fm := featureManager.StartManager()
	for idx, c := range cases {
		rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{}, fm, nil) // no eval delay
		if err != nil {
			assert.NoError(t, err)
		}

		params := rule.prepareQueryRange(ts)

		assert.Equal(t, c.expectedQuery, params.CompositeQuery.ClickHouseQueries["A"].Query, "Test case %d", idx)

		secondTimeParams := rule.prepareQueryRange(ts)

		assert.Equal(t, c.expectedQuery, secondTimeParams.CompositeQuery.ClickHouseQueries["A"].Query, "Test case %d", idx)
	}
}
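The expected window above (01:34:00-01:39:00 for an evaluation at 01:39:47) implies the evaluation point is snapped down to the minute and the window is [end - EvalWindow, end]; a non-zero EvalDelay would pull the evaluation point back before snapping. A runnable sketch under that assumption (evalWindow is an illustrative reconstruction, not the actual prepareQueryRange):

package main

import (
	"fmt"
	"time"
)

// evalWindow derives the [start, end] query range in unix millis.
func evalWindow(ts time.Time, window, delay time.Duration) (startMs, endMs int64) {
	// Pull the evaluation point back by the delay, then truncate to the
	// minute so repeated evaluations at nearby instants produce the same,
	// cacheable window.
	end := ts.Add(-delay).Truncate(time.Minute)
	return end.Add(-window).UnixMilli(), end.UnixMilli()
}

func main() {
	ts := time.Unix(1717205987, 0) // 01:39:47
	fmt.Println(evalWindow(ts, 5*time.Minute, 0))
	// 1717205640000 1717205940000 -- matches the expected query above
	fmt.Println(evalWindow(ts, 5*time.Minute, 2*time.Minute))
	// the same window shifted two minutes earlier
}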

func TestThresholdRuleClickHouseTmpl(t *testing.T) {
	postableRule := PostableRule{
		AlertName: "Tricky Condition Tests",

@@ -868,7 +922,7 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) {

	fm := featureManager.StartManager()
	for idx, c := range cases {
-		rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{}, fm, nil)
+		rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{EvalDelay: 2 * time.Minute}, fm, nil)
		if err != nil {
			assert.NoError(t, err)
		}
@@ -882,3 +936,166 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) {
		assert.Equal(t, c.expectedQuery, secondTimeParams.CompositeQuery.ClickHouseQueries["A"].Query, "Test case %d", idx)
	}
}

type queryMatcherAny struct {
}

func (m *queryMatcherAny) Match(string, string) error {
	return nil
}

func TestThresholdRuleUnitCombinations(t *testing.T) {
	postableRule := PostableRule{
		AlertName:  "Units test",
		AlertType:  "METRIC_BASED_ALERT",
		RuleType:   RuleTypeThreshold,
		EvalWindow: Duration(5 * time.Minute),
		Frequency:  Duration(1 * time.Minute),
		RuleCondition: &RuleCondition{
			CompositeQuery: &v3.CompositeQuery{
				QueryType: v3.QueryTypeBuilder,
				BuilderQueries: map[string]*v3.BuilderQuery{
					"A": {
						QueryName:    "A",
						StepInterval: 60,
						AggregateAttribute: v3.AttributeKey{
							Key: "signoz_calls_total",
						},
						AggregateOperator: v3.AggregateOperatorSumRate,
						DataSource:        v3.DataSourceMetrics,
						Expression:        "A",
					},
				},
			},
		},
	}
	fm := featureManager.StartManager()
	mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{})
	if err != nil {
		t.Errorf("an error '%s' was not expected when opening a stub database connection", err)
	}

	cols := make([]cmock.ColumnType, 0)
	cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"})
	cols = append(cols, cmock.ColumnType{Name: "attr", Type: "String"})
	cols = append(cols, cmock.ColumnType{Name: "timestamp", Type: "String"})

	cases := []struct {
		targetUnit   string
		yAxisUnit    string
		values       [][]interface{}
		expectAlerts int
		compareOp    string
		matchType    string
		target       float64
		summaryAny   []string
	}{
		{
			targetUnit: "s",
			yAxisUnit:  "ns",
			values: [][]interface{}{
				{float64(572588400), "attr", time.Now()},                              // 0.57 seconds
				{float64(572386400), "attr", time.Now().Add(1 * time.Second)},         // 0.57 seconds
				{float64(300947400), "attr", time.Now().Add(2 * time.Second)},         // 0.3 seconds
				{float64(299316000), "attr", time.Now().Add(3 * time.Second)},         // 0.3 seconds
				{float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 0.06 seconds
			},
			expectAlerts: 0,
			compareOp:    "1", // Above
			matchType:    "1", // Once
			target:       1,   // 1 second
		},
		{
			targetUnit: "ms",
			yAxisUnit:  "ns",
			values: [][]interface{}{
				{float64(572588400), "attr", time.Now()},                              // 572.58 ms
				{float64(572386400), "attr", time.Now().Add(1 * time.Second)},         // 572.38 ms
				{float64(300947400), "attr", time.Now().Add(2 * time.Second)},         // 300.94 ms
				{float64(299316000), "attr", time.Now().Add(3 * time.Second)},         // 299.31 ms
				{float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 66.64 ms
			},
			expectAlerts: 4,
			compareOp:    "1", // Above
			matchType:    "1", // Once
			target:       200, // 200 ms
			summaryAny: []string{
				"observed metric value is 299 ms",
				"the observed metric value is 573 ms",
				"the observed metric value is 572 ms",
				"the observed metric value is 301 ms",
			},
		},
		{
			targetUnit: "decgbytes",
			yAxisUnit:  "bytes",
			values: [][]interface{}{
				{float64(2863284053), "attr", time.Now()},                             // 2.86 GB
				{float64(2863388842), "attr", time.Now().Add(1 * time.Second)},        // 2.86 GB
				{float64(300947400), "attr", time.Now().Add(2 * time.Second)},         // 0.3 GB
				{float64(299316000), "attr", time.Now().Add(3 * time.Second)},         // 0.3 GB
				{float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 66.64 MB
			},
			expectAlerts: 0,
			compareOp:    "1", // Above
			matchType:    "1", // Once
			target:       200, // 200 GB
		},
	}

	for idx, c := range cases {
		rows := cmock.NewRows(cols, c.values)

		// We are testing the eval logic after the query is run
		// so we don't care about the query string here
		queryString := "SELECT any"
		mock.
			ExpectQuery(queryString).
			WillReturnRows(rows)
		postableRule.RuleCondition.CompareOp = CompareOp(c.compareOp)
		postableRule.RuleCondition.MatchType = MatchType(c.matchType)
		postableRule.RuleCondition.Target = &c.target
		postableRule.RuleCondition.CompositeQuery.Unit = c.yAxisUnit
		postableRule.RuleCondition.TargetUnit = c.targetUnit
		postableRule.Annotations = map[string]string{
			"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
			"summary":     "The rule threshold is set to {{$threshold}}, and the observed metric value is {{$value}}",
		}

		options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
		reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "")

		rule, err := NewThresholdRule("69", &postableRule, ThresholdRuleOpts{}, fm, reader)
		rule.temporalityMap = map[string]map[v3.Temporality]bool{
			"signoz_calls_total": {
				v3.Delta: true,
			},
		}
		if err != nil {
			assert.NoError(t, err)
		}

		queriers := Queriers{
			Ch: mock,
		}

		retVal, err := rule.Eval(context.Background(), time.Now(), &queriers)
		if err != nil {
			assert.NoError(t, err)
		}

		assert.Equal(t, c.expectAlerts, retVal.(int), "case %d", idx)
		if c.expectAlerts != 0 {
			foundCount := 0
			for _, item := range rule.active {
				for _, summary := range c.summaryAny {
					if strings.Contains(item.Annotations.Get("summary"), summary) {
						foundCount++
						break
					}
				}
			}
			assert.Equal(t, c.expectAlerts, foundCount, "case %d", idx)
		}
	}
}
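The second case above is the easiest to check by hand: samples arrive in nanoseconds (yAxisUnit "ns") while the 200 threshold is in the target unit "ms", so four of the five samples exceed it once converted. A minimal sketch of that convert-then-compare step (toMilli is an illustrative helper, not SigNoz's converter package):

package main

import "fmt"

// toMilli converts a raw sample to milliseconds; only the units used in
// the test above are covered.
func toMilli(value float64, unit string) float64 {
	switch unit {
	case "ns":
		return value / 1e6
	case "s":
		return value * 1e3
	default:
		return value // assume already in ms
	}
}

func main() {
	samplesNs := []float64{572588400, 572386400, 300947400, 299316000, 66640400.00000001}
	targetMs := 200.0

	alerts := 0
	for _, v := range samplesNs {
		if toMilli(v, "ns") > targetMs { // CompareOp "Above", MatchType "Once"
			alerts++
		}
	}
	fmt.Println(alerts) // 4, matching expectAlerts in the second case
}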
279 pkg/query-service/tests/integration/filter_suggestions_test.go Normal file
@@ -0,0 +1,279 @@
package tests

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"slices"
	"strings"
	"testing"

	mockhouse "github.com/srikanthccv/ClickHouse-go-mock"
	"github.com/stretchr/testify/require"
	"go.signoz.io/signoz/pkg/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/auth"
	"go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/dao"
	"go.signoz.io/signoz/pkg/query-service/featureManager"
	"go.signoz.io/signoz/pkg/query-service/model"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/query-service/utils"
)

// If no data has been received yet, filter suggestions should contain
// standard log fields and static example queries based on them
func TestDefaultLogsFilterSuggestions(t *testing.T) {
	require := require.New(t)
	tb := NewFilterSuggestionsTestBed(t)

	tb.mockAttribKeysQueryResponse([]v3.AttributeKey{})
	suggestionsQueryParams := map[string]string{}
	suggestionsResp := tb.GetQBFilterSuggestionsForLogs(suggestionsQueryParams)

	require.Greater(len(suggestionsResp.AttributeKeys), 0)
	require.True(slices.ContainsFunc(
		suggestionsResp.AttributeKeys, func(a v3.AttributeKey) bool {
			return a.Key == "body"
		},
	))

	require.Greater(len(suggestionsResp.ExampleQueries), 0)
	require.False(slices.ContainsFunc(
		suggestionsResp.AttributeKeys, func(a v3.AttributeKey) bool {
			return a.Type == v3.AttributeKeyTypeTag || a.Type == v3.AttributeKeyTypeResource
		},
	))
}

func TestLogsFilterSuggestionsWithoutExistingFilter(t *testing.T) {
	require := require.New(t)
	tb := NewFilterSuggestionsTestBed(t)

	testAttrib := v3.AttributeKey{
		Key:      "container_id",
		Type:     v3.AttributeKeyTypeResource,
		DataType: v3.AttributeKeyDataTypeString,
		IsColumn: false,
	}
	testAttribValue := "test-container"

	tb.mockAttribKeysQueryResponse([]v3.AttributeKey{testAttrib})
	tb.mockAttribValuesQueryResponse(testAttrib, []string{testAttribValue})
	suggestionsQueryParams := map[string]string{}
	suggestionsResp := tb.GetQBFilterSuggestionsForLogs(suggestionsQueryParams)

	require.Greater(len(suggestionsResp.AttributeKeys), 0)
	require.True(slices.ContainsFunc(
		suggestionsResp.AttributeKeys, func(a v3.AttributeKey) bool {
			return a.Key == testAttrib.Key && a.Type == testAttrib.Type
		},
	))

	require.Greater(len(suggestionsResp.ExampleQueries), 0)
	require.True(slices.ContainsFunc(
		suggestionsResp.ExampleQueries, func(q v3.FilterSet) bool {
			return slices.ContainsFunc(q.Items, func(i v3.FilterItem) bool {
				return i.Key.Key == testAttrib.Key && i.Value == testAttribValue
			})
		},
	))
}

// If a filter already exists, suggested example queries should
// contain existing filter
func TestLogsFilterSuggestionsWithExistingFilter(t *testing.T) {
	require := require.New(t)
	tb := NewFilterSuggestionsTestBed(t)

	testAttrib := v3.AttributeKey{
		Key:      "container_id",
		Type:     v3.AttributeKeyTypeResource,
		DataType: v3.AttributeKeyDataTypeString,
		IsColumn: false,
	}
	testAttribValue := "test-container"

	testFilterAttrib := v3.AttributeKey{
		Key:      "tenant_id",
		Type:     v3.AttributeKeyTypeTag,
		DataType: v3.AttributeKeyDataTypeString,
		IsColumn: false,
	}
	testFilterAttribValue := "test-tenant"
	testFilter := v3.FilterSet{
		Operator: "AND",
		Items: []v3.FilterItem{
			{
				Key:      testFilterAttrib,
				Operator: "=",
				Value:    testFilterAttribValue,
			},
		},
	}

	tb.mockAttribKeysQueryResponse([]v3.AttributeKey{testAttrib, testFilterAttrib})
	tb.mockAttribValuesQueryResponse(testAttrib, []string{testAttribValue})

	testFilterJson, err := json.Marshal(testFilter)
	require.Nil(err, "couldn't serialize existing filter to JSON")
	suggestionsQueryParams := map[string]string{
		"existingFilter": base64.RawURLEncoding.EncodeToString(testFilterJson),
	}
	suggestionsResp := tb.GetQBFilterSuggestionsForLogs(suggestionsQueryParams)

	require.Greater(len(suggestionsResp.AttributeKeys), 0)

	// All example queries should contain the existing filter as a prefix
	require.Greater(len(suggestionsResp.ExampleQueries), 0)
	for _, q := range suggestionsResp.ExampleQueries {
		require.Equal(q.Items[0], testFilter.Items[0])
	}
}
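As the test above shows, the existingFilter query param carries the serialized FilterSet as unpadded, URL-safe base64 (RawURLEncoding), so it needs no extra URL escaping. A standalone sketch of building that query string (the JSON field names below are illustrative; the real shape comes from v3.FilterSet's JSON tags):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// An existing filter roughly like the one in the test above.
	filter := map[string]any{
		"op": "AND",
		"items": []map[string]any{
			{
				"key":   map[string]any{"key": "tenant_id", "type": "tag", "dataType": "string"},
				"op":    "=",
				"value": "test-tenant",
			},
		},
	}
	buf, err := json.Marshal(filter)
	if err != nil {
		panic(err)
	}
	param := base64.RawURLEncoding.EncodeToString(buf)
	fmt.Println("/api/v3/filter_suggestions?dataSource=logs&existingFilter=" + param)
}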

// Mocks response for CH queries made by reader.GetLogAttributeKeys
func (tb *FilterSuggestionsTestBed) mockAttribKeysQueryResponse(
	attribsToReturn []v3.AttributeKey,
) {
	cols := []mockhouse.ColumnType{}
	cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "tagKey"})
	cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "tagType"})
	cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "tagDataType"})

	values := [][]any{}
	for _, a := range attribsToReturn {
		rowValues := []any{}
		rowValues = append(rowValues, a.Key)
		rowValues = append(rowValues, string(a.Type))
		rowValues = append(rowValues, string(a.DataType))
		values = append(values, rowValues)
	}

	tb.mockClickhouse.ExpectQuery(
		"select.*from.*signoz_logs.distributed_tag_attributes.*",
	).WithArgs(
		constants.DefaultFilterSuggestionsLimit,
	).WillReturnRows(
		mockhouse.NewRows(cols, values),
	)

	// Add expectation for the create table query used to determine
	// if an attribute is a column
	cols = []mockhouse.ColumnType{{Type: "String", Name: "statement"}}
	values = [][]any{{"CREATE TABLE signoz_logs.distributed_logs"}}
	tb.mockClickhouse.ExpectSelect(
		"SHOW CREATE TABLE.*",
	).WillReturnRows(mockhouse.NewRows(cols, values))
}

// Mocks response for CH queries made by reader.GetLogAttributeValues
func (tb *FilterSuggestionsTestBed) mockAttribValuesQueryResponse(
	expectedAttrib v3.AttributeKey,
	stringValuesToReturn []string,
) {
	cols := []mockhouse.ColumnType{}
	cols = append(cols, mockhouse.ColumnType{Type: "String", Name: "stringTagValue"})

	values := [][]any{}
	for _, v := range stringValuesToReturn {
		rowValues := []any{}
		rowValues = append(rowValues, v)
		values = append(values, rowValues)
	}

	tb.mockClickhouse.ExpectQuery(
		"select distinct.*stringTagValue.*from.*signoz_logs.distributed_tag_attributes.*",
	).WithArgs(string(expectedAttrib.Key), v3.TagType(expectedAttrib.Type), 1).WillReturnRows(mockhouse.NewRows(cols, values))
}

type FilterSuggestionsTestBed struct {
	t              *testing.T
	testUser       *model.User
	qsHttpHandler  http.Handler
	mockClickhouse mockhouse.ClickConnMockCommon
}

func (tb *FilterSuggestionsTestBed) GetQBFilterSuggestionsForLogs(
	queryParams map[string]string,
) *v3.QBFilterSuggestionsResponse {

	_, dsExistsInQP := queryParams["dataSource"]
	require.False(tb.t, dsExistsInQP)
	queryParams["dataSource"] = "logs"

	result := tb.QSGetRequest("/api/v3/filter_suggestions", queryParams)

	dataJson, err := json.Marshal(result.Data)
	if err != nil {
		tb.t.Fatalf("could not marshal apiResponse.Data: %v", err)
	}

	var resp v3.QBFilterSuggestionsResponse
	err = json.Unmarshal(dataJson, &resp)
	if err != nil {
		tb.t.Fatalf("could not unmarshal apiResponse.Data json into QBFilterSuggestionsResponse")
	}

	return &resp
}

func NewFilterSuggestionsTestBed(t *testing.T) *FilterSuggestionsTestBed {
	testDB := utils.NewQueryServiceDBForTests(t)

	fm := featureManager.StartManager()
	reader, mockClickhouse := NewMockClickhouseReader(t, testDB, fm)
	mockClickhouse.MatchExpectationsInOrder(false)

	apiHandler, err := app.NewAPIHandler(app.APIHandlerOpts{
		Reader:       reader,
		AppDao:       dao.DB(),
		FeatureFlags: fm,
	})
	if err != nil {
		t.Fatalf("could not create a new ApiHandler: %v", err)
	}

	router := app.NewRouter()
	am := app.NewAuthMiddleware(auth.GetUserFromRequest)
	apiHandler.RegisterRoutes(router, am)
	apiHandler.RegisterQueryRangeV3Routes(router, am)

	user, apiErr := createTestUser()
	if apiErr != nil {
		t.Fatalf("could not create a test user: %v", apiErr)
	}

	return &FilterSuggestionsTestBed{
		t:              t,
		testUser:       user,
		qsHttpHandler:  router,
		mockClickhouse: mockClickhouse,
	}
}
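Putting the pieces together, a new test against this bed follows the same shape as the tests above (a sketch; TestExample is hypothetical):

func TestExample(t *testing.T) {
	tb := NewFilterSuggestionsTestBed(t)

	// Queue the ClickHouse responses before issuing the request.
	tb.mockAttribKeysQueryResponse([]v3.AttributeKey{})

	resp := tb.GetQBFilterSuggestionsForLogs(map[string]string{})
	if len(resp.AttributeKeys) == 0 {
		t.Fatal("expected default attribute suggestions")
	}
}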

func (tb *FilterSuggestionsTestBed) QSGetRequest(
	path string,
	queryParams map[string]string,
) *app.ApiResponse {
	if len(queryParams) > 0 {
		qps := []string{}
		for q, v := range queryParams {
			qps = append(qps, fmt.Sprintf("%s=%s", q, v))
		}
		path = fmt.Sprintf("%s?%s", path, strings.Join(qps, "&"))
	}

	req, err := AuthenticatedRequestForTest(
		tb.testUser, path, nil,
	)
	if err != nil {
		tb.t.Fatalf("couldn't create authenticated test request: %v", err)
	}

	result, err := HandleTestRequest(tb.qsHttpHandler, req, 200)
	if err != nil {
		tb.t.Fatalf("test request failed: %v", err)
	}
	return result
}

@@ -512,7 +512,7 @@ func (tb *LogPipelinesTestBed) PostPipelinesToQSExpectingStatusCode(
	postablePipelines logparsingpipeline.PostablePipelines,
	expectedStatusCode int,
) *logparsingpipeline.PipelinesResponse {
-	req, err := NewAuthenticatedTestRequest(
+	req, err := AuthenticatedRequestForTest(
		tb.testUser, "/api/v1/logs/pipelines", postablePipelines,
	)
	if err != nil {

@@ -562,7 +562,7 @@ func (tb *LogPipelinesTestBed) PostPipelinesToQS(
}

func (tb *LogPipelinesTestBed) GetPipelinesFromQS() *logparsingpipeline.PipelinesResponse {
-	req, err := NewAuthenticatedTestRequest(
+	req, err := AuthenticatedRequestForTest(
		tb.testUser, "/api/v1/logs/pipelines/latest", nil,
	)
	if err != nil {
@@ -3,10 +3,7 @@ package tests
import (
	"encoding/json"
	"fmt"
-	"io"
	"net/http"
-	"net/http/httptest"
-	"runtime/debug"
	"slices"
	"testing"
	"time"

@@ -501,38 +498,18 @@ func (tb *IntegrationsTestBed) RequestQS(
	path string,
	postData interface{},
) *app.ApiResponse {
-	req, err := NewAuthenticatedTestRequest(
+	req, err := AuthenticatedRequestForTest(
		tb.testUser, path, postData,
	)
	if err != nil {
		tb.t.Fatalf("couldn't create authenticated test request: %v", err)
	}

-	respWriter := httptest.NewRecorder()
-	tb.qsHttpHandler.ServeHTTP(respWriter, req)
-	response := respWriter.Result()
-	responseBody, err := io.ReadAll(response.Body)
+	result, err := HandleTestRequest(tb.qsHttpHandler, req, 200)
	if err != nil {
-		tb.t.Fatalf("couldn't read response body received from QS: %v", err)
+		tb.t.Fatalf("test request failed: %v", err)
	}

-	if response.StatusCode != 200 {
-		tb.t.Fatalf(
-			"unexpected response status from query service for path %s. status: %d, body: %v\n%v",
-			path, response.StatusCode, string(responseBody), string(debug.Stack()),
-		)
-	}
-
-	var result app.ApiResponse
-	err = json.Unmarshal(responseBody, &result)
-	if err != nil {
-		tb.t.Fatalf(
-			"Could not unmarshal QS response into an ApiResponse.\nResponse body: %s",
-			string(responseBody),
-		)
-	}
-
-	return &result
+	return result
}

func (tb *IntegrationsTestBed) mockLogQueryResponse(logsInResponse []model.SignozLog) {
@@ -5,8 +5,10 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"runtime/debug"
	"testing"
	"time"

@@ -16,6 +18,7 @@ import (
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
	mockhouse "github.com/srikanthccv/ClickHouse-go-mock"
	"github.com/stretchr/testify/require"
	"go.signoz.io/signoz/pkg/query-service/app"
	"go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
	"go.signoz.io/signoz/pkg/query-service/auth"
	"go.signoz.io/signoz/pkg/query-service/constants"

@@ -172,7 +175,7 @@ func createTestUser() (*model.User, *model.ApiError) {
	)
}

-func NewAuthenticatedTestRequest(
+func AuthenticatedRequestForTest(
	user *model.User,
	path string,
	postData interface{},

@@ -198,3 +201,31 @@ func NewAuthenticatedTestRequest(
	req.Header.Add("Authorization", "Bearer "+userJwt.AccessJwt)
	return req, nil
}

func HandleTestRequest(handler http.Handler, req *http.Request, expectedStatus int) (*app.ApiResponse, error) {
	respWriter := httptest.NewRecorder()
	handler.ServeHTTP(respWriter, req)
	response := respWriter.Result()
	responseBody, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, fmt.Errorf("couldn't read response body received from QS: %w", err)
	}

	if response.StatusCode != expectedStatus {
		return nil, fmt.Errorf(
			"unexpected response status from query service for path %s. status: %d, body: %v\n%v",
			req.URL.Path, response.StatusCode, string(responseBody), string(debug.Stack()),
		)
	}

	var result app.ApiResponse
	err = json.Unmarshal(responseBody, &result)
	if err != nil {
		return nil, fmt.Errorf(
			"Could not unmarshal QS response into an ApiResponse.\nResponse body: %s",
			string(responseBody),
		)
	}

	return &result, nil
}
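With the expected status now a parameter, test beds can also assert error paths without duplicating the recorder plumbing. A usage sketch (assuming a handler and an authenticated req built as above):

// Expecting the API to reject the request with a 400:
result, err := HandleTestRequest(tb.qsHttpHandler, req, 400)
if err != nil {
	tb.t.Fatalf("test request failed: %v", err)
}
_ = result.Data // inspect the ApiResponse as needed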