Compare commits

..

20 Commits

Author SHA1 Message Date
Tushar Vats
02f127423b Merge branch 'main' into tvats-improve-error-msg 2025-11-24 15:12:15 +05:30
Tushar Vats
ab8a63bc51 fix: used WithAdditional instead of append 2025-11-24 14:37:58 +05:30
Nityananda Gohain
12c9b921a7 chore: fix error in http_handler for get ttl (#9652) 2025-11-22 14:47:34 +05:30
Abhishek Kumar Singh
52228bc6c4 feat: add support for recovery threshold (#9428) 2025-11-21 20:00:37 +00:00
Tushar Vats
79988b448f fix: error message spacing for incorrect password (#9649) 2025-11-21 22:04:19 +05:30
Piyush Singariya
4bfd7ba3d7 fix(logs pipelines): Migrate model.APIErrors to errors (#9598)
* chore: in progress 1

* chore: in progress 2

* feat: fix errors

* feat: ready for review

* fix: lint

* chore: changes based on review

* fix: error checking

* chore: test done for saving pipelines

* chore: redundant error code

* fix: nit change based on review

---------

Co-authored-by: Nityananda Gohain <nityanandagohain@gmail.com>
2025-11-21 11:26:19 +00:00
Abhi kumar
3349158213 chore: converted querysearch codemirror component to uncontrolled component (#9569)
* chore: converted querysearch codemirror component to uncontrolled component

* refactor: remove local query state and make QuerySearch uncontrolled

* chore: fixed breaking tests in querySearch

* chore: removed unnecessary comments

* chore: added fix for forward ref warning

* fix: added fix for query getting reset to empty string

* chore: removed queryv2 changes

* chore: fixed forwardref error in queryv2

* test: updated querysearch test to use actual codemirror

* chore: added instrumentation for cursor jump to start

* chore: pr review changes

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-11-21 12:35:17 +05:30
Tushar Vats
1c9f4efb9f feat: update signoz cloud integration agent version from v0.0.6 to v0.0.7 (#9644) 2025-11-21 06:27:19 +00:00
Amlan Kumar Nandy
fd839ff1db chore: consistent styling in edit alert v2 (#9645) 2025-11-21 04:49:26 +00:00
Abhishek Kumar Singh
09cbe4aa0d chore: metric name and group by extractor with CH and PromQL support (#9543) 2025-11-20 17:28:16 +00:00
Niladri Adhikary
096e38ee91 fix: handle empty variable list in PrepareWhereClause (#9126) 2025-11-20 22:33:34 +05:30
Tushar Vats
c2393c74fd feat: added more context aware error msgs 2025-11-19 19:05:32 +05:30
Tushar Vats
05f3b68bcf Merge branch 'main' into tvats-improve-error-msg 2025-11-19 18:05:51 +05:30
Tushar Vats
05d5746962 fix: added a helper append function to errors 2025-11-19 16:46:21 +05:30
Tushar Vats
8491604454 fix: added more verbose err msgs 2025-11-19 15:49:53 +05:30
Tushar Vats
45cdbbe94a Merge branch 'main' into tvats-improve-error-msg 2025-11-19 15:06:37 +05:30
Tushar Vats
d85ad40a90 fix: removed newline chars 2025-11-19 15:05:44 +05:30
Tushar Vats
84a03438da Merge branch 'main' into tvats-improve-error-msg 2025-11-18 21:38:20 +05:30
Tushar Vats
b650d7d8db fix: updated unit tests 2025-11-18 21:37:58 +05:30
Tushar Vats
6f71238c0f feat: improve error message 2025-11-18 20:36:02 +05:30
58 changed files with 7131 additions and 1363 deletions

View File

@@ -86,7 +86,7 @@ go-run-enterprise: ## Runs the enterprise go backend server
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER=cluster \
go run -race \
$(GO_BUILD_CONTEXT_ENTERPRISE)/*.go
$(GO_BUILD_CONTEXT_ENTERPRISE)/*.go server
.PHONY: go-test
go-test: ## Runs go unit tests

View File

@@ -246,7 +246,9 @@ func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, t
continue
}
}
results, err := r.Threshold.ShouldAlert(*series, r.Unit())
results, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
ActiveAlerts: r.ActiveAlertsLabelFP(),
})
if err != nil {
return nil, err
}
@@ -296,7 +298,9 @@ func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID,
continue
}
}
results, err := r.Threshold.ShouldAlert(*series, r.Unit())
results, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
ActiveAlerts: r.ActiveAlertsLabelFP(),
})
if err != nil {
return nil, err
}
@@ -410,6 +414,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
GeneratorURL: r.GeneratorURL(),
Receivers: ruleReceiverMap[lbs.Map()[ruletypes.LabelThresholdName]],
Missing: smpl.IsMissing,
IsRecovering: smpl.IsRecovering,
}
}
@@ -422,6 +427,9 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
alert.Value = a.Value
alert.Annotations = a.Annotations
// Update the recovering and missing state of existing alert
alert.IsRecovering = a.IsRecovering
alert.Missing = a.Missing
if v, ok := alert.Labels.Map()[ruletypes.LabelThresholdName]; ok {
alert.Receivers = ruleReceiverMap[v]
}
@@ -480,6 +488,30 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
Value: a.Value,
})
}
// We need to change a firing alert to recovering if the returned sample meets the recovery threshold
changeFiringToRecovering := a.State == model.StateFiring && a.IsRecovering
// We need to change a recovering alert to firing if the returned sample meets the target threshold
changeRecoveringToFiring := a.State == model.StateRecovering && !a.IsRecovering && !a.Missing
// In either case we need to update the alert's state
if changeFiringToRecovering || changeRecoveringToFiring {
state := model.StateRecovering
if changeRecoveringToFiring {
state = model.StateFiring
}
a.State = state
r.logger.DebugContext(ctx, "converting alert state", "name", r.Name(), "state", state)
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: state,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})
}
}
currentState := r.State()
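
The recovery-threshold handling above amounts to a small state machine over firing and recovering alerts. A minimal sketch of the transition rule, assuming the model.StateFiring/model.StateRecovering constants and the IsRecovering/Missing fields from the hunk (the helper name and the model.AlertState type name are assumptions):

func nextState(current model.AlertState, isRecovering, missing bool) (model.AlertState, bool) {
	switch {
	case current == model.StateFiring && isRecovering:
		// The sample met the recovery threshold: firing -> recovering.
		return model.StateRecovering, true
	case current == model.StateRecovering && !isRecovering && !missing:
		// The sample met the target threshold again: recovering -> firing.
		return model.StateFiring, true
	default:
		// No transition; a state history entry is only written on change.
		return current, false
	}
}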

View File

@@ -12,6 +12,7 @@ import {
startCompletion,
} from '@codemirror/autocomplete';
import { javascript } from '@codemirror/lang-javascript';
import * as Sentry from '@sentry/react';
import { Color } from '@signozhq/design-tokens';
import { copilot } from '@uiw/codemirror-theme-copilot';
import { githubLight } from '@uiw/codemirror-theme-github';
@@ -79,6 +80,16 @@ const stopEventsExtension = EditorView.domEventHandlers({
},
});
interface QuerySearchProps {
placeholder?: string;
onChange: (value: string) => void;
queryData: IBuilderQuery;
dataSource: DataSource;
signalSource?: string;
hardcodedAttributeKeys?: QueryKeyDataSuggestionsProps[];
onRun?: (query: string) => void;
}
function QuerySearch({
placeholder,
onChange,
@@ -87,17 +98,8 @@ function QuerySearch({
onRun,
signalSource,
hardcodedAttributeKeys,
}: {
placeholder?: string;
onChange: (value: string) => void;
queryData: IBuilderQuery;
dataSource: DataSource;
signalSource?: string;
hardcodedAttributeKeys?: QueryKeyDataSuggestionsProps[];
onRun?: (query: string) => void;
}): JSX.Element {
}: QuerySearchProps): JSX.Element {
const isDarkMode = useIsDarkMode();
const [query, setQuery] = useState<string>(queryData.filter?.expression || '');
const [valueSuggestions, setValueSuggestions] = useState<any[]>([]);
const [activeKey, setActiveKey] = useState<string>('');
const [isLoadingSuggestions, setIsLoadingSuggestions] = useState(false);
@@ -107,8 +109,12 @@ function QuerySearch({
message: '',
errors: [],
});
const isProgrammaticChangeRef = useRef(false);
const [isEditorReady, setIsEditorReady] = useState(false);
const [isFocused, setIsFocused] = useState(false);
const editorRef = useRef<EditorView | null>(null);
const handleQueryValidation = (newQuery: string): void => {
const handleQueryValidation = useCallback((newQuery: string): void => {
try {
const validationResponse = validateQuery(newQuery);
setValidation(validationResponse);
@@ -119,29 +125,67 @@ function QuerySearch({
errors: [error as IDetailedError],
});
}
};
}, []);
// Track if the query was changed externally (from queryData) vs internally (user input)
const [isExternalQueryChange, setIsExternalQueryChange] = useState(false);
const [lastExternalQuery, setLastExternalQuery] = useState<string>('');
const getCurrentQuery = useCallback(
(): string => editorRef.current?.state.doc.toString() || '',
[],
);
useEffect(() => {
const newQuery = queryData.filter?.expression || '';
// Only mark as external change if the query actually changed from external source
if (newQuery !== lastExternalQuery) {
setQuery(newQuery);
setIsExternalQueryChange(true);
setLastExternalQuery(newQuery);
}
}, [queryData.filter?.expression, lastExternalQuery]);
const updateEditorValue = useCallback(
(value: string, options: { skipOnChange?: boolean } = {}): void => {
const view = editorRef.current;
if (!view) return;
// Validate query when it changes externally (from queryData)
useEffect(() => {
if (isExternalQueryChange && query) {
handleQueryValidation(query);
setIsExternalQueryChange(false);
}
}, [isExternalQueryChange, query]);
const currentValue = view.state.doc.toString();
if (currentValue === value) return;
if (options.skipOnChange) {
isProgrammaticChangeRef.current = true;
}
view.dispatch({
changes: {
from: 0,
to: currentValue.length,
insert: value,
},
selection: {
anchor: value.length,
},
});
},
[],
);
const handleEditorCreate = useCallback((view: EditorView): void => {
editorRef.current = view;
setIsEditorReady(true);
}, []);
useEffect(
() => {
if (!isEditorReady) return;
const newQuery = queryData.filter?.expression || '';
const currentQuery = getCurrentQuery();
/* eslint-disable-next-line sonarjs/no-collapsible-if */
if (newQuery !== currentQuery && !isFocused) {
// Prevent clearing a non-empty editor when queryData becomes empty temporarily
// Only update if newQuery has a value, or if both are empty (initial state)
if (newQuery || !currentQuery) {
updateEditorValue(newQuery, { skipOnChange: true });
if (newQuery) {
handleQueryValidation(newQuery);
}
}
}
},
// eslint-disable-next-line react-hooks/exhaustive-deps
[isEditorReady, queryData.filter?.expression, isFocused],
);
const [keySuggestions, setKeySuggestions] = useState<
QueryKeyDataSuggestionsProps[] | null
@@ -150,7 +194,6 @@ function QuerySearch({
const [showExamples] = useState(false);
const [cursorPos, setCursorPos] = useState({ line: 0, ch: 0 });
const [isFocused, setIsFocused] = useState(false);
const [
isFetchingCompleteValuesList,
@@ -159,8 +202,6 @@ function QuerySearch({
const lastPosRef = useRef<{ line: number; ch: number }>({ line: 0, ch: 0 });
// Reference to the editor view for programmatic autocompletion
const editorRef = useRef<EditorView | null>(null);
const lastKeyRef = useRef<string>('');
const lastFetchedKeyRef = useRef<string>('');
const lastValueRef = useRef<string>('');
@@ -506,6 +547,7 @@ function QuerySearch({
if (!editorRef.current) {
editorRef.current = viewUpdate.view;
setIsEditorReady(true);
}
const selection = viewUpdate.view.state.selection.main;
@@ -521,7 +563,15 @@ function QuerySearch({
const lastPos = lastPosRef.current;
if (newPos.line !== lastPos.line || newPos.ch !== lastPos.ch) {
setCursorPos(newPos);
setCursorPos((lastPos) => {
if (newPos.ch !== lastPos.ch && newPos.ch === 0) {
Sentry.captureEvent({
message: `Cursor jumped to start of line from ${lastPos.ch} to ${newPos.ch}`,
level: 'warning',
});
}
return newPos;
});
lastPosRef.current = newPos;
if (doc) {
@@ -554,16 +604,17 @@ function QuerySearch({
}, []);
const handleChange = (value: string): void => {
setQuery(value);
if (isProgrammaticChangeRef.current) {
isProgrammaticChangeRef.current = false;
return;
}
onChange(value);
// Mark as internal change to avoid triggering external validation
setIsExternalQueryChange(false);
// Update lastExternalQuery to prevent external validation trigger
setLastExternalQuery(value);
};
const handleBlur = (): void => {
handleQueryValidation(query);
const currentQuery = getCurrentQuery();
handleQueryValidation(currentQuery);
setIsFocused(false);
};
@@ -582,12 +633,11 @@ function QuerySearch({
const handleExampleClick = (exampleQuery: string): void => {
// If there's an existing query, append the example with AND
const newQuery = query ? `${query} AND ${exampleQuery}` : exampleQuery;
setQuery(newQuery);
// Mark as internal change to avoid triggering external validation
setIsExternalQueryChange(false);
// Update lastExternalQuery to prevent external validation trigger
setLastExternalQuery(newQuery);
const currentQuery = getCurrentQuery();
const newQuery = currentQuery
? `${currentQuery} AND ${exampleQuery}`
: exampleQuery;
updateEditorValue(newQuery);
};
// Helper function to render a badge for the current context mode
@@ -622,8 +672,10 @@ function QuerySearch({
const word = context.matchBefore(/[a-zA-Z0-9_.:/?&=#%\-\[\]]*/);
if (word?.from === word?.to && !context.explicit) return null;
// Get current query from editor
const currentQuery = editorRef.current?.state.doc.toString() || '';
// Get the query context at the cursor position
const queryContext = getQueryContextAtCursor(query, cursorPos.ch);
const queryContext = getQueryContextAtCursor(currentQuery, cursorPos.ch);
// Define autocomplete options based on the context
let options: {
@@ -1119,7 +1171,8 @@ function QuerySearch({
if (queryContext.isInParenthesis) {
// Different suggestions based on the context within parenthesis or bracket
const curChar = query.charAt(cursorPos.ch - 1) || '';
const currentQuery = editorRef.current?.state.doc.toString() || '';
const curChar = currentQuery.charAt(cursorPos.ch - 1) || '';
if (curChar === '(' || curChar === '[') {
// Right after opening parenthesis/bracket
@@ -1268,7 +1321,7 @@ function QuerySearch({
style={{
position: 'absolute',
top: 8,
right: validation.isValid === false && query ? 40 : 8, // Move left when error shown
right: validation.isValid === false && getCurrentQuery() ? 40 : 8, // Move left when error shown
cursor: 'help',
zIndex: 10,
transition: 'right 0.2s ease',
@@ -1289,10 +1342,10 @@ function QuerySearch({
</Tooltip>
<CodeMirror
value={query}
theme={isDarkMode ? copilot : githubLight}
onChange={handleChange}
onUpdate={handleUpdate}
onCreateEditor={handleEditorCreate}
className={cx('query-where-clause-editor', {
isValid: validation.isValid === true,
hasErrors: validation.errors.length > 0,
@@ -1330,7 +1383,7 @@ function QuerySearch({
// Mod-Enter is usually Ctrl-Enter or Cmd-Enter based on OS
run: (): boolean => {
if (onRun && typeof onRun === 'function') {
onRun(query);
onRun(getCurrentQuery());
} else {
handleRunQuery();
}
@@ -1356,7 +1409,7 @@ function QuerySearch({
onBlur={handleBlur}
/>
{query && validation.isValid === false && !isFocused && (
{getCurrentQuery() && validation.isValid === false && !isFocused && (
<div
className={cx('query-status-container', {
hasErrors: validation.errors.length > 0,

View File

@@ -9,7 +9,13 @@ import SpanScopeSelector from 'container/QueryBuilder/filters/QueryBuilderSearch
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations';
import { Copy, Ellipsis, Trash } from 'lucide-react';
import { memo, useCallback, useMemo, useState } from 'react';
import {
ForwardedRef,
forwardRef,
useCallback,
useMemo,
useState,
} from 'react';
import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
import { HandleChangeQueryDataV5 } from 'types/common/operations.types';
import { DataSource } from 'types/common/queryBuilder';
@@ -20,28 +26,29 @@ import QueryAddOns from './QueryAddOns/QueryAddOns';
import QueryAggregation from './QueryAggregation/QueryAggregation';
import QuerySearch from './QuerySearch/QuerySearch';
export const QueryV2 = memo(function QueryV2({
ref,
index,
queryVariant,
query,
filterConfigs,
isListViewPanel = false,
showTraceOperator = false,
hasTraceOperator = false,
version,
showOnlyWhereClause = false,
signalSource = '',
isMultiQueryAllowed = false,
onSignalSourceChange,
signalSourceChangeEnabled = false,
queriesCount = 1,
}: QueryProps & {
ref: React.RefObject<HTMLDivElement>;
onSignalSourceChange: (value: string) => void;
signalSourceChangeEnabled: boolean;
queriesCount: number;
}): JSX.Element {
export const QueryV2 = forwardRef(function QueryV2(
{
index,
queryVariant,
query,
filterConfigs,
isListViewPanel = false,
showTraceOperator = false,
hasTraceOperator = false,
version,
showOnlyWhereClause = false,
signalSource = '',
isMultiQueryAllowed = false,
onSignalSourceChange,
signalSourceChangeEnabled = false,
queriesCount = 1,
}: QueryProps & {
onSignalSourceChange: (value: string) => void;
signalSourceChangeEnabled: boolean;
queriesCount: number;
},
ref: ForwardedRef<HTMLDivElement>,
): JSX.Element {
const { cloneQuery, panelType } = useQueryBuilder();
const showFunctions = query?.functions?.length > 0;
@@ -295,3 +302,5 @@ export const QueryV2 = memo(function QueryV2({
</div>
);
});
QueryV2.displayName = 'QueryV2';

View File

@@ -5,13 +5,85 @@ import { getKeySuggestions } from 'api/querySuggestions/getKeySuggestions';
import { getValueSuggestions } from 'api/querySuggestions/getValueSuggestion';
import { initialQueriesMap } from 'constants/queryBuilder';
import * as UseQBModule from 'hooks/queryBuilder/useQueryBuilder';
import React from 'react';
import { render, screen, userEvent, waitFor } from 'tests/test-utils';
import { fireEvent, render, userEvent, waitFor } from 'tests/test-utils';
import type { QueryKeyDataSuggestionsProps } from 'types/api/querySuggestions/types';
import { DataSource } from 'types/common/queryBuilder';
import QuerySearch from '../QuerySearch/QuerySearch';
const CM_EDITOR_SELECTOR = '.cm-editor .cm-content';
// Mock DOM APIs that CodeMirror needs
beforeAll(() => {
// Mock getClientRects and getBoundingClientRect for Range objects
const mockRect: DOMRect = {
width: 100,
height: 20,
top: 0,
left: 0,
right: 100,
bottom: 20,
x: 0,
y: 0,
toJSON: (): DOMRect => mockRect,
} as DOMRect;
// Create a minimal Range mock with only what CodeMirror actually uses
const createMockRange = (): Range => {
let startContainer: Node = document.createTextNode('');
let endContainer: Node = document.createTextNode('');
let startOffset = 0;
let endOffset = 0;
const mockRange = {
// CodeMirror uses these for text measurement
getClientRects: (): DOMRectList =>
(({
length: 1,
item: (index: number): DOMRect | null => (index === 0 ? mockRect : null),
0: mockRect,
*[Symbol.iterator](): Generator<DOMRect> {
yield mockRect;
},
} as unknown) as DOMRectList),
getBoundingClientRect: (): DOMRect => mockRect,
// CodeMirror calls these to set up text ranges
setStart: (node: Node, offset: number): void => {
startContainer = node;
startOffset = offset;
},
setEnd: (node: Node, offset: number): void => {
endContainer = node;
endOffset = offset;
},
// Minimal Range properties (TypeScript requires these)
get startContainer(): Node {
return startContainer;
},
get endContainer(): Node {
return endContainer;
},
get startOffset(): number {
return startOffset;
},
get endOffset(): number {
return endOffset;
},
get collapsed(): boolean {
return startContainer === endContainer && startOffset === endOffset;
},
commonAncestorContainer: document.body,
};
return (mockRange as unknown) as Range;
};
// Mock document.createRange to return a new Range instance each time
document.createRange = (): Range => createMockRange();
// Mock getBoundingClientRect for elements
Element.prototype.getBoundingClientRect = (): DOMRect => mockRect;
});
jest.mock('hooks/useDarkMode', () => ({
useIsDarkMode: (): boolean => false,
}));
@@ -31,24 +103,6 @@ jest.mock('hooks/queryBuilder/useQueryBuilder', () => {
};
});
jest.mock('@codemirror/autocomplete', () => ({
autocompletion: (): Record<string, unknown> => ({}),
closeCompletion: (): boolean => true,
completionKeymap: [] as unknown[],
startCompletion: (): boolean => true,
}));
jest.mock('@codemirror/lang-javascript', () => ({
javascript: (): Record<string, unknown> => ({}),
}));
jest.mock('@uiw/codemirror-theme-copilot', () => ({
copilot: {},
}));
jest.mock('@uiw/codemirror-theme-github', () => ({
githubLight: {},
}));
jest.mock('api/querySuggestions/getKeySuggestions', () => ({
getKeySuggestions: jest.fn().mockResolvedValue({
data: {
@@ -63,153 +117,19 @@ jest.mock('api/querySuggestions/getValueSuggestion', () => ({
}),
}));
// Mock CodeMirror to a simple textarea to make it testable and call onUpdate
jest.mock(
'@uiw/react-codemirror',
(): Record<string, unknown> => {
// Minimal EditorView shape used by the component
class EditorViewMock {}
(EditorViewMock as any).domEventHandlers = (): unknown => ({} as unknown);
(EditorViewMock as any).lineWrapping = {} as unknown;
(EditorViewMock as any).editable = { of: () => ({}) } as unknown;
// Note: We're NOT mocking CodeMirror here - using the real component
// This provides integration testing with the actual CodeMirror editor
const keymap = { of: (arr: unknown) => arr } as unknown;
const Prec = { highest: (ext: unknown) => ext } as unknown;
type CodeMirrorProps = {
value?: string;
onChange?: (v: string) => void;
onFocus?: () => void;
onBlur?: () => void;
placeholder?: string;
onCreateEditor?: (view: unknown) => unknown;
onUpdate?: (arg: {
view: {
state: {
selection: { main: { head: number } };
doc: {
toString: () => string;
lineAt: (
_pos: number,
) => { number: number; from: number; to: number; text: string };
};
};
};
}) => void;
'data-testid'?: string;
extensions?: unknown[];
};
function CodeMirrorMock({
value,
onChange,
onFocus,
onBlur,
placeholder,
onCreateEditor,
onUpdate,
'data-testid': dataTestId,
extensions,
}: CodeMirrorProps): JSX.Element {
const [localValue, setLocalValue] = React.useState<string>(value ?? '');
// Provide a fake editor instance
React.useEffect(() => {
if (onCreateEditor) {
onCreateEditor(new EditorViewMock() as any);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
// Call onUpdate whenever localValue changes to simulate cursor and doc
React.useEffect(() => {
if (onUpdate) {
const text = String(localValue ?? '');
const head = text.length;
onUpdate({
view: {
state: {
selection: { main: { head } },
doc: {
toString: (): string => text,
lineAt: () => ({
number: 1,
from: 0,
to: text.length,
text,
}),
},
},
},
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [localValue]);
const handleKeyDown = (
e: React.KeyboardEvent<HTMLTextAreaElement>,
): void => {
const isModEnter = e.key === 'Enter' && (e.metaKey || e.ctrlKey);
if (!isModEnter) return;
const exts: unknown[] = Array.isArray(extensions) ? extensions : [];
const flat: unknown[] = exts.flatMap((x: unknown) =>
Array.isArray(x) ? x : [x],
);
const keyBindings = flat.filter(
(x) =>
Boolean(x) &&
typeof x === 'object' &&
'key' in (x as Record<string, unknown>),
) as Array<{ key?: string; run?: () => boolean | void }>;
keyBindings
.filter((b) => b.key === 'Mod-Enter' && typeof b.run === 'function')
.forEach((b) => {
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
b.run!();
});
};
return (
<textarea
data-testid={dataTestId || 'query-where-clause-editor'}
placeholder={placeholder}
value={localValue}
onChange={(e): void => {
setLocalValue(e.target.value);
if (onChange) {
onChange(e.target.value);
}
}}
onFocus={onFocus}
onBlur={onBlur}
onKeyDown={handleKeyDown}
style={{ width: '100%', minHeight: 80 }}
/>
);
}
return {
__esModule: true,
default: CodeMirrorMock,
EditorView: EditorViewMock,
keymap,
Prec,
};
},
);
const handleRunQueryMock = ((UseQBModule as unknown) as {
handleRunQuery: jest.MockedFunction<() => void>;
}).handleRunQuery;
const PLACEHOLDER_TEXT =
"Enter your filter query (e.g., http.status_code >= 500 AND service.name = 'frontend')";
const TESTID_EDITOR = 'query-where-clause-editor';
const SAMPLE_KEY_TYPING = 'http.';
const SAMPLE_VALUE_TYPING_INCOMPLETE = " service.name = '";
const SAMPLE_VALUE_TYPING_COMPLETE = " service.name = 'frontend'";
const SAMPLE_STATUS_QUERY = " status_code = '200'";
const SAMPLE_VALUE_TYPING_INCOMPLETE = "service.name = '";
const SAMPLE_VALUE_TYPING_COMPLETE = "service.name = 'frontend'";
const SAMPLE_STATUS_QUERY = "http.status_code = '200'";
describe('QuerySearch', () => {
describe('QuerySearch (Integration with Real CodeMirror)', () => {
it('renders with placeholder', () => {
render(
<QuerySearch
@@ -219,21 +139,19 @@ describe('QuerySearch', () => {
/>,
);
expect(screen.getByPlaceholderText(PLACEHOLDER_TEXT)).toBeInTheDocument();
// CodeMirror renders a contenteditable div, so we check for the container
const editorContainer = document.querySelector('.query-where-clause-editor');
expect(editorContainer).toBeInTheDocument();
});
it('fetches key suggestions when typing a key (debounced)', async () => {
jest.useFakeTimers();
const advance = (ms: number): void => {
jest.advanceTimersByTime(ms);
};
const user = userEvent.setup({
advanceTimers: advance,
pointerEventsCheck: 0,
});
// Use real timers for CodeMirror integration tests
const mockedGetKeys = getKeySuggestions as jest.MockedFunction<
typeof getKeySuggestions
>;
mockedGetKeys.mockClear();
const user = userEvent.setup({ pointerEventsCheck: 0 });
render(
<QuerySearch
@@ -243,28 +161,33 @@ describe('QuerySearch', () => {
/>,
);
const editor = screen.getByTestId(TESTID_EDITOR);
await user.type(editor, SAMPLE_KEY_TYPING);
advance(1000);
await waitFor(() => expect(mockedGetKeys).toHaveBeenCalled(), {
timeout: 3000,
// Wait for CodeMirror to initialize
await waitFor(() => {
const editor = document.querySelector(CM_EDITOR_SELECTOR);
expect(editor).toBeInTheDocument();
});
// Find the CodeMirror editor contenteditable element
const editor = document.querySelector(CM_EDITOR_SELECTOR) as HTMLElement;
// Focus and type into the editor
await user.click(editor);
await user.type(editor, SAMPLE_KEY_TYPING);
// Wait for debounced API call (300ms debounce + some buffer)
await waitFor(() => expect(mockedGetKeys).toHaveBeenCalled(), {
timeout: 2000,
});
jest.useRealTimers();
});
it('fetches value suggestions when editing value context', async () => {
jest.useFakeTimers();
const advance = (ms: number): void => {
jest.advanceTimersByTime(ms);
};
const user = userEvent.setup({
advanceTimers: advance,
pointerEventsCheck: 0,
});
// Use real timers for CodeMirror integration tests
const mockedGetValues = getValueSuggestions as jest.MockedFunction<
typeof getValueSuggestions
>;
mockedGetValues.mockClear();
const user = userEvent.setup({ pointerEventsCheck: 0 });
render(
<QuerySearch
@@ -274,21 +197,28 @@ describe('QuerySearch', () => {
/>,
);
const editor = screen.getByTestId(TESTID_EDITOR);
await user.type(editor, SAMPLE_VALUE_TYPING_INCOMPLETE);
advance(1000);
await waitFor(() => expect(mockedGetValues).toHaveBeenCalled(), {
timeout: 3000,
// Wait for CodeMirror to initialize
await waitFor(() => {
const editor = document.querySelector(CM_EDITOR_SELECTOR);
expect(editor).toBeInTheDocument();
});
const editor = document.querySelector(CM_EDITOR_SELECTOR) as HTMLElement;
await user.click(editor);
await user.type(editor, SAMPLE_VALUE_TYPING_INCOMPLETE);
// Wait for debounced API call (300ms debounce + some buffer)
await waitFor(() => expect(mockedGetValues).toHaveBeenCalled(), {
timeout: 2000,
});
jest.useRealTimers();
});
it('fetches key suggestions on mount for LOGS', async () => {
jest.useFakeTimers();
// Use real timers for CodeMirror integration tests
const mockedGetKeysOnMount = getKeySuggestions as jest.MockedFunction<
typeof getKeySuggestions
>;
mockedGetKeysOnMount.mockClear();
render(
<QuerySearch
@@ -298,17 +228,15 @@ describe('QuerySearch', () => {
/>,
);
jest.advanceTimersByTime(1000);
// Wait for debounced API call (300ms debounce + some buffer)
await waitFor(() => expect(mockedGetKeysOnMount).toHaveBeenCalled(), {
timeout: 3000,
timeout: 2000,
});
const lastArgs = mockedGetKeysOnMount.mock.calls[
mockedGetKeysOnMount.mock.calls.length - 1
]?.[0] as { signal: unknown; searchText: string };
expect(lastArgs).toMatchObject({ signal: DataSource.LOGS, searchText: '' });
jest.useRealTimers();
});
it('calls provided onRun on Mod-Enter', async () => {
@@ -324,12 +252,26 @@ describe('QuerySearch', () => {
/>,
);
const editor = screen.getByTestId(TESTID_EDITOR);
// Wait for CodeMirror to initialize
await waitFor(() => {
const editor = document.querySelector(CM_EDITOR_SELECTOR);
expect(editor).toBeInTheDocument();
});
const editor = document.querySelector(CM_EDITOR_SELECTOR) as HTMLElement;
await user.click(editor);
await user.type(editor, SAMPLE_STATUS_QUERY);
await user.keyboard('{Meta>}{Enter}{/Meta}');
await waitFor(() => expect(onRun).toHaveBeenCalled());
// Use fireEvent for keyboard shortcuts as userEvent might not work well with CodeMirror
const modKey = navigator.platform.includes('Mac') ? 'metaKey' : 'ctrlKey';
fireEvent.keyDown(editor, {
key: 'Enter',
code: 'Enter',
[modKey]: true,
keyCode: 13,
});
await waitFor(() => expect(onRun).toHaveBeenCalled(), { timeout: 2000 });
});
it('calls handleRunQuery when Mod-Enter without onRun', async () => {
@@ -348,11 +290,62 @@ describe('QuerySearch', () => {
/>,
);
const editor = screen.getByTestId(TESTID_EDITOR);
// Wait for CodeMirror to initialize
await waitFor(() => {
const editor = document.querySelector(CM_EDITOR_SELECTOR);
expect(editor).toBeInTheDocument();
});
const editor = document.querySelector(CM_EDITOR_SELECTOR) as HTMLElement;
await user.click(editor);
await user.type(editor, SAMPLE_VALUE_TYPING_COMPLETE);
await user.keyboard('{Meta>}{Enter}{/Meta}');
await waitFor(() => expect(mockedHandleRunQuery).toHaveBeenCalled());
// Use fireEvent for keyboard shortcuts as userEvent might not work well with CodeMirror
const modKey = navigator.platform.includes('Mac') ? 'metaKey' : 'ctrlKey';
fireEvent.keyDown(editor, {
key: 'Enter',
code: 'Enter',
[modKey]: true,
keyCode: 13,
});
await waitFor(() => expect(mockedHandleRunQuery).toHaveBeenCalled(), {
timeout: 2000,
});
});
it('initializes CodeMirror with expression from queryData.filter.expression on mount', async () => {
const testExpression =
"http.status_code >= 500 AND service.name = 'frontend'";
const queryDataWithExpression = {
...initialQueriesMap.logs.builder.queryData[0],
filter: {
expression: testExpression,
},
};
render(
<QuerySearch
onChange={jest.fn() as jest.MockedFunction<(v: string) => void>}
queryData={queryDataWithExpression}
dataSource={DataSource.LOGS}
/>,
);
// Wait for CodeMirror to initialize and the expression to be set
await waitFor(
() => {
// CodeMirror stores content in .cm-content, check the text content
const editorContent = document.querySelector(
CM_EDITOR_SELECTOR,
) as HTMLElement;
expect(editorContent).toBeInTheDocument();
// CodeMirror may render the text in multiple ways, check if it contains our expression
const textContent = editorContent.textContent || '';
expect(textContent).toContain('http.status_code');
expect(textContent).toContain('service.name');
},
{ timeout: 3000 },
);
});
});

View File

@@ -135,6 +135,62 @@
}
}
.alert-details-v2 {
.alert-details__breadcrumb {
padding-left: 32px;
.breadcrumb-item {
font-size: 13px;
}
}
.alert-info {
margin-right: 16px;
.alert-info__action-buttons {
.alert-action-buttons {
.ant-btn {
font-size: 13px;
}
}
}
}
.divider {
display: none;
}
.tabs-and-filters {
.ant-tabs {
.ant-tabs-nav {
margin: 0 8px;
}
.ant-tabs-tab {
.ant-tabs-tab-btn {
border-radius: 2px;
border: 1px solid var(--bg-slate-400);
background: var(--bg-ink-400);
font-size: 13px;
&[aria-selected='true'] {
background: var(--bg-ink-500);
}
}
}
.ant-tabs-content-holder {
.ant-tabs-content {
.ant-tabs-tabpane {
.alert-history {
margin: 0 16px;
}
}
}
}
}
}
}
.lightMode {
.alert-details {
&-tabs {

View File

@@ -2,6 +2,7 @@ import './AlertDetails.styles.scss';
import { Breadcrumb, Button, Divider, Empty } from 'antd';
import logEvent from 'api/common/logEvent';
import classNames from 'classnames';
import { Filters } from 'components/AlertDetailsFilters/Filters';
import RouteTab from 'components/RouteTab';
import Spinner from 'components/Spinner';
@@ -13,7 +14,10 @@ import { useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useLocation } from 'react-router-dom';
import { AlertTypes } from 'types/api/alerts/alertTypes';
import { PostableAlertRuleV2 } from 'types/api/alerts/alertTypesV2';
import {
NEW_ALERT_SCHEMA_VERSION,
PostableAlertRuleV2,
} from 'types/api/alerts/alertTypesV2';
import AlertHeader from './AlertHeader/AlertHeader';
import { useGetAlertRuleDetails, useRouteTabUtils } from './hooks';
@@ -117,6 +121,8 @@ function AlertDetails(): JSX.Element {
}
};
const isV2Alert = alertRuleDetails?.schemaVersion === NEW_ALERT_SCHEMA_VERSION;
// Show spinner until we have alert data loaded
if (isLoading && !alertRuleDetails) {
return <Spinner />;
@@ -129,7 +135,9 @@ function AlertDetails(): JSX.Element {
initialAlertType={alertRuleDetails?.alertType as AlertTypes}
initialAlertState={initialAlertState}
>
<div className="alert-details">
<div
className={classNames('alert-details', { 'alert-details-v2': isV2Alert })}
>
<Breadcrumb
className="alert-details__breadcrumb"
items={[

View File

@@ -25,6 +25,11 @@ const menuItemStyle: CSSProperties = {
letterSpacing: '0.14px',
};
const menuItemStyleV2: CSSProperties = {
fontSize: '13px',
letterSpacing: '0.13px',
};
function AlertActionButtons({
ruleId,
alertDetails,
@@ -63,6 +68,8 @@ function AlertActionButtons({
const isV2Alert = alertDetails.schemaVersion === NEW_ALERT_SCHEMA_VERSION;
const finalMenuItemStyle = isV2Alert ? menuItemStyleV2 : menuItemStyle;
const menuItems: MenuProps['items'] = [
...(!isV2Alert
? [
@@ -71,7 +78,7 @@ function AlertActionButtons({
label: 'Rename',
icon: <PenLine size={16} color={Color.BG_VANILLA_400} />,
onClick: handleRename,
style: menuItemStyle,
style: finalMenuItemStyle,
},
]
: []),
@@ -80,7 +87,7 @@ function AlertActionButtons({
label: 'Duplicate',
icon: <Copy size={16} color={Color.BG_VANILLA_400} />,
onClick: handleAlertDuplicate,
style: menuItemStyle,
style: finalMenuItemStyle,
},
{
key: 'delete-rule',
@@ -88,7 +95,7 @@ function AlertActionButtons({
icon: <Trash2 size={16} color={Color.BG_CHERRY_400} />,
onClick: handleAlertDelete,
style: {
...menuItemStyle,
...finalMenuItemStyle,
color: Color.BG_CHERRY_400,
},
},

go.mod (2 changes)
View File

@@ -4,7 +4,7 @@ go 1.24.0
require (
dario.cat/mergo v1.0.1
github.com/AfterShip/clickhouse-sql-parser v0.4.11
github.com/AfterShip/clickhouse-sql-parser v0.4.16
github.com/ClickHouse/clickhouse-go/v2 v2.40.1
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd

go.sum (4 changes)
View File

@@ -66,8 +66,8 @@ dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/AfterShip/clickhouse-sql-parser v0.4.11 h1:fZMKAjRmgzW44+hEhF6ywi4VjFZQjJ8QrFBbgBsjmF4=
github.com/AfterShip/clickhouse-sql-parser v0.4.11/go.mod h1:W0Z82wJWkJxz2RVun/RMwxue3g7ut47Xxl+SFqdJGus=
github.com/AfterShip/clickhouse-sql-parser v0.4.16 h1:gpl+wXclYUKT0p4+gBq22XeRYWwEoZ9f35vogqMvkLQ=
github.com/AfterShip/clickhouse-sql-parser v0.4.16/go.mod h1:W0Z82wJWkJxz2RVun/RMwxue3g7ut47Xxl+SFqdJGus=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=

View File

@@ -27,7 +27,7 @@ func (a *AuthN) Authenticate(ctx context.Context, email string, password string,
}
if !factorPassword.Equals(password) {
return nil, errors.New(errors.TypeUnauthenticated, types.ErrCodeIncorrectPassword, "invalid email orpassword")
return nil, errors.New(errors.TypeUnauthenticated, types.ErrCodeIncorrectPassword, "invalid email or password")
}
return authtypes.NewIdentity(user.ID, orgID, user.Email, user.Role), nil

View File

@@ -112,7 +112,7 @@ func (b *base) WithUrl(u string) *base {
}
}
// WithUrl adds additional messages to the base error and returns a new base error.
// WithAdditional adds additional messages to the base error and returns a new base error.
func (b *base) WithAdditional(a ...string) *base {
return &base{
t: b.t,

View File

@@ -51,3 +51,88 @@ func TestUnwrapb(t *testing.T) {
atyp, _, _, _, _, _ = Unwrapb(oerr)
assert.Equal(t, TypeInternal, atyp)
}
func TestWithAdditionalf(t *testing.T) {
t.Run("adds additional message to base error", func(t *testing.T) {
typ := typ{"test-error"}
baseErr := New(typ, MustNewCode("test_code"), "primary message")
result := WithAdditionalf(baseErr, "additional context %d", 456)
assert.NotNil(t, result)
_, _, msg, _, _, additional := Unwrapb(result)
assert.Equal(t, "primary message", msg, "primary message should not change")
assert.Equal(t, []string{"additional context 456"}, additional)
})
t.Run("adds additional message to non-base error", func(t *testing.T) {
stdErr := errors.New("some error")
result := WithAdditionalf(stdErr, "extra info: %s", "details")
assert.NotNil(t, result)
_, _, _, _, _, additional := Unwrapb(result)
assert.Equal(t, []string{"extra info: details"}, additional)
})
t.Run("appends to existing additional messages", func(t *testing.T) {
typ := typ{"test-error"}
baseErr := New(typ, MustNewCode("test_code"), "message").
WithAdditional("first additional", "second additional")
result := WithAdditionalf(baseErr, "third additional %s", "msg")
_, _, _, _, _, additional := Unwrapb(result)
assert.Equal(t, []string{
"first additional",
"second additional",
"third additional msg",
}, additional)
})
}
func TestWithUrl(t *testing.T) {
t.Run("adds url to base error", func(t *testing.T) {
typ := typ{"test-error"}
baseErr := New(typ, MustNewCode("test_code"), "error message")
result := baseErr.WithUrl("https://docs.signoz.io/errors")
_, _, _, _, url, _ := Unwrapb(result)
assert.Equal(t, "https://docs.signoz.io/errors", url)
})
t.Run("replaces existing url", func(t *testing.T) {
typ := typ{"test-error"}
baseErr := New(typ, MustNewCode("test_code"), "error message").
WithUrl("https://old-url.com")
result := baseErr.WithUrl("https://new-url.com")
_, _, _, _, url, _ := Unwrapb(result)
assert.Equal(t, "https://new-url.com", url)
})
}
func TestWithAdditional(t *testing.T) {
t.Run("adds additional messages to base error", func(t *testing.T) {
typ := typ{"test-error"}
baseErr := New(typ, MustNewCode("test_code"), "main message")
result := baseErr.WithAdditional("hint 1", "hint 2", "hint 3")
_, _, _, _, _, additional := Unwrapb(result)
assert.Equal(t, []string{"hint 1", "hint 2", "hint 3"}, additional)
})
t.Run("replaces existing additional messages", func(t *testing.T) {
typ := typ{"test-error"}
baseErr := New(typ, MustNewCode("test_code"), "message").
WithAdditional("old hint")
result := baseErr.WithAdditional("new hint 1", "new hint 2")
_, _, _, _, _, additional := Unwrapb(result)
assert.Equal(t, []string{"new hint 1", "new hint 2"}, additional)
})
}
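
Together with the WithAdditional doc-comment fix above, these tests pin down the helper semantics: WithAdditional replaces the additional-message list on a base error, while WithAdditionalf appends to it and also works on non-base errors. A minimal usage sketch under those assumptions (the function, code value, and messages are hypothetical):

package example

import "github.com/SigNoz/signoz/pkg/errors"

func loadConfig(path string) error {
	err := errors.New(errors.TypeInternal, errors.MustNewCode("config_load_failed"), "failed to load configuration")
	// WithAdditional sets the hint list on the base error...
	withHints := err.WithAdditional("verify the file exists and is readable")
	// ...and WithAdditionalf appends one more formatted hint.
	return errors.WithAdditionalf(withHints, "path: %s", path)
}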

View File

@@ -0,0 +1,687 @@
package queryfilterextractor
import (
"fmt"
"strings"
clickhouse "github.com/AfterShip/clickhouse-sql-parser/parser"
"github.com/SigNoz/signoz/pkg/errors"
)
const (
// MetricNameColumn is the column name used for filtering metrics
MetricNameColumn = "metric_name"
)
// ClickHouseFilterExtractor extracts metric names and grouping keys from ClickHouse SQL queries
type ClickHouseFilterExtractor struct{}
// NewClickHouseFilterExtractor creates a new ClickHouse filter extractor
func NewClickHouseFilterExtractor() *ClickHouseFilterExtractor {
return &ClickHouseFilterExtractor{}
}
// Extract parses a ClickHouse query and extracts metric names and grouping keys
func (e *ClickHouseFilterExtractor) Extract(query string) (*FilterResult, error) {
p := clickhouse.NewParser(query)
stmts, err := p.ParseStmts()
if err != nil {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to parse clickhouse query: %s", err.Error())
}
result := &FilterResult{MetricNames: []string{}, GroupByColumns: []ColumnInfo{}}
metricNames := make(map[string]bool)
// Track top-level queries for GROUP BY extraction
topLevelQueries := make(map[*clickhouse.SelectQuery]bool)
// Process all statements
for _, stmt := range stmts {
selectQuery, ok := stmt.(*clickhouse.SelectQuery)
if !ok {
continue
}
// Mark as top-level
topLevelQueries[selectQuery] = true
// Walk the AST to extract metrics
clickhouse.Walk(selectQuery, func(node clickhouse.Expr) bool {
e.fillMetricNamesFromExpr(node, metricNames)
return true // Continue traversal
})
}
// Extract GROUP BY from the top-level queries by first building a map of CTEs and
// then recursively extracting the GROUP BY from the CTEs and subqueries.
// Build CTE map for all top-level queries
cteMap := make(map[string]*clickhouse.SelectQuery)
for query := range topLevelQueries {
e.buildCTEMap(query, cteMap)
}
// Extract GROUP BY with aliases and origins from the CTEs and subqueries using recursive approach
// Use a map to handle duplicates (last ColumnInfo wins across queries)
groupByColumnsMap := make(map[string]ColumnInfo) // column name -> ColumnInfo
visited := make(map[*clickhouse.SelectQuery]bool)
for query := range topLevelQueries {
columns, err := e.extractGroupByColumns(query, cteMap, visited)
if err != nil {
return nil, err
}
for _, col := range columns {
// Last column info wins for duplicate columns across multiple queries
groupByColumnsMap[col.Name] = col
}
}
// Convert sets to slices
for metric := range metricNames {
result.MetricNames = append(result.MetricNames, metric)
}
// Build GroupByColumns from the map
for _, colInfo := range groupByColumnsMap {
result.GroupByColumns = append(result.GroupByColumns, colInfo)
}
return result, nil
}
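// Illustrative usage (documentation sketch, not part of the original change set):
func exampleExtract() {
	e := NewClickHouseFilterExtractor()
	res, err := e.Extract("SELECT region, sum(value) AS total FROM metrics WHERE metric_name IN ('cpu_usage', 'mem_usage') GROUP BY region")
	if err != nil {
		return
	}
	// MetricNames is collected via a set, so ordering is not guaranteed.
	fmt.Println(res.MetricNames) // e.g. [cpu_usage mem_usage]
	// Each GROUP BY key carries its name, SELECT alias, and traced origin.
	fmt.Println(res.GroupByColumns)
}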
// ========================================
// Metric Name Extraction
// ========================================
// fillMetricNamesFromExpr extracts metric names from various node types
func (e *ClickHouseFilterExtractor) fillMetricNamesFromExpr(node clickhouse.Expr, metricNames map[string]bool) {
switch n := node.(type) {
case *clickhouse.BinaryOperation:
e.fillMetricFromBinaryOp(n, metricNames)
}
}
// fillMetricFromBinaryOp extracts metrics from binary operations
func (e *ClickHouseFilterExtractor) fillMetricFromBinaryOp(op *clickhouse.BinaryOperation, metricNames map[string]bool) {
// Check if left side is metric_name column
leftCol := e.getColumnName(op.LeftExpr)
rightCol := e.getColumnName(op.RightExpr)
// Handle metric_name on left side: metric_name = 'value'
if leftCol == MetricNameColumn {
e.fillMetricWithBinaryOpConditions(op, op.RightExpr, metricNames)
return
}
// Handle metric_name on right side: 'value' = metric_name
if rightCol == MetricNameColumn {
e.fillMetricWithBinaryOpConditions(op, op.LeftExpr, metricNames)
return
}
}
// fillMetricWithBinaryOpConditions extracts metric names from the value side of a binary operation
//
// Supported operators:
// - "=", "==": Extracts literal string values or values from any() function
// - "IN", "GLOBAL IN": Extracts all literal string values from the list
//
// Unsupported operators (can be added later if needed):
// - "!=", "<>", "NOT IN": Negative filters. (e.g., metric_name != 'a')
// - "LIKE", "ILIKE": Pattern matching filters
// - "NOT LIKE", "NOT ILIKE": Negative pattern matching filters
// - "OR", "AND": Boolean operators as the Walk function will automatically traverse both sides
// of OR/AND operations and extract metrics from each branch. (e.g., metric_name='a' OR metric_name='b')
func (e *ClickHouseFilterExtractor) fillMetricWithBinaryOpConditions(op *clickhouse.BinaryOperation, valueExpr clickhouse.Expr, metricNames map[string]bool) {
switch op.Operation {
case clickhouse.TokenKindSingleEQ, clickhouse.TokenKindDoubleEQ:
// metric_name = 'value' or metric_name = any(['a', 'b'])
// Skip if value side is a function call (function-wrapped literals are ignored, test case: CH59)
if fn, ok := valueExpr.(*clickhouse.FunctionExpr); ok {
// Only handle any() function, skip others like lowercase('cpu')
if fn.Name != nil && fn.Name.Name == "any" {
e.extractInValues(valueExpr, metricNames)
}
// Otherwise skip function-wrapped literals
} else if val := e.extractStringLiteral(valueExpr); val != "" {
metricNames[val] = true
}
case "IN", "GLOBAL IN":
// metric_name IN ('a', 'b', 'c')
// GLOBAL IN behaves the same as IN for metric extraction purposes
// Skip if value side is a function call (function-wrapped literals are ignored, test case: CH59)
if _, ok := valueExpr.(*clickhouse.FunctionExpr); !ok {
e.extractInValues(valueExpr, metricNames)
}
}
}
// extractStringLiteral extracts a string literal value from an expression
func (e *ClickHouseFilterExtractor) extractStringLiteral(expr clickhouse.Expr) string {
switch ex := expr.(type) {
case *clickhouse.StringLiteral:
return ex.Literal
}
return ""
}
// extractInValues extracts values from IN expressions
func (e *ClickHouseFilterExtractor) extractInValues(expr clickhouse.Expr, metricNames map[string]bool) {
// Find all string literals in the expression
strLits := clickhouse.FindAll(expr, func(node clickhouse.Expr) bool {
// Metric names passed in an IN condition will be string literals.
_, ok := node.(*clickhouse.StringLiteral)
return ok
})
for _, strLitNode := range strLits {
if strLit, ok := strLitNode.(*clickhouse.StringLiteral); ok {
// Unquote the string literal
val := e.extractStringLiteral(strLit)
if val != "" {
metricNames[val] = true
}
}
}
}
// ========================================
// GROUP BY Column Extraction
// ========================================
// extractGroupByColumns extracts the GROUP BY columns from a query
// It follows the top-down approach where outer GROUP BY overrides inner GROUP BY in subqueries and CTEs.
// Returns a slice of ColumnInfo with column names, aliases, and origins
func (e *ClickHouseFilterExtractor) extractGroupByColumns(query *clickhouse.SelectQuery, cteMap map[string]*clickhouse.SelectQuery, visited map[*clickhouse.SelectQuery]bool) ([]ColumnInfo, error) {
if visited[query] {
return nil, nil
}
// Mark this query as visited to prevent cycles
visited[query] = true
// First, check if this query has its own GROUP BY using direct field access
hasGroupBy := query.GroupBy != nil
// If this query has GROUP BY, use it (outer overrides inner)
if hasGroupBy {
// Extract GROUP BY columns
tempGroupBy := make(map[string]bool)
e.fillGroupsFromGroupByClause(query.GroupBy, tempGroupBy)
// Extract SELECT columns and their aliases from the same query level
selectAliases := e.extractSelectColumns(query)
// Build ColumnInfo array by matching GROUP BY with SELECT aliases and origins
result := []ColumnInfo{}
for groupByCol := range tempGroupBy {
alias := selectAliases[groupByCol] // Will be "" if not in SELECT
// Extract originExpr by tracing back through queries
originVisited := make(map[*clickhouse.SelectQuery]bool)
originExpr := e.extractColumnOrigin(groupByCol, query, cteMap, originVisited)
originField, err := extractCHOriginFieldFromQuery(fmt.Sprintf("SELECT %s", originExpr))
if err != nil {
return nil, err
}
result = append(result, ColumnInfo{
Name: groupByCol,
Alias: alias,
OriginExpr: originExpr,
OriginField: originField,
})
}
return result, nil
}
// If no GROUP BY in this query, follow CTE/subquery references
// It might have grouping inside the CTE/subquery
sourceQuery := e.extractSourceQuery(query, cteMap)
if sourceQuery != nil {
return e.extractGroupByColumns(sourceQuery, cteMap, visited)
}
return nil, nil
}
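// For example (illustrative): in
//
//	SELECT region FROM (SELECT region, host FROM metrics GROUP BY region, host) GROUP BY region
//
// the outer GROUP BY wins, so only "region" is reported as a grouping key.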
// fillGroupsFromGroupByClause extracts GROUP BY columns from a specific GroupByClause and fills the map with the column names
func (e *ClickHouseFilterExtractor) fillGroupsFromGroupByClause(groupByClause *clickhouse.GroupByClause, groupBy map[string]bool) {
// Extract GROUP BY expressions properly
// Find only the direct child ColumnExprList, not nested ones
// We use Find instead of FindAll to get only the first (direct child) ColumnExprList
exprListNode, foundList := clickhouse.Find(groupByClause, func(node clickhouse.Expr) bool {
_, ok := node.(*clickhouse.ColumnExprList)
return ok
})
if !foundList {
return
}
// Note: We only extract from the top-level ColumnExprList.Items to avoid extracting nested parts
// This prevents extracting 'timestamp' from 'toDate(timestamp)' - we only get 'toDate(timestamp)'
if exprList, ok := exprListNode.(*clickhouse.ColumnExprList); ok {
// Extract each expression from the list - these are top-level only
if exprList.Items != nil {
for _, item := range exprList.Items {
groupKey := e.extractColumnStrByExpr(item)
if groupKey != "" {
// Strip table alias if present (e.g., "m.region" -> "region")
groupKey = e.stripTableAlias(groupKey)
groupBy[groupKey] = true
}
}
}
}
}
// extractColumnStrByExpr extracts the complete string representation of different expression types
// Supports:
// - Ident: Simple identifier like "region" or "timestamp"
// - FunctionExpr: Function call like "toDate(timestamp)"
// - ColumnExpr: Column expression like "m.region", "toDate(timestamp)"
// - Other expression types: Return the string representation of the expression
//
// For example:
// - "region" -> "region"
// - "toDate(timestamp)" -> "toDate(timestamp)"
// - "`m.region`" -> "`m.region`"
func (e *ClickHouseFilterExtractor) extractColumnStrByExpr(expr clickhouse.Expr) string {
if expr == nil {
return ""
}
switch ex := expr.(type) {
// Ident is a simple identifier like "region" or "timestamp"
case *clickhouse.Ident:
// Handling for backticks which are native to ClickHouse and used for literal names.
// CH Parser removes the backticks from the identifier, so we need to add them back.
if ex.QuoteType == clickhouse.BackTicks {
return "`" + ex.Name + "`"
}
return ex.Name
// FunctionExpr is a function call like "toDate(timestamp)"
case *clickhouse.FunctionExpr:
// For function expressions, return the complete function call string
return ex.String()
// ColumnExpr is a column expression like "m.region", "toDate(timestamp)"
case *clickhouse.ColumnExpr:
// ColumnExpr wraps another expression - extract the underlying expression
if ex.Expr != nil {
return e.extractColumnStrByExpr(ex.Expr)
}
return ex.String()
default:
// For other expression types, return the string representation
return expr.String()
}
}
// stripTableAlias removes a table alias prefix from a column name (e.g., "m.region" -> "region"),
// but backtick-quoted literals keep their full dotted name (e.g., `os.type` -> "os.type").
func (e *ClickHouseFilterExtractor) stripTableAlias(name string) string {
// Handling for backticks which are native to ClickHouse and used for literal names.
if strings.HasPrefix(name, "`") && strings.HasSuffix(name, "`") {
return strings.Trim(name, "`")
}
// split the name by dot and return the last part
parts := strings.Split(name, ".")
if len(parts) > 1 {
return parts[len(parts)-1]
}
return name
}
// getColumnName extracts column name from an expression
func (e *ClickHouseFilterExtractor) getColumnName(expr clickhouse.Expr) string {
switch ex := expr.(type) {
case *clickhouse.Ident:
return ex.Name
case *clickhouse.Path:
// Handle Path type for qualified column names like "m.metric_name"
// Extract the last field which is the column name
if len(ex.Fields) > 0 {
return ex.Fields[len(ex.Fields)-1].Name
}
return ""
}
return ""
}
// extractSourceQuery extracts the SelectQuery from FROM expressions
// Handles CTE references, subqueries, and table expressions
// For example, from the query below we'll try to extract the name of the source query,
// which in this case is "aggregated". Once we find it, we return the SelectQuery node
// from the cteMap, which acts as the source for the GROUP BY extraction.
//
// WITH aggregated AS (
// SELECT region as region_alias, sum(value) AS total
// FROM metrics
// WHERE metric_name = 'cpu_usage'
// GROUP BY region
// )
// SELECT * FROM aggregated
func (e *ClickHouseFilterExtractor) extractSourceQuery(query *clickhouse.SelectQuery, cteMap map[string]*clickhouse.SelectQuery) *clickhouse.SelectQuery {
if query.From == nil {
return nil
}
// Find the FROM clause and extract the source
fromExprs := clickhouse.FindAll(query.From, func(node clickhouse.Expr) bool {
switch node.(type) {
case *clickhouse.Ident, *clickhouse.SelectQuery:
return true
}
return false
})
for _, fromExpr := range fromExprs {
switch expr := fromExpr.(type) {
case *clickhouse.Ident:
// CTE reference by simple name
if cteQuery, exists := cteMap[expr.Name]; exists {
return cteQuery
}
case *clickhouse.SelectQuery:
// Direct subquery
return expr
}
}
return nil
}
// ========================================
// Column Origin Tracing
// ========================================
// extractColumnOrigin recursively traces a column back to its original expression
// Returns the original expression string (e.g., "JSONExtractString(labels, 'service.name')")
// or the column name itself if it's a direct column reference
func (e *ClickHouseFilterExtractor) extractColumnOrigin(
columnName string,
query *clickhouse.SelectQuery,
cteMap map[string]*clickhouse.SelectQuery,
visited map[*clickhouse.SelectQuery]bool,
) string {
if query == nil {
return columnName
}
// Prevent infinite recursion and redundant work
// Once a query is visited, we don't need to check it again
if visited[query] {
return columnName
}
visited[query] = true
// This prevents infinite recursion within a single query's search,
// but we don't want it to affect searches through other queries,
// so we remove the entry once the search for the current query is done.
defer delete(visited, query)
// Step 1: Search through CTEs and joins; this takes us to the very end of the subqueries and CTEs
sourceQuery := e.extractSourceQuery(query, cteMap)
if sourceQuery != nil {
returningOrigin := e.extractColumnOrigin(columnName, sourceQuery, cteMap, visited)
if returningOrigin != columnName {
return returningOrigin
}
}
// Step 2: Once we're sure there are no more subqueries or CTEs, find all the SelectItems
// and get their column origin values
selectItems := clickhouse.FindAll(query, func(node clickhouse.Expr) bool {
_, ok := node.(*clickhouse.SelectItem)
return ok
})
// extractOriginFromSelectItem extracts the origin from a SelectItem
extractOriginFromSelectItem := func(selectItem *clickhouse.SelectItem) *string {
// Check if this SelectItem matches our column (by alias or by name)
alias := e.extractSelectItemAlias(selectItem)
exprStr := e.extractSelectItemName(selectItem)
normalizedExpr := e.stripTableAlias(exprStr)
// Case 1: Column matches an alias in SELECT
if alias == columnName {
// This is an alias - get the expression it's aliasing
if selectItem.Expr != nil {
originExpr := e.extractFullExpression(selectItem.Expr)
// If the expression is just a column name, trace it back further
if normalizedExpr == columnName || e.isSimpleColumnReference(selectItem.Expr) {
// It's referencing another column - trace back through source query
sourceQuery := e.extractSourceQuery(query, cteMap)
if sourceQuery != nil {
originExpr := e.extractColumnOrigin(normalizedExpr, sourceQuery, cteMap, visited)
return &originExpr
}
}
return &originExpr
}
}
// Case 2: Column matches the expression itself (no alias)
if normalizedExpr == columnName {
// Check if this is a simple column reference or a complex expression
if e.isSimpleColumnReference(selectItem.Expr) {
// Simple column - trace back through source query
sourceQuery := e.extractSourceQuery(query, cteMap)
if sourceQuery != nil {
originExpr := e.extractColumnOrigin(columnName, sourceQuery, cteMap, visited)
return &originExpr
}
return &columnName
} else {
// Complex expression - return it as origin
originExpr := e.extractFullExpression(selectItem.Expr)
return &originExpr
}
}
return nil
}
var finalColumnOrigin string
for _, itemNode := range selectItems {
if selectItem, ok := itemNode.(*clickhouse.SelectItem); ok {
// We call extractOriginFromSelectItem for each SelectItem and, whenever the
// origin is non-nil, overwrite finalColumnOrigin with it. This is needed to
// reach the most deeply nested origin of the column among the SelectItems.
origin := extractOriginFromSelectItem(selectItem)
if origin != nil {
finalColumnOrigin = *origin
}
}
}
if finalColumnOrigin != "" {
return finalColumnOrigin
}
return columnName
}
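// Illustrative trace (not from the change set): given
//
//	WITH base AS (
//	    SELECT JSONExtractString(labels, 'service.name') AS service FROM samples
//	)
//	SELECT service FROM base GROUP BY service
//
// tracing "service" from the outer query follows the CTE reference and returns
// "JSONExtractString(labels, 'service.name')" as the origin expression.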
// extractFullExpression extracts the complete string representation of an expression
func (e *ClickHouseFilterExtractor) extractFullExpression(expr clickhouse.Expr) string {
if expr == nil {
return ""
}
return expr.String()
}
// isSimpleColumnReference checks if an expression is just a simple column reference
// (not a function call or complex expression)
func (e *ClickHouseFilterExtractor) isSimpleColumnReference(expr clickhouse.Expr) bool {
if expr == nil {
return false
}
switch ex := expr.(type) {
case *clickhouse.Ident:
// backticks are treated as non simple column reference
// so that we can return the origin expression with backticks
// origin parser will handle the backticks and extract the column name from it
if ex.QuoteType == clickhouse.BackTicks {
return false
}
return true
case *clickhouse.Path:
return true
case *clickhouse.ColumnExpr:
// Check if it wraps a simple reference
if ex.Expr != nil {
return e.isSimpleColumnReference(ex.Expr)
}
}
return false
}
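// Editorial note: illustrative cases for this rule, matching the switch above:
//
//	region          -> true  (*clickhouse.Ident, unquoted)
//	`service.name`  -> false (backticked Ident; the origin keeps the backticks)
//	t.region        -> true  (*clickhouse.Path)
//	lower(region)   -> false (a function call is not a bare column reference)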
// ========================================
// SELECT Column Alias Extraction
// ========================================
// extractSelectColumns extracts column names and their aliases from SELECT clause of a specific query
// Returns a map where key is normalized column name and value is the alias
// For duplicate columns with different aliases, the last alias wins
// This follows the same pattern as extractGroupFromGroupByClause - finding direct children only
func (e *ClickHouseFilterExtractor) extractSelectColumns(query *clickhouse.SelectQuery) map[string]string {
aliasMap := make(map[string]string)
if query == nil {
return aliasMap
}
// Find SelectItem nodes which represent columns in the SELECT clause
// SelectItem has an Expr field (the column/expression) and an Alias field
selectItems := clickhouse.FindAll(query, func(node clickhouse.Expr) bool {
_, ok := node.(*clickhouse.SelectItem)
return ok
})
// Process each SelectItem and extract column name and alias
for _, itemNode := range selectItems {
if selectItem, ok := itemNode.(*clickhouse.SelectItem); ok {
// Extract the column name/expression from SelectItem.Expr
columnName := e.extractSelectItemName(selectItem)
if columnName == "" {
continue
}
// Normalize column name (strip table alias)
normalizedName := e.stripTableAlias(columnName)
// Extract alias from SelectItem.Alias
alias := e.extractSelectItemAlias(selectItem)
// Store in map - last alias wins for duplicates
aliasMap[normalizedName] = alias
}
}
return aliasMap
}
// extractSelectItemName extracts the column name or expression from a SelectItem
func (e *ClickHouseFilterExtractor) extractSelectItemName(selectItem *clickhouse.SelectItem) string {
if selectItem == nil || selectItem.Expr == nil {
return ""
}
return e.extractColumnStrByExpr(selectItem.Expr)
}
// extractSelectItemAlias extracts the alias from a SelectItem
// Returns empty string if no alias is present
func (e *ClickHouseFilterExtractor) extractSelectItemAlias(selectItem *clickhouse.SelectItem) string {
if selectItem == nil || selectItem.Alias == nil {
return ""
}
// The Alias field is an *Ident (pointer type)
if selectItem.Alias.Name != "" {
return selectItem.Alias.Name
}
return ""
}
// ========================================
// CTE and Subquery Extraction
// ========================================
// buildCTEMap builds a map of CTE names to their SelectQuery nodes by recursively
// traversing all queries and their nested expressions
func (e *ClickHouseFilterExtractor) buildCTEMap(query *clickhouse.SelectQuery, cteMap map[string]*clickhouse.SelectQuery) {
// Access CTEs directly from WithClause if it exists
if query.With != nil && query.With.CTEs != nil {
for _, cte := range query.With.CTEs {
cteName := e.extractCTEName(cte)
cteQuery := e.extractCTEQuery(cte)
if cteName != "" && cteQuery != nil {
cteMap[cteName] = cteQuery
// Recursively build CTE map for nested CTEs
e.buildCTEMap(cteQuery, cteMap)
}
}
}
// Also check for CTEs in subqueries and other expressions
e.buildCTEMapFromExpr(query, cteMap)
}
// extractCTEName extracts the CTE name from a CTEStmt; the Expr field holds the name of the CTE
func (e *ClickHouseFilterExtractor) extractCTEName(cte *clickhouse.CTEStmt) string {
if cte == nil || cte.Expr == nil {
return ""
}
switch name := cte.Expr.(type) {
case *clickhouse.Ident:
return name.Name
default:
return cte.Expr.String()
}
}
// extractCTEQuery extracts the SelectQuery from a CTEStmt; the Alias field holds the SelectQuery
func (e *ClickHouseFilterExtractor) extractCTEQuery(cte *clickhouse.CTEStmt) *clickhouse.SelectQuery {
if cte == nil || cte.Alias == nil {
return nil
}
// The Alias field should contain a SelectQuery
if selectQuery, ok := cte.Alias.(*clickhouse.SelectQuery); ok {
return selectQuery
}
return nil
}
// buildCTEMapFromExpr recursively extracts CTEs from various expression types
func (e *ClickHouseFilterExtractor) buildCTEMapFromExpr(expr clickhouse.Expr, cteMap map[string]*clickhouse.SelectQuery) {
// Walk through all nodes to find SelectQuery nodes that might contain CTEs
clickhouse.Walk(expr, func(node clickhouse.Expr) bool {
switch n := node.(type) {
case *clickhouse.SelectQuery:
// Don't process the same query we started with to avoid infinite recursion
if n != expr {
e.buildCTEMap(n, cteMap)
}
case *clickhouse.TableExpr:
if n.Expr != nil {
e.buildCTEMapFromExpr(n.Expr, cteMap)
}
case *clickhouse.JoinTableExpr:
if n.Table != nil {
e.buildCTEMapFromExpr(n.Table, cteMap)
}
}
return true // Continue traversal
})
}
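// Editorial note: a hedged sketch of how the CTE map feeds origin tracing
// (illustrative, not captured output). For:
//
//	WITH base AS (SELECT lower(region) AS r FROM metrics)
//	SELECT r FROM base
//
// buildCTEMap yields {"base": <SelectQuery of the CTE body>}, and
// extractColumnOrigin("r", ...) then resolves through the CTE to "lower(region)".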

View File

@@ -0,0 +1,305 @@
package queryfilterextractor
import (
"strings"
"github.com/AfterShip/clickhouse-sql-parser/parser"
"github.com/SigNoz/signoz/pkg/errors"
)
// excludedFunctions contains functions that should cause ExtractOriginField to return empty string.
// Map key is the function name in lowercase, value is the original function name.
var excludedFunctions = map[string]string{
// Time functions
"now": "now",
"today": "today",
"yesterday": "yesterday",
"todatetime": "toDateTime",
"todatetime64": "toDateTime64",
"todate": "toDate",
"todate32": "toDate32",
"tostartofinterval": "toStartOfInterval",
"tostartofday": "toStartOfDay",
"tostartofweek": "toStartOfWeek",
"tostartofmonth": "toStartOfMonth",
"tostartofquarter": "toStartOfQuarter",
"tostartofyear": "toStartOfYear",
"tostartofhour": "toStartOfHour",
"tostartofminute": "toStartOfMinute",
"tostartofsecond": "toStartOfSecond",
"tostartoffiveminutes": "toStartOfFiveMinutes",
"tostartoftenminutes": "toStartOfTenMinutes",
"tostartoffifteenminutes": "toStartOfFifteenMinutes",
"tointervalsecond": "toIntervalSecond",
"tointervalminute": "toIntervalMinute",
"tointervalhour": "toIntervalHour",
"tointervalday": "toIntervalDay",
"tointervalweek": "toIntervalWeek",
"tointervalmonth": "toIntervalMonth",
"tointervalquarter": "toIntervalQuarter",
"tointervalyear": "toIntervalYear",
"parsedatetime": "parseDateTime",
"parsedatetimebesteffort": "parseDateTimeBestEffort",
// Aggregate functions
"count": "count",
"sum": "sum",
"avg": "avg",
"min": "min",
"max": "max",
"any": "any",
"stddevpop": "stddevPop",
"stddevsamp": "stddevSamp",
"varpop": "varPop",
"varsamp": "varSamp",
"grouparray": "groupArray",
"groupuniqarray": "groupUniqArray",
"quantile": "quantile",
"quantiles": "quantiles",
"quantileexact": "quantileExact",
"quantiletiming": "quantileTiming",
"median": "median",
"uniq": "uniq",
"uniqexact": "uniqExact",
"uniqcombined": "uniqCombined",
"uniqhll12": "uniqHLL12",
"topk": "topK",
"first": "first",
"last": "last",
}
// jsonExtractFunctions contains functions that extract from JSON columns.
// Map key is the function name in lowercase, value is the original function name.
var jsonExtractFunctions = map[string]string{
"jsonextractstring": "JSONExtractString",
"jsonextractint": "JSONExtractInt",
"jsonextractuint": "JSONExtractUInt",
"jsonextractfloat": "JSONExtractFloat",
"jsonextractbool": "JSONExtractBool",
"jsonextract": "JSONExtract",
"jsonextractraw": "JSONExtractRaw",
"jsonextractarrayraw": "JSONExtractArrayRaw",
"jsonextractkeysandvalues": "JSONExtractKeysAndValues",
}
// isFunctionPresentInStore checks if a function name exists in the function store map
func isFunctionPresentInStore(funcName string, funcStore map[string]string) bool {
_, exists := funcStore[strings.ToLower(funcName)]
return exists
}
// isReservedSelectKeyword checks if a keyword is a reserved keyword for the SELECT statement
// We're only including those which can appear in the SELECT statement without being quoted
func isReservedSelectKeyword(keyword string) bool {
return strings.ToUpper(keyword) == parser.KeywordSelect || strings.ToUpper(keyword) == parser.KeywordFrom
}
// extractCHOriginFieldFromQuery extracts the origin field (column name) from a query string,
// or the field being extracted in the case of JSON extraction functions.
func extractCHOriginFieldFromQuery(query string) (string, error) {
// Parse the query string
p := parser.NewParser(query)
stmts, err := p.ParseStmts()
if err != nil {
return "", errors.NewInternalf(errors.CodeInternal, "failed to parse origin field from query: %s", err.Error())
}
// Get the first statement, which should be a SELECT
if len(stmts) == 0 {
return "", errors.NewInternalf(errors.CodeInternal, "query contains no statements")
}
selectStmt, ok := stmts[0].(*parser.SelectQuery)
if !ok {
return "", errors.NewInternalf(errors.CodeInternal, "query is not a SELECT statement")
}
// If the query has multiple select items, return a blank string; we don't expect multiple select items
if len(selectStmt.SelectItems) > 1 {
return "", nil
}
if len(selectStmt.SelectItems) == 0 {
return "", errors.NewInternalf(errors.CodeInternal, "SELECT query has no select items")
}
// Extract origin field from the first (and only) select item's expression
return extractOriginFieldFromExpr(selectStmt.SelectItems[0].Expr)
}
// extractOriginFieldFromExpr extracts the origin field (column name) from an expression.
// This is the internal helper function that contains the original logic.
func extractOriginFieldFromExpr(expr parser.Expr) (string, error) {
// Check if expression contains excluded functions or IF/CASE
hasExcludedExpressions := false
hasReservedKeyword := false
parser.Walk(expr, func(node parser.Expr) bool {
// exclude reserved keywords because the parser will treat them as valid SQL
// example: in `SELECT FROM table`, "FROM" is a reserved keyword,
// but the parser will treat it as a valid column to be extracted.
if ident, ok := node.(*parser.Ident); ok {
if ident.QuoteType == parser.Unquoted && isReservedSelectKeyword(ident.Name) {
hasReservedKeyword = true
return false
}
}
// for functions, we need to check whether the function is an excluded function or a JSON extraction function with nested JSON extraction
if funcExpr, ok := node.(*parser.FunctionExpr); ok {
if isFunctionPresentInStore(funcExpr.Name.Name, excludedFunctions) {
hasExcludedExpressions = true
return false
}
// Check for nested JSON extraction functions
if isFunctionPresentInStore(funcExpr.Name.Name, jsonExtractFunctions) {
// Check if any argument contains another JSON extraction function
if funcExpr.Params != nil && funcExpr.Params.Items != nil {
for _, arg := range funcExpr.Params.Items.Items {
if containsJSONExtractFunction(arg) {
hasExcludedExpressions = true
return false
}
}
}
}
}
if _, ok := node.(*parser.CaseExpr); ok {
hasExcludedExpressions = true
return false
}
return true
})
// If the expression contains reserved keywords, return error
if hasReservedKeyword {
return "", errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "reserved keyword found in select clause")
}
// If the expression contains excluded expressions, return empty string
if hasExcludedExpressions {
return "", nil
}
// Extract all column names from the expression
columns := extractColumns(expr)
// If we found exactly one unique column, return it
if len(columns) == 1 {
return columns[0], nil
}
// Multiple columns or no columns - return empty string
return "", nil
}
// containsJSONExtractFunction checks if an expression contains a JSON extraction function
func containsJSONExtractFunction(expr parser.Expr) bool {
if expr == nil {
return false
}
found := false
parser.Walk(expr, func(node parser.Expr) bool {
if funcExpr, ok := node.(*parser.FunctionExpr); ok {
if isFunctionPresentInStore(funcExpr.Name.Name, jsonExtractFunctions) {
found = true
return false
}
}
return true
})
return found
}
// extractColumns recursively extracts all unique column names from an expression.
// Note: String literals are also considered as origin fields and will be included in the result.
func extractColumns(expr parser.Expr) []string {
columnMap := make(map[string]bool)
extractColumnsHelper(expr, columnMap)
// Convert map to slice
columns := make([]string, 0, len(columnMap))
for col := range columnMap {
columns = append(columns, col)
}
return columns
}
// extractColumnsHelper is a recursive helper that finds all column references.
// Note: String literals are also considered as origin fields and will be added to the columnMap.
func extractColumnsHelper(expr parser.Expr, columnMap map[string]bool) {
switch n := expr.(type) {
// Ident is a simple identifier like "region" or "timestamp"
case *parser.Ident:
// Add identifiers as column references
columnMap[n.Name] = true
// FunctionExpr is a function call like "toDate(timestamp)", "JSONExtractString(labels, 'service.name')"
case *parser.FunctionExpr:
// Special handling for JSON extraction functions
// In case of nested JSON extraction, we return blank values (handled at top level)
if isFunctionPresentInStore(n.Name.Name, jsonExtractFunctions) {
// For JSON functions, extract from the second argument (the JSON path/key being extracted)
// The first argument is the column name, the second is the exact data being extracted
// The extracted data (second argument) is treated as the origin field
if n.Params != nil && n.Params.Items != nil && len(n.Params.Items.Items) >= 2 {
secondArg := n.Params.Items.Items[1]
// If the second argument is a string literal, use its value as the origin field
// String literals are considered as origin fields
if strLit, ok := secondArg.(*parser.StringLiteral); ok {
columnMap[strLit.Literal] = true
} else {
// Otherwise, try to extract columns from it
extractColumnsHelper(secondArg, columnMap)
}
}
return
}
// For regular functions, recursively process all arguments, ex: lower(name)
if n.Params != nil && n.Params.Items != nil {
for _, item := range n.Params.Items.Items {
extractColumnsHelper(item, columnMap)
}
}
// BinaryOperation is a binary operation like "region = 'us-east-1'" or "unix_milli / 1000"
case *parser.BinaryOperation:
extractColumnsHelper(n.LeftExpr, columnMap)
extractColumnsHelper(n.RightExpr, columnMap)
// ColumnExpr is a column expression like "m.region", "service.name"
case *parser.ColumnExpr:
extractColumnsHelper(n.Expr, columnMap)
// CastExpr is a cast expression like "CAST(unix_milli AS String)"
case *parser.CastExpr:
extractColumnsHelper(n.Expr, columnMap)
case *parser.ParamExprList:
if n.Items != nil {
extractColumnsHelper(n.Items, columnMap)
}
// Ex: coalesce(cpu_usage, 0) + coalesce(mem_usage, 0)
case *parser.ColumnExprList:
for _, item := range n.Items {
extractColumnsHelper(item, columnMap)
}
// StringLiteral is a string literal like "us-east-1" or "cpu.usage"
case *parser.StringLiteral:
// String literals are considered as origin fields
columnMap[n.Literal] = true
return
// Support for columns like table.column_name
case *parser.Path:
if len(n.Fields) > 0 {
extractColumnsHelper(n.Fields[len(n.Fields)-1], columnMap)
}
return
// Add more cases as needed for other expression types
default:
// For unknown types, return empty (don't extract columns)
return
}
}

View File

@@ -0,0 +1,237 @@
package queryfilterextractor
import (
"testing"
)
func TestExtractOriginField(t *testing.T) {
tests := []struct {
name string
query string
expected string
expectError bool
}{
// JSON extraction functions - should return the second argument (JSON path/key) as origin field
{
name: "JSONExtractString simple",
query: `SELECT JSONExtractString(labels, 'service.name')`,
expected: "service.name",
},
{
name: "JSONExtractInt",
query: `SELECT JSONExtractInt(labels, 'status.code')`,
expected: "status.code",
},
{
name: "JSONExtractFloat",
query: `SELECT JSONExtractFloat(labels, 'cpu.usage')`,
expected: "cpu.usage",
},
{
name: "JSONExtractBool",
query: `SELECT JSONExtractBool(labels, 'feature.enabled')`,
expected: "feature.enabled",
},
{
name: "JSONExtractString with function wrapper",
query: `SELECT lower(JSONExtractString(labels, 'user.email'))`,
expected: "user.email",
},
{
name: "Nested JSON extraction",
query: `SELECT JSONExtractInt(JSONExtractRaw(labels, 'meta'), 'status.code')`,
expected: "", // Nested JSON extraction should return blank
},
// Nested functions - should return the deepest column
{
name: "Nested time functions with column",
query: `SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(60))`,
expected: "", // Contains toStartOfInterval and toDateTime which are excluded
},
{
name: "Division with column",
query: `SELECT unix_milli / 1000`,
expected: "unix_milli",
},
{
name: "Function with single column",
query: `SELECT lower(unix_milli)`,
expected: "unix_milli",
},
{
name: "CAST with single column",
query: `SELECT CAST(unix_milli AS String)`,
expected: "unix_milli",
},
{
name: "intDiv with single column",
query: `SELECT intDiv(unix_milli, 1000)`,
expected: "unix_milli",
},
// Multiple columns - should return blank
{
name: "Multiple columns in coalesce",
query: `SELECT (coalesce(cpu_usage, 0) + coalesce(mem_usage, 0)) / 2`,
expected: "",
},
{
name: "Multiple columns in arithmetic",
query: `SELECT cpu_usage + mem_usage`,
expected: "",
},
{
name: "Multiple columns in function",
query: `SELECT concat(first_name, last_name)`,
expected: "",
},
// IF/CASE conditions - should return blank
{
name: "IF with single column in condition",
query: `SELECT IF(error_count > 0, service, 'healthy')`,
expected: "", // Multiple columns: error_count and service
},
{
name: "IF with JSON and multiple columns",
query: `SELECT if(JSONExtractInt(metadata, 'retry.count') > 3, toLower(JSONExtractString(metadata, 'user.id')), hostname)`,
expected: "", // Multiple columns: metadata and hostname
},
{
name: "String literal should return string",
query: `SELECT 'constant'`,
expected: "constant",
},
// No columns - should return blank
{
name: "Number literal",
query: `SELECT 42`,
expected: "",
},
{
name: "Multiple literals",
query: `SELECT 'constant', 42`,
expected: "",
},
{
name: "Multiple string literals",
query: `SELECT 'constant', '42'`,
expected: "",
},
// Excluded functions - should return blank
{
name: "now() function",
query: `SELECT now()`,
expected: "",
},
{
name: "today() function",
query: `SELECT today()`,
expected: "",
},
{
name: "count aggregate",
query: `SELECT count(user_id)`,
expected: "",
},
{
name: "sum aggregate",
query: `SELECT sum(amount)`,
expected: "",
},
// Single column simple cases
{
name: "Simple column reference",
query: `SELECT user_id`,
expected: "user_id",
},
{
name: "Column with alias",
query: `SELECT user_id AS id`,
expected: "user_id",
},
{
name: "Column in arithmetic with literals (multiplication)",
query: `SELECT unix_milli * 1000`,
expected: "unix_milli",
},
// Edge cases
{
name: "Nested functions with single column deep",
query: `SELECT upper(lower(trim(column_name)))`,
expected: "column_name",
},
// Qualified column names (Path)
{
name: "Column with table prefix",
query: `SELECT table.column_name`,
expected: "column_name", // IndexOperation: extracts column name from Index field
},
{
name: "Qualified column in function",
query: `SELECT lower(table.column_name)`,
expected: "column_name",
},
{
name: "Qualified column in arithmetic",
query: `SELECT table.column_name * 100`,
expected: "column_name",
},
{
name: "Nested qualified column (schema.table.column)",
query: `SELECT schema.table.column_name`,
expected: "column_name", // Should extract the final column name
},
{
name: "Multiple qualified columns",
query: `SELECT table1.column1 + table2.column2`,
expected: "", // Multiple columns: column1 and column2
},
{
name: "Qualified column with CAST",
query: `SELECT CAST(table.column_name AS String)`,
expected: "column_name",
},
{
name: "Multiple select items - return blank",
query: `SELECT JSONExtractString(labels, 'service.name'), unix_milli / 1000, cpu_usage + mem_usage`,
expected: "",
},
// Error cases
{
name: "Invalid SQL syntax",
query: `SELECT FROM table`,
expectError: true,
},
{
name: "Malformed query",
query: `SELECT * FROM`,
expectError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := extractCHOriginFieldFromQuery(tt.query)
if tt.expectError {
if err == nil {
t.Errorf("ExtractOriginField() expected error but got nil, result = %q", result)
}
} else {
if err != nil {
t.Errorf("ExtractOriginField() unexpected error: %v", err)
}
if result != tt.expected {
t.Errorf("ExtractOriginField() = %q, want %q", result, tt.expected)
}
}
})
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,115 @@
package queryfilterextractor
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
)
// PromQLFilterExtractor extracts metric names and grouping keys from PromQL queries
type PromQLFilterExtractor struct{}
// NewPromQLFilterExtractor creates a new PromQL filter extractor
func NewPromQLFilterExtractor() *PromQLFilterExtractor {
return &PromQLFilterExtractor{}
}
// Extract parses a PromQL query and extracts metric names and grouping keys
func (e *PromQLFilterExtractor) Extract(query string) (*FilterResult, error) {
expr, err := parser.ParseExpr(query)
if err != nil {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to parse promql query: %s", err.Error())
}
result := &FilterResult{
MetricNames: []string{},
GroupByColumns: []ColumnInfo{},
}
// Use a visitor to traverse the AST
visitor := &promQLVisitor{
metricNames: make(map[string]bool),
groupBy: make(map[string]bool),
}
// Walk the AST
if err := parser.Walk(visitor, expr, nil); err != nil {
return result, errors.NewInternalf(errors.CodeInternal, "failed to walk promql query: %s", err.Error())
}
// Convert sets to slices
for metric := range visitor.metricNames {
result.MetricNames = append(result.MetricNames, metric)
}
for groupKey := range visitor.groupBy {
result.GroupByColumns = append(result.GroupByColumns, ColumnInfo{Name: groupKey, OriginExpr: groupKey, OriginField: groupKey})
}
return result, nil
}
// promQLVisitor implements the parser.Visitor interface
type promQLVisitor struct {
metricNames map[string]bool
groupBy map[string]bool
// Track if we've already captured grouping from an outermost aggregation
hasOutermostGrouping bool
}
func (v *promQLVisitor) Visit(node parser.Node, path []parser.Node) (parser.Visitor, error) {
switch n := node.(type) {
case *parser.VectorSelector:
v.visitVectorSelector(n)
case *parser.AggregateExpr:
v.visitAggregateExpr(n, path)
}
return v, nil
}
// visitVectorSelector is called whenever the visitor encounters a VectorSelector node;
// in that case we extract the metric names from the vector selector.
func (v *promQLVisitor) visitVectorSelector(vs *parser.VectorSelector) {
// Check if metric name is specified directly
if vs.Name != "" {
v.metricNames[vs.Name] = true
}
// Check for __name__ label matcher
for _, matcher := range vs.LabelMatchers {
if matcher.Name == labels.MetricName {
switch matcher.Type {
case labels.MatchEqual:
v.metricNames[matcher.Value] = true
// Skip for negative filters - negative filters don't extract metric names
// case labels.MatchNotEqual, labels.MatchRegexp, labels.MatchNotRegexp:
}
}
}
}
// visitAggregateExpr is called whenever the visitor encounters an AggregateExpr node;
// in that case we extract the grouping keys from the outermost aggregation.
func (v *promQLVisitor) visitAggregateExpr(ae *parser.AggregateExpr, path []parser.Node) {
// Count how many AggregateExpr nodes are in the path (excluding current node)
// This tells us the nesting level
nestingLevel := 0
for _, p := range path {
if _, ok := p.(*parser.AggregateExpr); ok {
nestingLevel++
}
}
// Only capture grouping from the outermost aggregation (nesting level 0)
if nestingLevel == 0 && !v.hasOutermostGrouping {
// If Without is true, we skip grouping per spec
if !ae.Without && len(ae.Grouping) > 0 {
v.hasOutermostGrouping = true
for _, label := range ae.Grouping {
v.groupBy[label] = true
}
}
}
// Continue traversal to find metrics in the expression
}
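// Editorial note: a minimal usage sketch (hedged; the expected values mirror
// the tests for this extractor):
//
//	extractor := NewPromQLFilterExtractor()
//	result, err := extractor.Extract(`sum by (pod, region) (rate(http_requests_total[5m]))`)
//	// err == nil
//	// result.MetricNames -> ["http_requests_total"]
//	// for _, col := range result.GroupByColumns { col.GroupName() } -> "pod", "region" (set order may vary)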

View File

@@ -0,0 +1,205 @@
package queryfilterextractor
import (
"reflect"
"testing"
)
func TestPromQLFilterExtractor_Extract(t *testing.T) {
extractor := NewPromQLFilterExtractor()
tests := []struct {
name string
query string
wantMetrics []string
wantGroupByColumns []ColumnInfo
wantError bool
}{
{
name: "P1 - Simple vector selector",
query: `http_requests_total{job="api"}`,
wantMetrics: []string{"http_requests_total"},
wantGroupByColumns: []ColumnInfo{},
},
{
name: "P2 - Function call",
query: `rate(cpu_usage_seconds_total[5m])`,
wantMetrics: []string{"cpu_usage_seconds_total"},
wantGroupByColumns: []ColumnInfo{},
},
{
name: "P3 - Aggregation with by()",
query: `sum by (pod,region) (rate(http_requests_total[5m]))`,
wantMetrics: []string{"http_requests_total"},
wantGroupByColumns: []ColumnInfo{{Name: "pod", OriginExpr: "pod", OriginField: "pod"}, {Name: "region", OriginExpr: "region", OriginField: "region"}},
},
{
name: "P4 - Aggregation with without()",
query: `sum without (instance) (rate(cpu_usage_total[1m]))`,
wantMetrics: []string{"cpu_usage_total"},
wantGroupByColumns: []ColumnInfo{}, // without() means no grouping keys per spec
},
{
name: "P5 - Invalid: metric name set twice",
query: `sum(rate(http_requests_total{__name__!="http_requests_error_total"}[5m]))`,
wantMetrics: []string{},
wantGroupByColumns: []ColumnInfo{},
wantError: true,
},
{
name: "P6 - Regex negative label",
query: `sum(rate(http_requests_total{status!~"5.."}[5m]))`,
wantMetrics: []string{"http_requests_total"},
wantGroupByColumns: []ColumnInfo{},
},
{
name: "P7 - Nested aggregations",
query: `sum by (region) (max by (pod, region) (cpu_usage_total{env="prod"}))`,
wantMetrics: []string{"cpu_usage_total"},
wantGroupByColumns: []ColumnInfo{{Name: "region", OriginExpr: "region", OriginField: "region"}}, // Only outermost grouping
},
{
name: "P7a - Nested aggregation: inner grouping ignored",
query: `sum(max by (pod) (cpu_usage_total{env="prod"}))`,
wantMetrics: []string{"cpu_usage_total"},
wantGroupByColumns: []ColumnInfo{}, // Inner grouping is ignored when outer has no grouping (nestingLevel != 0 case)
},
{
name: "P8 - Arithmetic expression",
query: `(http_requests_total{job="api"} + http_errors_total{job="api"})`,
wantMetrics: []string{"http_requests_total", "http_errors_total"},
wantGroupByColumns: []ColumnInfo{},
},
{
name: "P9 - Mix of positive metric & exclusion label",
query: `sum by (region)(rate(foo{job!="db"}[5m]))`,
wantMetrics: []string{"foo"},
wantGroupByColumns: []ColumnInfo{{Name: "region", OriginExpr: "region", OriginField: "region"}},
},
{
name: "P10 - Function + aggregation",
query: `histogram_quantile(0.9, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))`,
wantMetrics: []string{"http_request_duration_seconds_bucket"},
wantGroupByColumns: []ColumnInfo{{Name: "le", OriginExpr: "le", OriginField: "le"}},
},
{
name: "P11 - Subquery",
query: `sum_over_time(cpu_usage_total[1h:5m])`,
wantMetrics: []string{"cpu_usage_total"},
wantGroupByColumns: []ColumnInfo{},
},
{
name: "P12 - Nested aggregation inside subquery",
query: `max_over_time(sum(rate(cpu_usage_total[5m]))[1h:5m])`,
wantMetrics: []string{"cpu_usage_total"},
wantGroupByColumns: []ColumnInfo{},
},
{
name: "P13 - Subquery with multiple metrics",
query: `avg_over_time((foo + bar)[10m:1m])`,
wantMetrics: []string{"foo", "bar"},
wantGroupByColumns: []ColumnInfo{},
},
{
name: "P14 - Simple meta-metric",
query: `sum by (pod) (up)`,
wantMetrics: []string{"up"},
wantGroupByColumns: []ColumnInfo{{Name: "pod", OriginExpr: "pod", OriginField: "pod"}},
},
{
name: "P15 - Binary operator unless",
query: `sum(rate(http_requests_total[5m])) unless avg(rate(http_errors_total[5m]))`,
wantMetrics: []string{"http_requests_total", "http_errors_total"},
wantGroupByColumns: []ColumnInfo{},
},
{
name: "P16 - Vector matching",
query: `sum(rate(foo[5m])) / ignoring(instance) group_left(job) sum(rate(bar[5m]))`,
wantMetrics: []string{"foo", "bar"},
wantGroupByColumns: []ColumnInfo{},
},
{
name: "P17 - Offset modifier with aggregation",
query: `sum by (env)(rate(cpu_usage_seconds_total{job="api"}[5m] offset 1h))`,
wantMetrics: []string{"cpu_usage_seconds_total"},
wantGroupByColumns: []ColumnInfo{{Name: "env", OriginExpr: "env", OriginField: "env"}},
},
{
name: "P18 - Invalid syntax",
query: `sum by ((foo)(bar))(http_requests_total)`,
wantMetrics: []string{},
wantGroupByColumns: []ColumnInfo{},
wantError: true,
},
{
name: "P19 - Literal expression",
query: `2 + 3`,
wantMetrics: []string{},
wantGroupByColumns: []ColumnInfo{},
},
{
name: "P20 - Aggregation inside subquery with deriv",
query: `deriv(sum by (instance)(rate(node_network_receive_bytes_total[5m]))[30m:5m])`,
wantMetrics: []string{"node_network_receive_bytes_total"},
wantGroupByColumns: []ColumnInfo{{Name: "instance", OriginExpr: "instance", OriginField: "instance"}}, // Aggregation is inside subquery, not outermost
},
{
name: "P21 - Aggregation inside subquery with avg_over_time",
query: `avg_over_time(sum by (job)(rate(http_requests_total[1m]))[30m:1m])`,
wantMetrics: []string{"http_requests_total"},
wantGroupByColumns: []ColumnInfo{{Name: "job", OriginExpr: "job", OriginField: "job"}}, // Aggregation is inside subquery, not outermost
},
{
name: "P22 - Aggregation inside subquery with max_over_time",
query: `max_over_time(sum by (pod)(rate(container_restarts_total[5m]))[1h:5m])`,
wantMetrics: []string{"container_restarts_total"},
wantGroupByColumns: []ColumnInfo{{Name: "pod", OriginExpr: "pod", OriginField: "pod"}}, // Aggregation is inside subquery, not outermost
},
{
name: "P23 - Aggregation inside subquery with deriv (no rate)",
query: `deriv(sum by (namespace)(container_memory_working_set_bytes)[1h:10m])`,
wantMetrics: []string{"container_memory_working_set_bytes"},
wantGroupByColumns: []ColumnInfo{{Name: "namespace", OriginExpr: "namespace", OriginField: "namespace"}}, // Aggregation is inside subquery, not outermost
},
{
name: "P24 - Aggregation inside subquery with histogram_quantile",
query: `histogram_quantile(0.95, avg_over_time(sum by (le, service)(rate(http_request_duration_seconds_bucket[5m]))[1h:5m]))`,
wantMetrics: []string{"http_request_duration_seconds_bucket"},
wantGroupByColumns: []ColumnInfo{{Name: "le", OriginExpr: "le", OriginField: "le"}, {Name: "service", OriginExpr: "service", OriginField: "service"}}, // Aggregation is inside subquery, not outermost
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := extractor.Extract(tt.query)
// Check error expectation
if tt.wantError {
if err == nil {
t.Errorf("Extract() expected error but got none, query: %s", tt.query)
}
return
}
if err != nil {
t.Errorf("Extract() unexpected error = %v, query: %s", err, tt.query)
return
}
// Sort for comparison
gotMetrics := sortStrings(result.MetricNames)
wantMetrics := sortStrings(tt.wantMetrics)
if !reflect.DeepEqual(gotMetrics, wantMetrics) {
t.Errorf("Extract() MetricNames = %v, want %v", gotMetrics, wantMetrics)
}
// Test GroupByColumns - need to normalize for comparison (order may vary)
gotGroupByColumns := sortColumnInfo(result.GroupByColumns)
wantGroupByColumns := sortColumnInfo(tt.wantGroupByColumns)
if !reflect.DeepEqual(gotGroupByColumns, wantGroupByColumns) {
t.Errorf("Extract() GroupByColumns = %v, want %v", gotGroupByColumns, wantGroupByColumns)
}
})
}
}

View File

@@ -0,0 +1,58 @@
// Package queryfilterextractor provides utilities for extracting metric names
// and grouping keys.
//
// This is useful for metrics discovery and query analysis.
package queryfilterextractor
import "github.com/SigNoz/signoz/pkg/errors"
const (
ExtractorCH = "qfe_ch"
ExtractorPromQL = "qfe_promql"
)
// ColumnInfo represents a column in the query
type ColumnInfo struct {
Name string
Alias string
OriginExpr string
OriginField string
}
// GroupName returns the field name in the resulting data which is used for grouping
//
// - examples:
//
// - SELECT region as new_region FROM metrics WHERE metric_name='cpu' GROUP BY region
// GroupName() will return "new_region"
//
// - SELECT region FROM metrics WHERE metric_name='cpu' GROUP BY region
// GroupName() will return "region"
func (c *ColumnInfo) GroupName() string {
if c.Alias != "" {
return c.Alias
}
return c.Name
}
type FilterResult struct {
// MetricNames are the metrics that are being filtered on
MetricNames []string
// GroupByColumns are the columns that are being grouped by
GroupByColumns []ColumnInfo
}
type FilterExtractor interface {
Extract(query string) (*FilterResult, error)
}
func NewExtractor(extractorType string) (FilterExtractor, error) {
switch extractorType {
case ExtractorCH:
return NewClickHouseFilterExtractor(), nil
case ExtractorPromQL:
return NewPromQLFilterExtractor(), nil
default:
return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid extractor type: %s", extractorType)
}
}
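// Editorial note: a hedged end-to-end sketch of the factory; the sample query
// and expected group name mirror the GroupName doc comment above:
//
//	extractor, err := NewExtractor(ExtractorCH)
//	// err == nil
//	result, err := extractor.Extract("SELECT region AS new_region FROM metrics WHERE metric_name='cpu' GROUP BY region")
//	// expected: result.MetricNames -> ["cpu"]
//	// expected: result.GroupByColumns[0].GroupName() -> "new_region"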

View File

@@ -1,7 +1,6 @@
package agentConf
import (
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
@@ -25,6 +24,6 @@ type AgentFeature interface {
// TODO(Raj): maybe refactor agentConf further and clean this up
serializedSettingsUsed string,
apiErr *model.ApiError,
err error,
)
}

View File

@@ -7,16 +7,26 @@ import (
"strings"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/exp/slices"
)
var (
CodeConfigVersionNotFound = errors.MustNewCode("config_version_not_found")
CodeElementTypeRequired = errors.MustNewCode("element_type_required")
CodeConfigElementsRequired = errors.MustNewCode("config_elements_required")
CodeConfigVersionInsertFailed = errors.MustNewCode("config_version_insert_failed")
CodeConfigElementInsertFailed = errors.MustNewCode("config_element_insert_failed")
CodeConfigDeployStatusUpdateFailed = errors.MustNewCode("config_deploy_status_update_failed")
CodeConfigHistoryGetFailed = errors.MustNewCode("config_history_get_failed")
)
// Repo handles DDL and DML ops on ingestion rules
type Repo struct {
store sqlstore.SQLStore
@@ -24,7 +34,7 @@ type Repo struct {
func (r *Repo) GetConfigHistory(
ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, limit int,
) ([]opamptypes.AgentConfigVersion, *model.ApiError) {
) ([]opamptypes.AgentConfigVersion, error) {
var c []opamptypes.AgentConfigVersion
err := r.store.BunDB().NewSelect().
Model(&c).
@@ -39,7 +49,7 @@ func (r *Repo) GetConfigHistory(
Scan(ctx)
if err != nil {
return nil, model.InternalError(err)
return nil, errors.WrapInternalf(err, CodeConfigHistoryGetFailed, "failed to get config history")
}
incompleteStatuses := []opamptypes.DeployStatus{opamptypes.DeployInitiated, opamptypes.Deploying}
@@ -54,7 +64,7 @@ func (r *Repo) GetConfigHistory(
func (r *Repo) GetConfigVersion(
ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, v int,
) (*opamptypes.AgentConfigVersion, *model.ApiError) {
) (*opamptypes.AgentConfigVersion, error) {
var c opamptypes.AgentConfigVersion
err := r.store.BunDB().NewSelect().
Model(&c).
@@ -69,9 +79,9 @@ func (r *Repo) GetConfigVersion(
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, model.NotFoundError(err)
return nil, errors.WrapNotFoundf(err, CodeConfigVersionNotFound, "config version not found")
}
return nil, model.InternalError(err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to get config version")
}
return &c, nil
@@ -79,7 +89,7 @@ func (r *Repo) GetConfigVersion(
func (r *Repo) GetLatestVersion(
ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType,
) (*opamptypes.AgentConfigVersion, *model.ApiError) {
) (*opamptypes.AgentConfigVersion, error) {
var c opamptypes.AgentConfigVersion
err := r.store.BunDB().NewSelect().
Model(&c).
@@ -93,9 +103,9 @@ func (r *Repo) GetLatestVersion(
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, model.NotFoundError(err)
return nil, errors.WrapNotFoundf(err, CodeConfigVersionNotFound, "config latest version not found")
}
return nil, model.InternalError(err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to get latest config version")
}
return &c, nil
@@ -103,18 +113,16 @@ func (r *Repo) GetLatestVersion(
func (r *Repo) insertConfig(
ctx context.Context, orgId valuer.UUID, userId valuer.UUID, c *opamptypes.AgentConfigVersion, elements []string,
) (fnerr *model.ApiError) {
) error {
if c.ElementType.StringValue() == "" {
return model.BadRequest(fmt.Errorf(
"element type is required for creating agent config version",
))
return errors.NewInvalidInputf(CodeElementTypeRequired, "element type is required for creating agent config version")
}
// allowing empty elements for logs - use case is deleting all pipelines
if len(elements) == 0 && c.ElementType != opamptypes.ElementTypeLogPipelines {
zap.L().Error("insert config called with no elements ", zap.String("ElementType", c.ElementType.StringValue()))
return model.BadRequest(fmt.Errorf("config must have atleast one element"))
return errors.NewInvalidInputf(CodeConfigElementsRequired, "config must have at least one element")
}
if c.Version != 0 {
@@ -122,15 +130,13 @@ func (r *Repo) insertConfig(
// in a monotonically increasing order starting with 1. Hence, we reject insert
// requests with any version other than 0; here, 0 indicates un-assigned.
zap.L().Error("invalid version assignment while inserting agent config", zap.Int("version", c.Version), zap.String("ElementType", c.ElementType.StringValue()))
return model.BadRequest(fmt.Errorf(
"user defined versions are not supported in the agent config",
))
return errors.NewInvalidInputf(errors.CodeInvalidInput, "user defined versions are not supported in the agent config")
}
configVersion, err := r.GetLatestVersion(ctx, orgId, c.ElementType)
if err != nil && err.Type() != model.ErrorNotFound {
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
zap.L().Error("failed to fetch latest config version", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to fetch latest config version"))
return err
}
if configVersion != nil {
@@ -141,7 +147,7 @@ func (r *Repo) insertConfig(
}
defer func() {
if fnerr != nil {
if err != nil {
// remove all the damage (invalid rows from db)
r.store.BunDB().NewDelete().Model(new(opamptypes.AgentConfigVersion)).Where("id = ?", c.ID).Where("org_id = ?", orgId).Exec(ctx)
r.store.BunDB().NewDelete().Model(new(opamptypes.AgentConfigElement)).Where("version_id = ?", c.ID).Exec(ctx)
@@ -153,10 +159,9 @@ func (r *Repo) insertConfig(
NewInsert().
Model(c).
Exec(ctx)
if dbErr != nil {
zap.L().Error("error in inserting config version: ", zap.Error(dbErr))
return model.InternalError(errors.Wrap(dbErr, "failed to insert ingestion rule"))
return errors.WrapInternalf(dbErr, CodeConfigVersionInsertFailed, "failed to insert config version")
}
for _, e := range elements {
@@ -172,7 +177,7 @@ func (r *Repo) insertConfig(
}
_, dbErr = r.store.BunDB().NewInsert().Model(agentConfigElement).Exec(ctx)
if dbErr != nil {
return model.InternalError(dbErr)
return errors.WrapInternalf(dbErr, CodeConfigElementInsertFailed, "failed to insert config element")
}
}
@@ -214,8 +219,7 @@ func (r *Repo) updateDeployStatus(ctx context.Context,
func (r *Repo) updateDeployStatusByHash(
ctx context.Context, orgId valuer.UUID, confighash string, status string, result string,
) *model.ApiError {
) error {
_, err := r.store.BunDB().NewUpdate().
Model(new(opamptypes.AgentConfigVersion)).
Set("deploy_status = ?", status).
@@ -225,7 +229,7 @@ func (r *Repo) updateDeployStatusByHash(
Exec(ctx)
if err != nil {
zap.L().Error("failed to update deploy status", zap.Error(err))
return model.InternalError(errors.Wrap(err, "failed to update deploy status"))
return errors.WrapInternalf(err, CodeConfigDeployStatusUpdateFailed, "failed to update deploy status")
}
return nil
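// Editorial note: the same migration pattern repeats across these repo
// methods; a hedged sketch of the convention, using only the pkg/errors API
// already shown in this diff:
//
//	if err != nil {
//		if errors.Is(err, sql.ErrNoRows) {
//			return nil, errors.WrapNotFoundf(err, CodeConfigVersionNotFound, "config version not found")
//		}
//		return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to get config version")
//	}
//
// and callers branch on the error type instead of *model.ApiError:
//
//	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
//		return err
//	}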

View File

@@ -8,6 +8,7 @@ import (
"sync"
"sync/atomic"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
filterprocessor "github.com/SigNoz/signoz/pkg/query-service/app/opamp/otelconfig/filterprocessor"
tsp "github.com/SigNoz/signoz/pkg/query-service/app/opamp/otelconfig/tailsampler"
@@ -16,13 +17,16 @@ import (
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"github.com/pkg/errors"
"go.uber.org/zap"
yaml "gopkg.in/yaml.v3"
)
var m *Manager
var (
CodeConfigVersionNoConfig = errors.MustNewCode("config_version_no_config")
)
func init() {
m = &Manager{}
}
@@ -103,16 +107,14 @@ func (m *Manager) RecommendAgentConfig(orgId valuer.UUID, currentConfYaml []byte
for _, feature := range m.agentFeatures {
featureType := opamptypes.NewElementType(string(feature.AgentFeatureType()))
latestConfig, apiErr := GetLatestVersion(context.Background(), orgId, featureType)
if apiErr != nil && apiErr.Type() != model.ErrorNotFound {
return nil, "", errors.Wrap(apiErr.ToError(), "failed to get latest agent config version")
latestConfig, err := GetLatestVersion(context.Background(), orgId, featureType)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return nil, "", err
}
updatedConf, serializedSettingsUsed, apiErr := feature.RecommendAgentConfig(orgId, recommendation, latestConfig)
if apiErr != nil {
return nil, "", errors.Wrap(apiErr.ToError(), fmt.Sprintf(
"failed to generate agent config recommendation for %s", featureType,
))
updatedConf, serializedSettingsUsed, err := feature.RecommendAgentConfig(orgId, recommendation, latestConfig)
if err != nil {
return nil, "", errors.WithAdditionalf(err, "agent config recommendation for %s failed", featureType)
}
recommendation = updatedConf
@@ -178,26 +180,26 @@ func (m *Manager) ReportConfigDeploymentStatus(
func GetLatestVersion(
ctx context.Context, orgId valuer.UUID, elementType opamptypes.ElementType,
) (*opamptypes.AgentConfigVersion, *model.ApiError) {
) (*opamptypes.AgentConfigVersion, error) {
return m.GetLatestVersion(ctx, orgId, elementType)
}
func GetConfigVersion(
ctx context.Context, orgId valuer.UUID, elementType opamptypes.ElementType, version int,
) (*opamptypes.AgentConfigVersion, *model.ApiError) {
) (*opamptypes.AgentConfigVersion, error) {
return m.GetConfigVersion(ctx, orgId, elementType, version)
}
func GetConfigHistory(
ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, limit int,
) ([]opamptypes.AgentConfigVersion, *model.ApiError) {
) ([]opamptypes.AgentConfigVersion, error) {
return m.GetConfigHistory(ctx, orgId, typ, limit)
}
// StartNewVersion launches a new config version for given set of elements
func StartNewVersion(
ctx context.Context, orgId valuer.UUID, userId valuer.UUID, eleType opamptypes.ElementType, elementIds []string,
) (*opamptypes.AgentConfigVersion, *model.ApiError) {
) (*opamptypes.AgentConfigVersion, error) {
// create a new version
cfg := opamptypes.NewAgentConfigVersion(orgId, userId, eleType)
@@ -217,17 +219,16 @@ func NotifyConfigUpdate(ctx context.Context) {
m.notifyConfigUpdateSubscribers()
}
func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, version int) *model.ApiError {
func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, version int) error {
configVersion, err := GetConfigVersion(ctx, orgId, typ, version)
if err != nil {
zap.L().Error("failed to fetch config version during redeploy", zap.Error(err))
return model.WrapApiError(err, "failed to fetch details of the config version")
return err
}
if configVersion == nil || (configVersion != nil && configVersion.Config == "") {
zap.L().Debug("config version has no conf yaml", zap.Any("configVersion", configVersion))
return model.BadRequest(fmt.Errorf("the config version can not be redeployed"))
return errors.NewInvalidInputf(CodeConfigVersionNoConfig, "the config version cannot be redeployed")
}
switch typ {
case opamptypes.ElementTypeSamplingRules:
@@ -246,7 +247,7 @@ func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
zap.L().Error("failed to call agent config update for trace processor", zap.Error(err))
return model.InternalError(fmt.Errorf("failed to deploy the config"))
return errors.WithAdditionalf(err, "failed to deploy the config")
}
m.updateDeployStatus(ctx, orgId, opamptypes.ElementTypeSamplingRules, version, opamptypes.DeployInitiated.StringValue(), "Deployment started", configHash, configVersion.Config)

View File

@@ -116,7 +116,7 @@ func (c *Controller) GenerateConnectionUrl(ctx context.Context, orgId string, cl
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
}
agentVersion := "v0.0.6"
agentVersion := "v0.0.7"
if req.AgentConfig.Version != "" {
agentVersion = req.AgentConfig.Version
}

View File

@@ -5,9 +5,9 @@ import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/modules/thirdpartyapi"
"io"
@@ -1791,7 +1791,7 @@ func (aH *APIHandler) GetWaterfallSpansForTraceWithMetadata(w http.ResponseWrite
}
traceID := mux.Vars(r)["traceId"]
if traceID == "" {
RespondError(w, model.BadRequest(errors.New("traceID is required")), nil)
render.Error(w, errors.NewInvalidInputf(errors.CodeInvalidInput, "traceID is required"))
return
}
@@ -1825,7 +1825,7 @@ func (aH *APIHandler) GetFlamegraphSpansForTrace(w http.ResponseWriter, r *http.
traceID := mux.Vars(r)["traceId"]
if traceID == "" {
RespondError(w, model.BadRequest(errors.New("traceID is required")), nil)
render.Error(w, errors.NewInvalidInputf(errors.CodeInvalidInput, "traceID is required"))
return
}
@@ -1926,9 +1926,9 @@ func (aH *APIHandler) setTTL(w http.ResponseWriter, r *http.Request) {
}
ctx := r.Context()
claims, errv2 := authtypes.ClaimsFromContext(ctx)
if errv2 != nil {
RespondError(w, &model.ApiError{Err: errors.New("failed to get org id from context"), Typ: model.ErrorInternal}, nil)
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, errors.NewInternalf(errors.CodeInternal, "failed to get org id from context"))
return
}
@@ -1995,17 +1995,15 @@ func (aH *APIHandler) getTTL(w http.ResponseWriter, r *http.Request) {
}
ctx := r.Context()
claims, errv2 := authtypes.ClaimsFromContext(ctx)
if errv2 != nil {
RespondError(w, &model.ApiError{Err: errors.New("failed to get org id from context"), Typ: model.ErrorInternal}, nil)
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
result, apiErr := aH.reader.GetTTL(r.Context(), claims.OrgID, ttlParams)
if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}
aH.WriteJSON(w, r, result)
}
@@ -2070,7 +2068,7 @@ func (aH *APIHandler) getHealth(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
if aH.SetupCompleted {
RespondError(w, &model.ApiError{Err: errors.New("self-registration is disabled"), Typ: model.ErrorBadData}, nil)
render.Error(w, errors.NewInvalidInputf(errors.CodeInvalidInput, "self-registration is disabled"))
return
}
@@ -3453,7 +3451,7 @@ func (aH *APIHandler) InstallIntegration(w http.ResponseWriter, r *http.Request)
}
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
RespondError(w, model.UnauthorizedError(errors.New("unauthorized")), nil)
render.Error(w, err)
return
}
@@ -4064,7 +4062,7 @@ func (aH *APIHandler) logAggregate(w http.ResponseWriter, r *http.Request) {
aH.WriteJSON(w, r, model.GetLogsAggregatesResponse{})
}
func parseAgentConfigVersion(r *http.Request) (int, *model.ApiError) {
func parseAgentConfigVersion(r *http.Request) (int, error) {
versionString := mux.Vars(r)["version"]
if versionString == "latest" {
@@ -4074,11 +4072,11 @@ func parseAgentConfigVersion(r *http.Request) (int, *model.ApiError) {
version64, err := strconv.ParseInt(versionString, 0, 8)
if err != nil {
return 0, model.BadRequestStr("invalid version number")
return 0, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid version number")
}
if version64 <= 0 {
return 0, model.BadRequestStr("invalid version number")
return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid version number")
}
return int(version64), nil
@@ -4088,16 +4086,13 @@ func (aH *APIHandler) PreviewLogsPipelinesHandler(w http.ResponseWriter, r *http
req := logparsingpipeline.PipelinesPreviewRequest{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
render.Error(w, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "failed to decode request body"))
return
}
resultLogs, apiErr := aH.LogsParsingPipelineController.PreviewLogsPipelines(
r.Context(), &req,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
resultLogs, err := aH.LogsParsingPipelineController.PreviewLogsPipelines(r.Context(), &req)
if err != nil {
render.Error(w, err)
return
}
@@ -4105,9 +4100,9 @@ func (aH *APIHandler) PreviewLogsPipelinesHandler(w http.ResponseWriter, r *http
}
func (aH *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Request) {
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
@@ -4119,35 +4114,33 @@ func (aH *APIHandler) ListLogsPipelinesHandler(w http.ResponseWriter, r *http.Re
version, err := parseAgentConfigVersion(r)
if err != nil {
RespondError(w, model.WrapApiError(err, "Failed to parse agent config version"), nil)
render.Error(w, err)
return
}
var payload *logparsingpipeline.PipelinesResponse
var apierr *model.ApiError
if version != -1 {
payload, apierr = aH.listLogsPipelinesByVersion(context.Background(), orgID, version)
payload, err = aH.listLogsPipelinesByVersion(r.Context(), orgID, version)
} else {
payload, apierr = aH.listLogsPipelines(context.Background(), orgID)
payload, err = aH.listLogsPipelines(r.Context(), orgID)
}
if apierr != nil {
RespondError(w, apierr, payload)
if err != nil {
render.Error(w, err)
return
}
aH.Respond(w, payload)
}
// listLogsPipelines lists logs pipelines for the latest version
func (aH *APIHandler) listLogsPipelines(ctx context.Context, orgID valuer.UUID) (
*logparsingpipeline.PipelinesResponse, *model.ApiError,
*logparsingpipeline.PipelinesResponse, error,
) {
// get latest agent config
latestVersion := -1
lastestConfig, err := agentConf.GetLatestVersion(ctx, orgID, opamptypes.ElementTypeLogPipelines)
if err != nil && err.Type() != model.ErrorNotFound {
return nil, model.WrapApiError(err, "failed to get latest agent config version")
if err != nil && !errorsV2.Ast(err, errorsV2.TypeNotFound) {
return nil, err
}
if lastestConfig != nil {
@@ -4156,14 +4149,14 @@ func (aH *APIHandler) listLogsPipelines(ctx context.Context, orgID valuer.UUID)
payload, err := aH.LogsParsingPipelineController.GetPipelinesByVersion(ctx, orgID, latestVersion)
if err != nil {
return nil, model.WrapApiError(err, "failed to get pipelines")
return nil, err
}
// todo(Nitya): make a new API for history pagination
limit := 10
history, err := agentConf.GetConfigHistory(ctx, orgID, opamptypes.ElementTypeLogPipelines, limit)
if err != nil {
return nil, model.WrapApiError(err, "failed to get config history")
return nil, err
}
payload.History = history
return payload, nil
@@ -4171,18 +4164,18 @@ func (aH *APIHandler) listLogsPipelines(ctx context.Context, orgID valuer.UUID)
// listLogsPipelinesByVersion lists pipelines along with config version history
func (aH *APIHandler) listLogsPipelinesByVersion(ctx context.Context, orgID valuer.UUID, version int) (
*logparsingpipeline.PipelinesResponse, *model.ApiError,
*logparsingpipeline.PipelinesResponse, error,
) {
payload, err := aH.LogsParsingPipelineController.GetPipelinesByVersion(ctx, orgID, version)
if err != nil {
return nil, model.WrapApiError(err, "failed to get pipelines by version")
return nil, err
}
// todo(Nitya): make a new API for history pagination
limit := 10
history, err := agentConf.GetConfigHistory(ctx, orgID, opamptypes.ElementTypeLogPipelines, limit)
if err != nil {
return nil, model.WrapApiError(err, "failed to retrieve agent config history")
return nil, err
}
payload.History = history
@@ -4218,14 +4211,14 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
createPipeline := func(
ctx context.Context,
postable []pipelinetypes.PostablePipeline,
) (*logparsingpipeline.PipelinesResponse, *model.ApiError) {
) (*logparsingpipeline.PipelinesResponse, error) {
if len(postable) == 0 {
zap.L().Warn("found no pipelines in the http request, this will delete all the pipelines")
}
validationErr := aH.LogsParsingPipelineController.ValidatePipelines(ctx, postable)
if validationErr != nil {
return nil, validationErr
err := aH.LogsParsingPipelineController.ValidatePipelines(ctx, postable)
if err != nil {
return nil, err
}
return aH.LogsParsingPipelineController.ApplyPipelines(ctx, orgID, userID, postable)
@@ -4233,7 +4226,7 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
res, err := createPipeline(r.Context(), req.Pipelines)
if err != nil {
RespondError(w, err, nil)
render.Error(w, err)
return
}

View File

@@ -106,7 +106,7 @@ func (c *Controller) Uninstall(ctx context.Context, orgId string, req *Uninstall
return nil
}
func (c *Controller) GetPipelinesForInstalledIntegrations(ctx context.Context, orgId string) ([]pipelinetypes.GettablePipeline, *model.ApiError) {
func (c *Controller) GetPipelinesForInstalledIntegrations(ctx context.Context, orgId string) ([]pipelinetypes.GettablePipeline, error) {
return c.mgr.GetPipelinesForInstalledIntegrations(ctx, orgId)
}

View File

@@ -256,7 +256,7 @@ func (m *Manager) UninstallIntegration(
func (m *Manager) GetPipelinesForInstalledIntegrations(
ctx context.Context,
orgId string,
) ([]pipelinetypes.GettablePipeline, *model.ApiError) {
) ([]pipelinetypes.GettablePipeline, error) {
installedIntegrations, apiErr := m.getInstalledIntegrations(ctx, orgId)
if apiErr != nil {
return nil, apiErr

View File

@@ -8,15 +8,23 @@ import (
"gopkg.in/yaml.v3"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/constants"
coreModel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
"github.com/pkg/errors"
"go.uber.org/zap"
)
var lockLogsPipelineSpec sync.RWMutex
var (
CodeCollectorConfigUnmarshalFailed = errors.MustNewCode("collector_config_unmarshal_failed")
CodeCollectorConfigMarshalFailed = errors.MustNewCode("collector_config_marshal_failed")
CodeCollectorConfigServiceNotFound = errors.MustNewCode("collector_config_service_not_found")
CodeCollectorConfigServiceMarshalFailed = errors.MustNewCode("collector_config_service_marshal_failed")
CodeCollectorConfigServiceUnmarshalFailed = errors.MustNewCode("collector_config_service_unmarshal_failed")
CodeCollectorConfigLogsPipelineNotFound = errors.MustNewCode("collector_config_logs_pipeline_not_found")
)
// check if the processors already exist
// if yes then update the processor.
// if something doesn't exist, then remove it.
@@ -57,15 +65,15 @@ type otelPipeline struct {
func getOtelPipelineFromConfig(config map[string]interface{}) (*otelPipeline, error) {
if _, ok := config["service"]; !ok {
return nil, fmt.Errorf("service not found in OTEL config")
return nil, errors.NewInvalidInputf(CodeCollectorConfigServiceNotFound, "service not found in OTEL config")
}
b, err := json.Marshal(config["service"])
if err != nil {
return nil, err
return nil, errors.WrapInternalf(err, CodeCollectorConfigServiceMarshalFailed, "could not marshal OTEL config")
}
p := otelPipeline{}
if err := json.Unmarshal(b, &p); err != nil {
return nil, err
return nil, errors.WrapInternalf(err, CodeCollectorConfigServiceUnmarshalFailed, "could not unmarshal OTEL config")
}
return &p, nil
}
@@ -163,21 +171,16 @@ func checkDuplicateString(pipeline []string) bool {
return false
}
func GenerateCollectorConfigWithPipelines(
config []byte,
pipelines []pipelinetypes.GettablePipeline,
) ([]byte, *coreModel.ApiError) {
func GenerateCollectorConfigWithPipelines(config []byte, pipelines []pipelinetypes.GettablePipeline) ([]byte, error) {
var collectorConf map[string]interface{}
err := yaml.Unmarshal([]byte(config), &collectorConf)
if err != nil {
return nil, coreModel.BadRequest(err)
return nil, errors.WrapInvalidInputf(err, CodeCollectorConfigUnmarshalFailed, "could not unmarshal collector config")
}
signozPipelineProcessors, signozPipelineProcNames, err := PreparePipelineProcessor(pipelines)
if err != nil {
return nil, coreModel.BadRequest(errors.Wrap(
err, "could not prepare otel collector processors for log pipelines",
))
return nil, err
}
// Escape any `$`s as `$$` in config generated for pipelines, to ensure any occurrences
@@ -186,9 +189,7 @@ func GenerateCollectorConfigWithPipelines(
procConf := signozPipelineProcessors[procName]
serializedProcConf, err := yaml.Marshal(procConf)
if err != nil {
return nil, coreModel.InternalError(fmt.Errorf(
"could not marshal processor config for %s: %w", procName, err,
))
return nil, errors.WrapInternalf(err, CodeCollectorConfigMarshalFailed, "could not marshal processor config for %s", procName)
}
escapedSerializedConf := strings.ReplaceAll(
string(serializedProcConf), "$", "$$",
@@ -197,9 +198,7 @@ func GenerateCollectorConfigWithPipelines(
var escapedConf map[string]interface{}
err = yaml.Unmarshal([]byte(escapedSerializedConf), &escapedConf)
if err != nil {
return nil, coreModel.InternalError(fmt.Errorf(
"could not unmarshal dollar escaped processor config for %s: %w", procName, err,
))
return nil, errors.WrapInternalf(err, CodeCollectorConfigUnmarshalFailed, "could not unmarshal dollar escaped processor config for %s", procName)
}
signozPipelineProcessors[procName] = escapedConf
@@ -211,12 +210,10 @@ func GenerateCollectorConfigWithPipelines(
// build the new processor list in service.pipelines.logs
p, err := getOtelPipelineFromConfig(collectorConf)
if err != nil {
return nil, coreModel.BadRequest(err)
return nil, err
}
if p.Pipelines.Logs == nil {
return nil, coreModel.InternalError(fmt.Errorf(
"logs pipeline doesn't exist",
))
return nil, errors.NewInternalf(CodeCollectorConfigLogsPipelineNotFound, "logs pipeline doesn't exist")
}
updatedProcessorList, _ := buildCollectorPipelineProcessorsList(p.Pipelines.Logs.Processors, signozPipelineProcNames)
@@ -227,7 +224,7 @@ func GenerateCollectorConfigWithPipelines(
updatedConf, err := yaml.Marshal(collectorConf)
if err != nil {
return nil, coreModel.BadRequest(err)
return nil, errors.WrapInternalf(err, CodeCollectorConfigMarshalFailed, "could not marshal collector config")
}
return updatedConf, nil
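The pattern used throughout this file, a package-level code from errors.MustNewCode plus WrapInternalf or NewInvalidInputf at each failure site, can be sketched in isolation. CodeExampleMarshalFailed and marshalConfig below are illustrative names, not part of this diff:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/SigNoz/signoz/pkg/errors"
)

// Hypothetical code, following the collector_config_* naming used above.
var CodeExampleMarshalFailed = errors.MustNewCode("example_marshal_failed")

func marshalConfig(cfg map[string]any) ([]byte, error) {
	b, err := json.Marshal(cfg)
	if err != nil {
		// Keep the cause, attach a stable machine-readable code and a message.
		return nil, errors.WrapInternalf(err, CodeExampleMarshalFailed, "could not marshal config")
	}
	return b, nil
}

func main() {
	b, err := marshalConfig(map[string]any{"service": "otel"})
	fmt.Println(string(b), err)
}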

@@ -7,6 +7,7 @@ import (
"slices"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
"github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/model"
@@ -17,20 +18,23 @@ import (
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"github.com/pkg/errors"
"go.uber.org/zap"
)
var (
CodeRawPipelinesMarshalFailed = errors.MustNewCode("raw_pipelines_marshal_failed")
)
// Controller takes care of deployment cycle of log parsing pipelines.
type LogParsingPipelineController struct {
Repo
GetIntegrationPipelines func(context.Context, string) ([]pipelinetypes.GettablePipeline, *model.ApiError)
GetIntegrationPipelines func(context.Context, string) ([]pipelinetypes.GettablePipeline, error)
}
func NewLogParsingPipelinesController(
sqlStore sqlstore.SQLStore,
getIntegrationPipelines func(context.Context, string) ([]pipelinetypes.GettablePipeline, *model.ApiError),
getIntegrationPipelines func(context.Context, string) ([]pipelinetypes.GettablePipeline, error),
) (*LogParsingPipelineController, error) {
repo := NewRepo(sqlStore)
return &LogParsingPipelineController{
@@ -53,7 +57,7 @@ func (ic *LogParsingPipelineController) ApplyPipelines(
orgID valuer.UUID,
userID valuer.UUID,
postable []pipelinetypes.PostablePipeline,
) (*PipelinesResponse, *model.ApiError) {
) (*PipelinesResponse, error) {
var pipelines []pipelinetypes.GettablePipeline
// scan through postable pipelines, to select the existing pipelines or insert missing ones
@@ -68,9 +72,9 @@ func (ic *LogParsingPipelineController) ApplyPipelines(
// the same pipeline id.
r.ID = uuid.NewString()
r.OrderID = idx + 1
pipeline, apiErr := ic.insertPipeline(ctx, orgID, &r)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "failed to insert pipeline")
pipeline, err := ic.insertPipeline(ctx, orgID, &r)
if err != nil {
return nil, err
}
pipelines = append(pipelines, *pipeline)
@@ -90,13 +94,12 @@ func (ic *LogParsingPipelineController) ApplyPipelines(
return ic.GetPipelinesByVersion(ctx, orgID, cfg.Version)
}
func (ic *LogParsingPipelineController) ValidatePipelines(
ctx context.Context,
func (ic *LogParsingPipelineController) ValidatePipelines(ctx context.Context,
postedPipelines []pipelinetypes.PostablePipeline,
) *model.ApiError {
) error {
for _, p := range postedPipelines {
if err := p.IsValid(); err != nil {
return model.BadRequestStr(err.Error())
return errors.WithAdditionalf(err, "invalid pipeline: %s", p.Name)
}
}
@@ -121,39 +124,29 @@ func (ic *LogParsingPipelineController) ValidatePipelines(
}
sampleLogs := []model.SignozLog{{Body: ""}}
_, _, simulationErr := SimulatePipelinesProcessing(
ctx, gettablePipelines, sampleLogs,
)
if simulationErr != nil {
return model.BadRequest(fmt.Errorf(
"invalid pipelines config: %w", simulationErr.ToError(),
))
}
return nil
_, _, err := SimulatePipelinesProcessing(ctx, gettablePipelines, sampleLogs)
return err
}
// Returns effective list of pipelines including user created
// pipelines and pipelines for installed integrations
func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
ctx context.Context, orgID valuer.UUID, version int,
) ([]pipelinetypes.GettablePipeline, *model.ApiError) {
) ([]pipelinetypes.GettablePipeline, error) {
result := []pipelinetypes.GettablePipeline{}
if version >= 0 {
savedPipelines, errors := ic.getPipelinesByVersion(ctx, orgID.String(), version)
if errors != nil {
zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Errors("errors", errors))
return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version %v", errors))
savedPipelines, err := ic.getPipelinesByVersion(ctx, orgID.String(), version)
if err != nil {
zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Error(err))
return nil, err
}
result = savedPipelines
}
integrationPipelines, apiErr := ic.GetIntegrationPipelines(ctx, orgID.String())
if apiErr != nil {
return nil, model.WrapApiError(
apiErr, "could not get pipelines for installed integrations",
)
integrationPipelines, err := ic.GetIntegrationPipelines(ctx, orgID.String())
if err != nil {
return nil, err
}
// Filter out any integration pipelines included in pipelines saved by user
@@ -194,12 +187,11 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
// GetPipelinesByVersion responds with version info and associated pipelines
func (ic *LogParsingPipelineController) GetPipelinesByVersion(
ctx context.Context, orgId valuer.UUID, version int,
) (*PipelinesResponse, *model.ApiError) {
pipelines, errors := ic.getEffectivePipelinesByVersion(ctx, orgId, version)
if errors != nil {
zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Error(errors))
return nil, model.InternalError(fmt.Errorf("failed to get pipelines for given version %v", errors))
) (*PipelinesResponse, error) {
pipelines, err := ic.getEffectivePipelinesByVersion(ctx, orgId, version)
if err != nil {
zap.L().Error("failed to get pipelines for version", zap.Int("version", version), zap.Error(err))
return nil, err
}
var configVersion *opamptypes.AgentConfigVersion
@@ -207,7 +199,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion(
cv, err := agentConf.GetConfigVersion(ctx, orgId, opamptypes.ElementTypeLogPipelines, version)
if err != nil {
zap.L().Error("failed to get config for version", zap.Int("version", version), zap.Error(err))
return nil, model.WrapApiError(err, "failed to get config for given version")
return nil, err
}
configVersion = cv
}
@@ -231,11 +223,8 @@ type PipelinesPreviewResponse struct {
func (ic *LogParsingPipelineController) PreviewLogsPipelines(
ctx context.Context,
request *PipelinesPreviewRequest,
) (*PipelinesPreviewResponse, *model.ApiError) {
result, collectorLogs, err := SimulatePipelinesProcessing(
ctx, request.Pipelines, request.Logs,
)
) (*PipelinesPreviewResponse, error) {
result, collectorLogs, err := SimulatePipelinesProcessing(ctx, request.Pipelines, request.Logs)
if err != nil {
return nil, err
}
@@ -256,33 +245,27 @@ func (pc *LogParsingPipelineController) RecommendAgentConfig(
orgId valuer.UUID,
currentConfYaml []byte,
configVersion *opamptypes.AgentConfigVersion,
) (
recommendedConfYaml []byte,
serializedSettingsUsed string,
apiErr *model.ApiError,
) {
) ([]byte, string, error) {
pipelinesVersion := -1
if configVersion != nil {
pipelinesVersion = configVersion.Version
}
pipelinesResp, apiErr := pc.GetPipelinesByVersion(
pipelinesResp, err := pc.GetPipelinesByVersion(
context.Background(), orgId, pipelinesVersion,
)
if apiErr != nil {
return nil, "", apiErr
if err != nil {
return nil, "", err
}
updatedConf, apiErr := GenerateCollectorConfigWithPipelines(
currentConfYaml, pipelinesResp.Pipelines,
)
if apiErr != nil {
return nil, "", model.WrapApiError(apiErr, "could not marshal yaml for updated conf")
updatedConf, err := GenerateCollectorConfigWithPipelines(currentConfYaml, pipelinesResp.Pipelines)
if err != nil {
return nil, "", err
}
rawPipelineData, err := json.Marshal(pipelinesResp.Pipelines)
if err != nil {
return nil, "", model.BadRequest(errors.Wrap(err, "could not serialize pipelines to JSON"))
return nil, "", errors.WrapInternalf(err, CodeRawPipelinesMarshalFailed, "could not serialize pipelines to JSON")
}
return updatedConf, string(rawPipelineData), nil

@@ -6,13 +6,13 @@ import (
"fmt"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/pkg/errors"
"go.uber.org/zap"
)
@@ -33,24 +33,18 @@ func NewRepo(sqlStore sqlstore.SQLStore) Repo {
// insertPipeline stores a given postable pipeline to database
func (r *Repo) insertPipeline(
ctx context.Context, orgID valuer.UUID, postable *pipelinetypes.PostablePipeline,
) (*pipelinetypes.GettablePipeline, *model.ApiError) {
) (*pipelinetypes.GettablePipeline, error) {
if err := postable.IsValid(); err != nil {
return nil, model.BadRequest(errors.Wrap(err,
"pipeline is not valid",
))
return nil, errors.WithAdditionalf(err, "pipeline is not valid")
}
rawConfig, err := json.Marshal(postable.Config)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err,
"failed to unmarshal postable pipeline config",
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to unmarshal postable pipeline config")
}
filter, err := json.Marshal(postable.Filter)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err,
"failed to marshal postable pipeline filter",
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to marshal postable pipeline filter")
}
claims, errv2 := authtypes.ClaimsFromContext(ctx)
@@ -85,10 +79,9 @@ func (r *Repo) insertPipeline(
_, err = r.sqlStore.BunDB().NewInsert().
Model(&insertRow.StoreablePipeline).
Exec(ctx)
if err != nil {
zap.L().Error("error in inserting pipeline data", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to insert pipeline"))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to insert pipeline")
}
return insertRow, nil
@@ -97,8 +90,7 @@ func (r *Repo) insertPipeline(
// getPipelinesByVersion returns pipelines associated with a given version
func (r *Repo) getPipelinesByVersion(
ctx context.Context, orgID string, version int,
) ([]pipelinetypes.GettablePipeline, []error) {
var errors []error
) ([]pipelinetypes.GettablePipeline, error) {
storablePipelines := []pipelinetypes.StoreablePipeline{}
err := r.sqlStore.BunDB().NewSelect().
Model(&storablePipelines).
@@ -110,7 +102,7 @@ func (r *Repo) getPipelinesByVersion(
Order("p.order_id ASC").
Scan(ctx)
if err != nil {
return nil, []error{fmt.Errorf("failed to get pipelines from db: %v", err)}
return nil, errors.WrapInternalf(err, CodePipelinesGetFailed, "failed to get pipelines from db")
}
gettablePipelines := make([]pipelinetypes.GettablePipeline, len(storablePipelines))
@@ -118,23 +110,24 @@ func (r *Repo) getPipelinesByVersion(
return gettablePipelines, nil
}
var errs []error
for i := range storablePipelines {
gettablePipelines[i].StoreablePipeline = storablePipelines[i]
if err := gettablePipelines[i].ParseRawConfig(); err != nil {
errors = append(errors, err)
errs = append(errs, err)
}
if err := gettablePipelines[i].ParseFilter(); err != nil {
errors = append(errors, err)
errs = append(errs, err)
}
}
return gettablePipelines, errors
return gettablePipelines, errors.Join(errs...)
}
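The errors.Join change above is what lets getPipelinesByVersion drop its []error return: joining an empty slice yields nil, so callers keep the usual single-error check. A stdlib-only sketch (assuming the pkg/errors import forwards Join with the standard semantics):

package main

import (
	"errors"
	"fmt"
)

func main() {
	var errs []error
	// No parse failures: Join returns nil and the caller sees success.
	fmt.Println(errors.Join(errs...) == nil) // true

	errs = append(errs, fmt.Errorf("bad raw config"), fmt.Errorf("bad filter"))
	// Failures are aggregated into a single error, one message per line.
	fmt.Println(errors.Join(errs...))
}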
// GetPipelines returns pipeline and errors (if any)
func (r *Repo) GetPipeline(
ctx context.Context, orgID string, id string,
) (*pipelinetypes.GettablePipeline, *model.ApiError) {
) (*pipelinetypes.GettablePipeline, error) {
storablePipelines := []pipelinetypes.StoreablePipeline{}
err := r.sqlStore.BunDB().NewSelect().
@@ -144,12 +137,12 @@ func (r *Repo) GetPipeline(
Scan(ctx)
if err != nil {
zap.L().Error("failed to get ingestion pipeline from db", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to get ingestion pipeline from db"))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to get ingestion pipeline from db")
}
if len(storablePipelines) == 0 {
zap.L().Warn("No row found for ingestion pipeline id", zap.String("id", id))
return nil, model.NotFoundError(fmt.Errorf("no row found for ingestion pipeline id %v", id))
return nil, errors.NewNotFoundf(errors.CodeNotFound, "no row found for ingestion pipeline id %v", id)
}
if len(storablePipelines) == 1 {
@@ -157,20 +150,16 @@ func (r *Repo) GetPipeline(
gettablePipeline.StoreablePipeline = storablePipelines[0]
if err := gettablePipeline.ParseRawConfig(); err != nil {
zap.L().Error("invalid pipeline config found", zap.String("id", id), zap.Error(err))
return nil, model.InternalError(
errors.Wrap(err, "found an invalid pipeline config"),
)
return nil, err
}
if err := gettablePipeline.ParseFilter(); err != nil {
zap.L().Error("invalid pipeline filter found", zap.String("id", id), zap.Error(err))
return nil, model.InternalError(
errors.Wrap(err, "found an invalid pipeline filter"),
)
return nil, err
}
return &gettablePipeline, nil
}
return nil, model.InternalError(fmt.Errorf("multiple pipelines with same id"))
return nil, errors.NewInternalf(errors.CodeInternal, "multiple pipelines with same id")
}
func (r *Repo) DeletePipeline(ctx context.Context, orgID string, id string) error {

@@ -3,6 +3,8 @@ package logparsingpipeline
import "github.com/SigNoz/signoz/pkg/errors"
var (
CodeInvalidOperatorType = errors.MustNewCode("operator_type_mismatch")
CodeFieldNilCheckType = errors.MustNewCode("operator_field_nil_check")
CodePipelinesGetFailed = errors.MustNewCode("pipelines_get_failed")
CodeProcessorFactoryMapFailed = errors.MustNewCode("processor_factory_map_failed")
)

@@ -9,22 +9,16 @@ import (
"github.com/SigNoz/signoz-otel-collector/pkg/collectorsimulator"
_ "github.com/SigNoz/signoz-otel-collector/pkg/parser/grok"
"github.com/SigNoz/signoz-otel-collector/processor/signozlogspipelineprocessor"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
"github.com/pkg/errors"
"go.opentelemetry.io/collector/otelcol"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
)
func SimulatePipelinesProcessing(
ctx context.Context,
pipelines []pipelinetypes.GettablePipeline,
logs []model.SignozLog,
) (
output []model.SignozLog, collectorWarnAndErrorLogs []string, apiErr *model.ApiError,
) {
func SimulatePipelinesProcessing(ctx context.Context, pipelines []pipelinetypes.GettablePipeline, logs []model.SignozLog) (
[]model.SignozLog, []string, error) {
if len(pipelines) < 1 {
return logs, nil, nil
}
@@ -42,13 +36,9 @@ func SimulatePipelinesProcessing(
}
simulatorInputPLogs := SignozLogsToPLogs(logs)
processorFactories, err := otelcol.MakeFactoryMap(
signozlogspipelineprocessor.NewFactory(),
)
processorFactories, err := otelcol.MakeFactoryMap(signozlogspipelineprocessor.NewFactory())
if err != nil {
return nil, nil, model.InternalError(errors.Wrap(
err, "could not construct processor factory map",
))
return nil, nil, errors.WrapInternalf(err, CodeProcessorFactoryMapFailed, "could not construct processor factory map")
}
// Pipelines translate to logtransformprocessors in otel collector config.
@@ -60,9 +50,9 @@ func SimulatePipelinesProcessing(
timeout := time.Millisecond * time.Duration(len(pipelines)*100+100)
configGenerator := func(baseConf []byte) ([]byte, error) {
updatedConf, apiErr := GenerateCollectorConfigWithPipelines(baseConf, pipelines)
if apiErr != nil {
return nil, apiErr.ToError()
updatedConf, err := GenerateCollectorConfigWithPipelines(baseConf, pipelines)
if err != nil {
return nil, err
}
return updatedConf, nil
}
@@ -76,14 +66,9 @@ func SimulatePipelinesProcessing(
)
if simulationErr != nil {
if errors.Is(simulationErr, collectorsimulator.ErrInvalidConfig) {
apiErr = model.BadRequest(simulationErr)
} else {
apiErr = model.InternalError(simulationErr)
return nil, nil, errors.WrapInvalidInputf(simulationErr, errors.CodeInvalidInput, "invalid config")
}
return nil, collectorErrs, model.WrapApiError(apiErr,
"could not simulate log pipelines processing.\nCollector errors",
)
return nil, nil, errors.WrapInternalf(simulationErr, errors.CodeInternal, "could not simulate log pipelines processing")
}
outputSignozLogs := PLogsToSignozLogs(outputPLogs)
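The sentinel check above decides between an invalid-input wrap and an internal wrap. A stdlib-only sketch of the same classification, with errInvalidConfig standing in for collectorsimulator.ErrInvalidConfig:

package main

import (
	"errors"
	"fmt"
)

var errInvalidConfig = errors.New("invalid config")

// classify mirrors the branch above: a sentinel match means bad user input,
// anything else is treated as an internal failure.
func classify(simErr error) string {
	if errors.Is(simErr, errInvalidConfig) {
		return "invalid_input"
	}
	return "internal"
}

func main() {
	fmt.Println(classify(fmt.Errorf("simulate: %w", errInvalidConfig))) // invalid_input
	fmt.Println(classify(errors.New("boom")))                          // internal
}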
@@ -98,6 +83,7 @@ func SimulatePipelinesProcessing(
delete(sigLog.Attributes_int64, inputOrderAttribute)
}
collectorWarnAndErrorLogs := []string{}
for _, log := range collectorErrs {
// if log is empty or log comes from featuregate.go, then remove it
if log == "" || strings.Contains(log, "featuregate.go") {

@@ -3,25 +3,27 @@ package opamp
import (
"context"
"crypto/sha256"
"fmt"
"github.com/SigNoz/signoz/pkg/errors"
model "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp/otelconfig"
coreModel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/knadh/koanf/parsers/yaml"
"github.com/open-telemetry/opamp-go/protobufs"
"go.opentelemetry.io/collector/confmap"
"go.uber.org/zap"
)
var (
CodeNoAgentsAvailable = errors.MustNewCode("no_agents_available")
CodeOpAmpServerDown = errors.MustNewCode("opamp_server_down")
CodeMultipleAgentsNotSupported = errors.MustNewCode("multiple_agents_not_supported")
)
// inserts or updates ingestion controller processors depending
// on the signal (metrics or traces)
func UpsertControlProcessors(
ctx context.Context,
signal string,
processors map[string]interface{},
callback model.OnChangeCallback,
) (hash string, fnerr *coreModel.ApiError) {
func UpsertControlProcessors(ctx context.Context, signal string,
processors map[string]interface{}, callback model.OnChangeCallback,
) (string, error) {
// note: only processors enabled through tracesPipelinePlan will be added
// to pipeline. To enable or disable processors from pipeline, call
// AddToTracePipeline() or RemoveFromTracesPipeline() prior to calling
@@ -31,33 +33,24 @@ func UpsertControlProcessors(
if signal != string(Metrics) && signal != string(Traces) {
zap.L().Error("received invalid signal int UpsertControlProcessors", zap.String("signal", signal))
fnerr = coreModel.BadRequest(fmt.Errorf(
"signal not supported in ingestion rules: %s", signal,
))
return
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "signal not supported in ingestion rules: %s", signal)
}
if opAmpServer == nil {
fnerr = coreModel.UnavailableError(fmt.Errorf(
"opamp server is down, unable to push config to agent at this moment",
))
return
return "", errors.NewInternalf(CodeOpAmpServerDown, "opamp server is down, unable to push config to agent at this moment")
}
agents := opAmpServer.agents.GetAllAgents()
if len(agents) == 0 {
fnerr = coreModel.UnavailableError(fmt.Errorf("no agents available at the moment"))
return
return "", errors.NewInternalf(CodeNoAgentsAvailable, "no agents available at the moment")
}
if len(agents) > 1 && signal == string(Traces) {
zap.L().Debug("found multiple agents. this feature is not supported for traces pipeline (sampling rules)")
fnerr = coreModel.BadRequest(fmt.Errorf("multiple agents not supported in sampling rules"))
return
return "", errors.NewInvalidInputf(CodeMultipleAgentsNotSupported, "multiple agents not supported in sampling rules")
}
hash := ""
for _, agent := range agents {
agenthash, err := addIngestionControlToAgent(agent, signal, processors, false)
if err != nil {
zap.L().Error("failed to push ingestion rules config to agent", zap.String("agentID", agent.AgentID), zap.Error(err))

@@ -12,9 +12,12 @@ import (
// AlertState denotes the state of an active alert.
type AlertState int
// The enum values are ordered by priority (lowest to highest).
// When determining overall rule state, higher numeric values take precedence.
const (
StateInactive AlertState = iota
StatePending
StateRecovering
StateFiring
StateNoData
StateDisabled
@@ -32,6 +35,8 @@ func (s AlertState) String() string {
return "nodata"
case StateDisabled:
return "disabled"
case StateRecovering:
return "recovering"
}
panic(errors.Errorf("unknown alert state: %d", s))
}
@@ -58,6 +63,8 @@ func (s *AlertState) UnmarshalJSON(b []byte) error {
*s = StateNoData
case "disabled":
*s = StateDisabled
case "recovering":
*s = StateRecovering
default:
*s = StateInactive
}
@@ -83,6 +90,8 @@ func (s *AlertState) Scan(value interface{}) error {
*s = StateNoData
case "disabled":
*s = StateDisabled
case "recovering":
*s = StateRecovering
}
return nil
}
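Because the enum is ordered by priority, an overall rule state can be computed as a plain numeric max. A minimal sketch mirroring the iota ordering above; overallState is illustrative, not part of this diff:

package main

import "fmt"

type AlertState int

// Mirrors the ordering above: higher value, higher priority.
const (
	StateInactive AlertState = iota
	StatePending
	StateRecovering
	StateFiring
	StateNoData
	StateDisabled
)

// overallState returns the highest-priority state among the given states.
func overallState(states []AlertState) AlertState {
	max := StateInactive
	for _, s := range states {
		if s > max {
			max = s
		}
	}
	return max
}

func main() {
	fmt.Println(overallState([]AlertState{StatePending, StateRecovering}) == StateRecovering) // true
}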

@@ -5,11 +5,16 @@ import (
"reflect"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
expr "github.com/antonmedv/expr"
"go.uber.org/zap"
)
var (
CodeExprCompilationFailed = errors.MustNewCode("expr_compilation_failed")
)
var logOperatorsToExpr = map[v3.FilterOperator]string{
v3.FilterOperatorEqual: "==",
v3.FilterOperatorNotEqual: "!=",
@@ -50,7 +55,7 @@ func Parse(filters *v3.FilterSet) (string, error) {
var res []string
for _, v := range filters.Items {
if _, ok := logOperatorsToExpr[v.Operator]; !ok {
return "", fmt.Errorf("operator not supported")
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "operator not supported: %s", v.Operator)
}
name := getName(v.Key)
@@ -108,7 +113,7 @@ func Parse(filters *v3.FilterSet) (string, error) {
q := strings.Join(res, " "+strings.ToLower(filters.Operator)+" ")
_, err := expr.Compile(q)
if err != nil {
return "", err
return "", errors.WrapInternalf(err, CodeExprCompilationFailed, "failed to compile expression: %s", q)
}
return q, nil

@@ -191,6 +191,26 @@ func (r *BaseRule) currentAlerts() []*ruletypes.Alert {
return alerts
}
// ActiveAlertsLabelFP returns a set of fingerprints for the active alerts,
// computed with the QueryResultLables.Hash() method.
// We use QueryResultLables instead of Labels because they are the raw
// labels we get from the sample.
// This is useful when we want to check whether an alert is still active
// based on the labels we have.
func (r *BaseRule) ActiveAlertsLabelFP() map[uint64]struct{} {
r.mtx.Lock()
defer r.mtx.Unlock()
activeAlerts := make(map[uint64]struct{}, len(r.Active))
for _, alert := range r.Active {
if alert == nil || alert.QueryResultLables == nil {
continue
}
activeAlerts[alert.QueryResultLables.Hash()] = struct{}{}
}
return activeAlerts
}
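The map[uint64]struct{} above is the usual Go set idiom: empty struct values occupy no space and membership is a single lookup. A small sketch of the same shape HasActiveAlert uses further down:

package main

import "fmt"

func main() {
	active := map[uint64]struct{}{}
	for _, fp := range []uint64{0xdead, 0xbeef} {
		active[fp] = struct{}{}
	}

	// Membership check against the fingerprint set.
	_, ok := active[0xbeef]
	fmt.Println(ok) // true
}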
func (r *BaseRule) EvalDelay() time.Duration {
return r.evalDelay
}

@@ -1,9 +1,10 @@
package rules
import (
"github.com/stretchr/testify/require"
"testing"
"github.com/stretchr/testify/require"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
)
@@ -74,7 +75,7 @@ func TestBaseRule_RequireMinPoints(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
_, err := test.rule.Threshold.ShouldAlert(*test.series, "")
_, err := test.rule.Threshold.Eval(*test.series, "", ruletypes.EvalData{})
require.NoError(t, err)
require.Equal(t, len(test.series.Points) >= test.rule.ruleCondition.RequiredNumPoints, test.shouldAlert)
})

@@ -4,13 +4,14 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"log/slog"
"sort"
"strings"
"sync"
"time"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
"github.com/go-openapi/strfmt"

@@ -159,7 +159,9 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
continue
}
results, err := r.Threshold.ShouldAlert(toCommonSeries(series), r.Unit())
results, err := r.Threshold.Eval(toCommonSeries(series), r.Unit(), ruletypes.EvalData{
ActiveAlerts: r.ActiveAlertsLabelFP(),
})
if err != nil {
return nil, err
}
@@ -233,6 +235,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
Value: result.V,
GeneratorURL: r.GeneratorURL(),
Receivers: ruleReceiverMap[lbs.Map()[ruletypes.LabelThresholdName]],
IsRecovering: result.IsRecovering,
}
}
}
@@ -245,6 +248,9 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
if alert, ok := r.Active[h]; ok && alert.State != model.StateInactive {
alert.Value = a.Value
alert.Annotations = a.Annotations
// Update the recovering and missing state of existing alert
alert.IsRecovering = a.IsRecovering
alert.Missing = a.Missing
if v, ok := alert.Labels.Map()[ruletypes.LabelThresholdName]; ok {
alert.Receivers = ruleReceiverMap[v]
}
@@ -304,6 +310,29 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
})
}
// We need to change firing alert to recovering if the returned sample meets recovery threshold
changeAlertingToRecovering := a.State == model.StateFiring && a.IsRecovering
// We need to change recovering alerts to firing if the returned sample meets target threshold
changeRecoveringToFiring := a.State == model.StateRecovering && !a.IsRecovering && !a.Missing
// in either of the above cases we need to update the status of the alert
if changeAlertingToRecovering || changeRecoveringToFiring {
state := model.StateRecovering
if changeRecoveringToFiring {
state = model.StateFiring
}
a.State = state
r.logger.DebugContext(ctx, "converting alert state", "name", r.Name(), "state", state)
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: state,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})
}
}
r.health = ruletypes.HealthGood
r.lastError = err
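The two booleans above encode a small state machine between firing and recovering. A self-contained sketch of just that transition; nextState and the local state type are illustrative:

package main

import "fmt"

type state int

const (
	stateRecovering state = iota
	stateFiring
)

// nextState applies the rule added above: a firing alert whose sample met the
// recovery threshold becomes recovering; a recovering alert whose sample met
// the target threshold again (and is not missing) goes back to firing.
func nextState(cur state, isRecovering, missing bool) (state, bool) {
	switch {
	case cur == stateFiring && isRecovering:
		return stateRecovering, true
	case cur == stateRecovering && !isRecovering && !missing:
		return stateFiring, true
	}
	return cur, false
}

func main() {
	s, changed := nextState(stateFiring, true, false)
	fmt.Println(s == stateRecovering, changed) // true true
}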

@@ -23,7 +23,7 @@ func getVectorValues(vectors []ruletypes.Sample) []float64 {
return values
}
func TestPromRuleShouldAlert(t *testing.T) {
func TestPromRuleEval(t *testing.T) {
postableRule := ruletypes.PostableRule{
AlertName: "Test Rule",
AlertType: ruletypes.AlertTypeMetric,
@@ -696,7 +696,7 @@ func TestPromRuleShouldAlert(t *testing.T) {
assert.NoError(t, err)
}
resultVectors, err := rule.Threshold.ShouldAlert(toCommonSeries(c.values), rule.Unit())
resultVectors, err := rule.Threshold.Eval(toCommonSeries(c.values), rule.Unit(), ruletypes.EvalData{})
assert.NoError(t, err)
// Compare full result vector with expected vector

@@ -24,6 +24,8 @@ type Rule interface {
HoldDuration() time.Duration
State() model.AlertState
ActiveAlerts() []*ruletypes.Alert
// ActiveAlertsLabelFP returns a set of active alert label fingerprints
ActiveAlertsLabelFP() map[uint64]struct{}
PreferredChannels() []string

@@ -488,7 +488,9 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID,
continue
}
}
resultSeries, err := r.Threshold.ShouldAlert(*series, r.Unit())
resultSeries, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
ActiveAlerts: r.ActiveAlertsLabelFP(),
})
if err != nil {
return nil, err
}
@@ -565,7 +567,9 @@ func (r *ThresholdRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUI
continue
}
}
resultSeries, err := r.Threshold.ShouldAlert(*series, r.Unit())
resultSeries, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
ActiveAlerts: r.ActiveAlertsLabelFP(),
})
if err != nil {
return nil, err
}
@@ -666,13 +670,14 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
// Links with timestamps should go in annotations since labels
// is used alert grouping, and we want to group alerts with the same
// label set, but different timestamps, together.
if r.typ == ruletypes.AlertTypeTraces {
switch r.typ {
case ruletypes.AlertTypeTraces:
link := r.prepareLinksToTraces(ctx, ts, smpl.Metric)
if link != "" && r.hostFromSource() != "" {
r.logger.InfoContext(ctx, "adding traces link to annotations", "link", fmt.Sprintf("%s/traces-explorer?%s", r.hostFromSource(), link))
annotations = append(annotations, labels.Label{Name: "related_traces", Value: fmt.Sprintf("%s/traces-explorer?%s", r.hostFromSource(), link)})
}
} else if r.typ == ruletypes.AlertTypeLogs {
case ruletypes.AlertTypeLogs:
link := r.prepareLinksToLogs(ctx, ts, smpl.Metric)
if link != "" && r.hostFromSource() != "" {
r.logger.InfoContext(ctx, "adding logs link to annotations", "link", fmt.Sprintf("%s/logs/logs-explorer?%s", r.hostFromSource(), link))
@@ -698,6 +703,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
GeneratorURL: r.GeneratorURL(),
Receivers: ruleReceiverMap[lbs.Map()[ruletypes.LabelThresholdName]],
Missing: smpl.IsMissing,
IsRecovering: smpl.IsRecovering,
}
}
@@ -711,6 +717,9 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
alert.Value = a.Value
alert.Annotations = a.Annotations
// Update the recovering and missing state of existing alert
alert.IsRecovering = a.IsRecovering
alert.Missing = a.Missing
if v, ok := alert.Labels.Map()[ruletypes.LabelThresholdName]; ok {
alert.Receivers = ruleReceiverMap[v]
}
@@ -735,6 +744,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
delete(r.Active, fp)
}
if a.State != model.StateInactive {
r.logger.DebugContext(ctx, "converting firing alert to inActive", "name", r.Name())
a.State = model.StateInactive
a.ResolvedAt = ts
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
@@ -752,6 +762,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
}
if a.State == model.StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration {
r.logger.DebugContext(ctx, "converting pending alert to firing", "name", r.Name())
a.State = model.StateFiring
a.FiredAt = ts
state := model.StateFiring
@@ -769,6 +780,30 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
Value: a.Value,
})
}
// We need to change firing alert to recovering if the returned sample meets recovery threshold
changeAlertingToRecovering := a.State == model.StateFiring && a.IsRecovering
// We need to change recovering alerts to firing if the returned sample meets target threshold
changeRecoveringToFiring := a.State == model.StateRecovering && !a.IsRecovering && !a.Missing
// in either of the above cases we need to update the status of the alert
if changeAlertingToRecovering || changeRecoveringToFiring {
state := model.StateRecovering
if changeRecoveringToFiring {
state = model.StateFiring
}
a.State = state
r.logger.DebugContext(ctx, "converting alert state", "name", r.Name(), "state", state)
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: state,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})
}
}
currentState := r.State()

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -437,6 +437,10 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
}
switch varValues := varItem.Value.(type) {
case []any:
if len(varValues) == 0 {
v.errors = append(v.errors, fmt.Sprintf("malformed request payload: variable `%s` used in expression has an empty list value", strings.TrimPrefix(var_, "$")))
return ""
}
values = varValues
case any:
values = []any{varValues}
@@ -516,6 +520,10 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
if ok {
switch varValues := varItem.Value.(type) {
case []any:
if len(varValues) == 0 {
v.errors = append(v.errors, fmt.Sprintf("malformed request payload: variable `%s` used in expression has an empty list value", strings.TrimPrefix(var_, "$")))
return ""
}
value = varValues[0]
case any:
value = varValues
@@ -856,17 +864,23 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
} else if !v.ignoreNotFoundKeys {
// TODO(srikanthccv): do we want to return an error here?
// should we infer the type and auto-magically build a key for expression?
v.errors = append(v.errors, fmt.Sprintf("key `%s` not found", fieldKey.Name))
v.errors = append(v.errors, fmt.Sprintf("key `%s` is not a valid field, consider removing it from filter query", fieldKey.Name))
v.mainErrorURL = "https://signoz.io/docs/userguide/search-troubleshooting/#key-fieldname-not-found"
}
}
if len(fieldKeysForName) > 1 {
keys := []string{}
for _, item := range fieldKeysForName {
keys = append(keys, fmt.Sprintf("%s.%s:%s", item.FieldContext.StringValue(), item.Name, item.FieldDataType.StringValue()))
}
warnMsg := fmt.Sprintf(
"Key `%s` is ambiguous, found %d different combinations of field context / data type: %v.",
"Key `%s` is ambiguous, found %d different combinations of field context / data type. "+
"Please specify one from these [ %s ] to disambiguate.",
fieldKey.Name,
len(fieldKeysForName),
fieldKeysForName,
strings.Join(keys, ", "),
)
mixedFieldContext := map[string]bool{}
for _, item := range fieldKeysForName {

@@ -0,0 +1,75 @@
package querybuilder
import (
"strings"
"testing"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
// testFieldKey returns a mock TelemetryFieldKey for the given name
func testFieldKey(name string) *telemetrytypes.TelemetryFieldKey {
return &telemetrytypes.TelemetryFieldKey{
Name: name,
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
}
}
// TestPrepareWhereClause_EmptyVariableList ensures PrepareWhereClause errors when a variable has an empty list value
func TestPrepareWhereClause_EmptyVariableList(t *testing.T) {
tests := []struct {
name string
expr string
variables map[string]qbtypes.VariableItem
expectError bool
wantInError string
}{
{
name: "Empty []any for equality",
expr: "service = $service",
variables: map[string]qbtypes.VariableItem{
"service": {Value: []any{}},
},
expectError: true,
wantInError: "Found 1 errors while parsing the search expression",
},
{
name: "Empty []any for IN clause",
expr: "service IN $service",
variables: map[string]qbtypes.VariableItem{
"service": {Value: []any{}},
},
expectError: true,
wantInError: "Found 1 errors while parsing the search expression",
},
}
keys := map[string][]*telemetrytypes.TelemetryFieldKey{
"service": {testFieldKey("service")},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
opts := FilterExprVisitorOpts{
FieldKeys: keys,
Variables: tt.variables,
}
_, err := PrepareWhereClause(tt.expr, opts, 0, 0)
if tt.expectError {
if err == nil {
t.Fatalf("expected error, got nil")
}
if tt.wantInError != "" && !strings.Contains(err.Error(), tt.wantInError) {
t.Fatalf("expected error to contain %q, got %q", tt.wantInError, err.Error())
}
} else if err != nil {
t.Fatalf("unexpected error: %v", err)
}
})
}
}
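The guard exercised by this test can be shown in isolation. A sketch of the expansion logic with the same error wording; expandVariable is illustrative, not the real visitor method:

package main

import (
	"fmt"
	"strings"
)

// expandVariable rejects empty list values up front, instead of letting an
// empty IN (...) clause reach the query builder.
func expandVariable(name string, value any) ([]any, error) {
	switch v := value.(type) {
	case []any:
		if len(v) == 0 {
			return nil, fmt.Errorf("malformed request payload: variable `%s` used in expression has an empty list value", strings.TrimPrefix(name, "$"))
		}
		return v, nil
	default:
		return []any{v}, nil
	}
}

func main() {
	_, err := expandVariable("$service", []any{})
	fmt.Println(err)
}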

@@ -716,7 +716,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "key `greater` not found",
expectedErrorContains: "key `greater` is not a valid field, consider removing it from filter query",
},
{
category: "Key-operator-value boundary",
@@ -732,7 +732,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "key `less` not found",
expectedErrorContains: "key `less` is not a valid field, consider removing it from filter query",
},
{
category: "Key-operator-value boundary",
@@ -788,7 +788,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "key `user` not found",
expectedErrorContains: "key `user` is not a valid field, consider removing it from filter query",
},
{
category: "Key-operator-value boundary",
@@ -1999,7 +1999,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "key `response.body.data.items[].id` not found",
expectedErrorContains: "key `response.body.data.items[].id` is not a valid field, consider removing it from filter query",
},
{
category: "Nested object paths",
@@ -2236,7 +2236,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "key `user_id` not found",
expectedErrorContains: "key `user_id` is not a valid field, consider removing it from filter query",
},
// More common filter patterns
@@ -2387,7 +2387,7 @@ func TestFilterExprLogs(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("%s: %s", tc.category, limitString(tc.query, 50)), func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause(tc.query, opts, 0, 0)
if tc.shouldPass {
if err != nil {
@@ -2506,7 +2506,7 @@ func TestFilterExprLogsConflictNegation(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("%s: %s", tc.category, limitString(tc.query, 50)), func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause(tc.query, opts, 0, 0)
if tc.shouldPass {
if err != nil {

@@ -249,7 +249,7 @@ func (b *logQueryStatementBuilder) buildListQuery(
// get column expression for the field - use array index directly to avoid pointer to loop variable
colExpr, err := b.fm.ColumnExpressionFor(ctx, &query.SelectFields[index], keys)
if err != nil {
return nil, err
return nil, errors.WithAdditionalf(err, "Consider removing field %s from columns by clicking options and then removing the column", query.SelectFields[index].Name)
}
sb.SelectMore(colExpr)
}
@@ -269,7 +269,7 @@ func (b *logQueryStatementBuilder) buildListQuery(
for _, orderBy := range query.Order {
colExpr, err := b.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys)
if err != nil {
return nil, err
return nil, errors.WithAdditionalf(err, "Consider removing field %s and choosing a different column in 'Order by' drop down menu", orderBy.Key.TelemetryFieldKey.Name)
}
sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
}
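The edits above keep the original error and append a user-facing hint. A sketch of that pattern, assuming WithAdditionalf behaves as its call sites in this diff suggest; columnFor is a stand-in for ColumnExpressionFor failing on an unknown field:

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/errors"
)

func columnFor(name string) (string, error) {
	return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "key `%s` is not a valid field", name)
}

func main() {
	if _, err := columnFor("user_id"); err != nil {
		// Attach an actionable suggestion without losing the original error.
		err = errors.WithAdditionalf(err, "Consider removing field %s from columns", "user_id")
		fmt.Println(err)
	}
}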
@@ -592,7 +592,7 @@ func (b *logQueryStatementBuilder) addFilterCondition(
JsonBodyPrefix: b.jsonBodyPrefix,
JsonKeyToKey: b.jsonKeyToKey,
Variables: variables,
}, start, end)
if err != nil {
return nil, err

@@ -5,6 +5,7 @@ import (
"fmt"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
@@ -122,7 +123,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
for _, g := range query.GroupBy {
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys)
if err != nil {
return "", []any{}, err
return "", []any{}, errors.WithAdditionalf(err, "Consider removing field %s from 'AGGREGATE ACROSS TIME SERIES by' options", g.TelemetryFieldKey.Name)
}
sb.SelectMore(col)
}
@@ -148,7 +149,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
FieldKeys: keys,
FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"},
Variables: variables,
}, start, end)
if err != nil {
return "", []any{}, err
}
@@ -202,7 +203,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDelta(
for _, g := range query.GroupBy {
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys)
if err != nil {
return "", nil, err
return "", []any{}, errors.WithAdditionalf(err, "Consider removing field %s from 'AGGREGATE ACROSS TIME SERIES by' options", g.TelemetryFieldKey.Name)
}
sb.SelectMore(col)
}
@@ -231,7 +232,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDelta(
FieldKeys: keys,
FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"},
Variables: variables,
}, start, end)
if err != nil {
return "", nil, err
}
@@ -272,7 +273,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
for _, g := range query.GroupBy {
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys)
if err != nil {
return "", nil, err
return "", []any{}, errors.WithAdditionalf(err, "Consider removing field %s from 'AGGREGATE ACROSS TIME SERIES by' options", g.TelemetryFieldKey.Name)
}
baseSb.SelectMore(col)
}
@@ -295,7 +296,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
FieldKeys: keys,
FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"},
Variables: variables,
}, start, end)
if err != nil {
return "", nil, err
}

@@ -6,6 +6,7 @@ import (
"log/slog"
"os"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
@@ -361,7 +362,7 @@ func (b *MetricQueryStatementBuilder) buildTimeSeriesCTE(
for _, g := range query.GroupBy {
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys)
if err != nil {
return "", nil, err
return "", nil, errors.WithAdditionalf(err, "Consider removing field %s from 'AGGREGATE ACROSS TIME SERIES by' options", g.TelemetryFieldKey.Name)
}
sb.SelectMore(col)
}

@@ -295,7 +295,7 @@ func (b *traceQueryStatementBuilder) buildListQuery(
for _, field := range selectedFields {
colExpr, err := b.fm.ColumnExpressionFor(ctx, &field, keys)
if err != nil {
return nil, err
return nil, errors.WithAdditionalf(err, "Consider removing field %s by clicking Options and then removing the field", field.Name)
}
sb.SelectMore(colExpr)
}
@@ -313,7 +313,7 @@ func (b *traceQueryStatementBuilder) buildListQuery(
for _, orderBy := range query.Order {
colExpr, err := b.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys)
if err != nil {
return nil, err
return nil, errors.WithAdditionalf(err, "Consider removing field %s and choosing a different field in 'Order by' drop down menu", orderBy.Key.TelemetryFieldKey.Name)
}
sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
}

@@ -467,7 +467,7 @@ func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFrom
for _, orderBy := range b.operator.Order {
colExpr, err := b.stmtBuilder.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys)
if err != nil {
return nil, err
return nil, errors.WithAdditionalf(err, "Consider removing field %s and choosing a different column in 'Order by' drop down menu", orderBy.Key.TelemetryFieldKey.Name)
}
sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
orderApplied = true

@@ -220,7 +220,7 @@ func (p *PostablePipeline) IsValid() error {
// check the filter
_, err := queryBuilderToExpr.Parse(p.Filter)
if err != nil {
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "filter for pipeline %v is not correct", p.Name)
return err
}
idUnique := map[string]struct{}{}
@@ -403,7 +403,7 @@ func isValidOperator(op PipelineOperator) error {
!isValidOtelValue(op.To) ||
!isValidOtelValue(op.Field) {
valueErrStr := "value should have prefix of body, attributes, resource"
return errors.NewInvalidInputf(errors.CodeInvalidInput, "%s for operator Id %s", valueErrStr, op.ID)
return errors.NewInvalidInputf(errors.CodeInvalidInput, "%s for operator: %s in position: %d with id: %s", valueErrStr, op.Type, op.OrderId, op.ID)
}
return nil
}

@@ -59,7 +59,8 @@ type Alert struct {
LastSentAt time.Time
ValidUntil time.Time
Missing bool
IsRecovering bool
}
func (a *Alert) NeedsSending(ts time.Time, resendDelay time.Duration) bool {

View File

@@ -621,10 +621,10 @@ func TestParseIntoRuleThresholdGeneration(t *testing.T) {
}
// Test that threshold can evaluate properly
vector, err := threshold.ShouldAlert(v3.Series{
vector, err := threshold.Eval(v3.Series{
Points: []v3.Point{{Value: 0.15, Timestamp: 1000}}, // 150ms in seconds
Labels: map[string]string{"test": "label"},
}, "")
}, "", EvalData{})
if err != nil {
t.Fatalf("Unexpected error in shouldAlert: %v", err)
}
@@ -698,20 +698,20 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
}
// Test with a value that should trigger both WARNING and CRITICAL thresholds
vector, err := threshold.ShouldAlert(v3.Series{
vector, err := threshold.Eval(v3.Series{
Points: []v3.Point{{Value: 95.0, Timestamp: 1000}}, // 95% CPU usage
Labels: map[string]string{"service": "test"},
}, "")
}, "", EvalData{})
if err != nil {
t.Fatalf("Unexpected error in shouldAlert: %v", err)
}
assert.Equal(t, 2, len(vector))
vector, err = threshold.ShouldAlert(v3.Series{
vector, err = threshold.Eval(v3.Series{
Points: []v3.Point{{Value: 75.0, Timestamp: 1000}}, // 75% CPU usage
Labels: map[string]string{"service": "test"},
}, "")
}, "", EvalData{})
if err != nil {
t.Fatalf("Unexpected error in shouldAlert: %v", err)
}
@@ -719,7 +719,7 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
assert.Equal(t, 1, len(vector))
}
func TestAnomalyNegationShouldAlert(t *testing.T) {
func TestAnomalyNegationEval(t *testing.T) {
tests := []struct {
name string
ruleJSON []byte
@@ -1046,9 +1046,9 @@ func TestAnomalyNegationShouldAlert(t *testing.T) {
t.Fatalf("unexpected error from GetRuleThreshold: %v", err)
}
resultVector, err := ruleThreshold.ShouldAlert(tt.series, "")
resultVector, err := ruleThreshold.Eval(tt.series, "", EvalData{})
if err != nil {
t.Fatalf("unexpected error from ShouldAlert: %v", err)
t.Fatalf("unexpected error from Eval: %v", err)
}
shouldAlert := len(resultVector) > 0

@@ -19,7 +19,11 @@ type Sample struct {
IsMissing bool
// IsRecovering is true if the sample is part of a recovering alert.
IsRecovering bool
Target float64
RecoveryTarget *float64
TargetUnit string
}

@@ -57,8 +57,28 @@ type RuleReceivers struct {
Name string `json:"name"`
}
// EvalData holds other dependent values used to evaluate the threshold rules.
type EvalData struct {
// ActiveAlerts is a map of active alert fingerprints
// used to check if a sample is part of an active alert
// when evaluating the recovery threshold.
ActiveAlerts map[uint64]struct{}
}
// HasActiveAlert checks if the given sample fingerprint is active
// as an alert.
func (eval EvalData) HasActiveAlert(sampleLabelFp uint64) bool {
if len(eval.ActiveAlerts) == 0 {
return false
}
_, ok := eval.ActiveAlerts[sampleLabelFp]
return ok
}
type RuleThreshold interface {
ShouldAlert(series v3.Series, unit string) (Vector, error)
// Eval runs the given series through the threshold rules
// using the given EvalData and returns the matching series
Eval(series v3.Series, unit string, evalData EvalData) (Vector, error)
GetRuleReceivers() []RuleReceivers
}
@@ -97,7 +117,7 @@ func (r BasicRuleThresholds) Validate() error {
return errors.Join(errs...)
}
func (r BasicRuleThresholds) ShouldAlert(series v3.Series, unit string) (Vector, error) {
func (r BasicRuleThresholds) Eval(series v3.Series, unit string, evalData EvalData) (Vector, error) {
var resultVector Vector
thresholds := []BasicRuleThreshold(r)
sortThresholds(thresholds)
@@ -105,8 +125,31 @@ func (r BasicRuleThresholds) ShouldAlert(series v3.Series, unit string) (Vector,
smpl, shouldAlert := threshold.shouldAlert(series, unit)
if shouldAlert {
smpl.Target = *threshold.TargetValue
if threshold.RecoveryTarget != nil {
smpl.RecoveryTarget = threshold.RecoveryTarget
}
smpl.TargetUnit = threshold.TargetUnit
resultVector = append(resultVector, smpl)
continue
}
// Prepare alert hash from series labels and threshold name if recovery target option was provided
if threshold.RecoveryTarget == nil {
continue
}
sampleLabels := PrepareSampleLabelsForRule(series.Labels, threshold.Name)
alertHash := sampleLabels.Hash()
// check if alert is active and then check if recovery threshold matches
if evalData.HasActiveAlert(alertHash) {
smpl, matchesRecoveryThreshold := threshold.matchesRecoveryThreshold(series, unit)
if matchesRecoveryThreshold {
smpl.Target = *threshold.TargetValue
smpl.RecoveryTarget = threshold.RecoveryTarget
smpl.TargetUnit = threshold.TargetUnit
// IsRecovering marks that the metric is in the recovery stage
smpl.IsRecovering = true
resultVector = append(resultVector, smpl)
}
}
}
return resultVector, nil
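Putting the pieces together: per threshold, a sample either alerts normally, or, when the alert is already active and a recovery target exists, it is re-emitted flagged as recovering. A condensed sketch of that decision; decide and its boolean inputs are simplified stand-ins for the threshold checks and the EvalData fingerprint lookup:

package main

import "fmt"

func decide(alerted, recovered, active, hasRecoveryTarget bool) (emit, isRecovering bool) {
	if alerted {
		return true, false // normal alerting path
	}
	if hasRecoveryTarget && active && recovered {
		return true, true // active alert now within the recovery band
	}
	return false, false
}

func main() {
	fmt.Println(decide(false, true, true, true))  // true true: active alert is recovering
	fmt.Println(decide(false, true, false, true)) // false false: no active alert, nothing emitted
}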
@@ -133,16 +176,27 @@ func sortThresholds(thresholds []BasicRuleThreshold) {
})
}
func (b BasicRuleThreshold) target(ruleUnit string) float64 {
// convertToRuleUnit converts the given value from the target unit to the rule unit
func (b BasicRuleThreshold) convertToRuleUnit(val float64, ruleUnit string) float64 {
unitConverter := converter.FromUnit(converter.Unit(b.TargetUnit))
// convert the target value to the y-axis unit
value := unitConverter.Convert(converter.Value{
F: *b.TargetValue,
F: val,
U: converter.Unit(b.TargetUnit),
}, converter.Unit(ruleUnit))
return value.F
}
// target returns the target value in the rule unit
func (b BasicRuleThreshold) target(ruleUnit string) float64 {
return b.convertToRuleUnit(*b.TargetValue, ruleUnit)
}
// recoveryTarget returns the recovery target value in the rule unit
func (b BasicRuleThreshold) recoveryTarget(ruleUnit string) float64 {
return b.convertToRuleUnit(*b.RecoveryTarget, ruleUnit)
}
func (b BasicRuleThreshold) getCompareOp() CompareOp {
return b.CompareOp
}
@@ -178,6 +232,13 @@ func (b BasicRuleThreshold) Validate() error {
return errors.Join(errs...)
}
func (b BasicRuleThreshold) matchesRecoveryThreshold(series v3.Series, ruleUnit string) (Sample, bool) {
return b.shouldAlertWithTarget(series, b.recoveryTarget(ruleUnit))
}
func (b BasicRuleThreshold) shouldAlert(series v3.Series, ruleUnit string) (Sample, bool) {
return b.shouldAlertWithTarget(series, b.target(ruleUnit))
}
func removeGroupinSetPoints(series v3.Series) []v3.Point {
var result []v3.Point
for _, s := range series.Points {
@@ -188,21 +249,22 @@ func removeGroupinSetPoints(series v3.Series) []v3.Point {
return result
}
func (b BasicRuleThreshold) shouldAlert(series v3.Series, ruleUnit string) (Sample, bool) {
// PrepareSampleLabelsForRule prepares the labels for the sample to be used in alerting.
// It accepts seriesLabels and thresholdName as input and returns the labels with the threshold name label added.
func PrepareSampleLabelsForRule(seriesLabels map[string]string, thresholdName string) (lbls labels.Labels) {
lb := labels.NewBuilder(labels.Labels{})
for name, value := range seriesLabels {
lb.Set(name, value)
}
lb.Set(LabelThresholdName, thresholdName)
lb.Set(LabelSeverityName, strings.ToLower(thresholdName))
return lb.Labels()
}
func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float64) (Sample, bool) {
var shouldAlert bool
var alertSmpl Sample
var lbls labels.Labels
for name, value := range series.Labels {
lbls = append(lbls, labels.Label{Name: name, Value: value})
}
target := b.target(ruleUnit)
// TODO(srikanthccv): is it better to move the logic to notifier instead of
// adding two labels?
lbls = append(lbls, labels.Label{Name: LabelThresholdName, Value: b.Name})
lbls = append(lbls, labels.Label{Name: LabelSeverityName, Value: strings.ToLower(b.Name)})
lbls := PrepareSampleLabelsForRule(series.Labels, b.Name)
series.Points = removeGroupinSetPoints(series)

@@ -9,7 +9,7 @@ import (
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
)
func TestBasicRuleThresholdShouldAlert_UnitConversion(t *testing.T) {
func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
target := 100.0
tests := []struct {
@@ -270,7 +270,7 @@ func TestBasicRuleThresholdShouldAlert_UnitConversion(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
thresholds := BasicRuleThresholds{tt.threshold}
vector, err := thresholds.ShouldAlert(tt.series, tt.ruleUnit)
vector, err := thresholds.Eval(tt.series, tt.ruleUnit, EvalData{})
assert.NoError(t, err)
alert := len(vector) > 0
@@ -301,3 +301,31 @@ func TestBasicRuleThresholdShouldAlert_UnitConversion(t *testing.T) {
})
}
}
func TestPrepareSampleLabelsForRule(t *testing.T) {
alertAllHashes := make(map[uint64]struct{})
thresholdName := "test"
for range 50_000 {
sampleLabels := map[string]string{
"service": "test",
"env": "prod",
"tier": "backend",
"namespace": "default",
"pod": "test-pod",
"container": "test-container",
"node": "test-node",
"cluster": "test-cluster",
"region": "test-region",
"az": "test-az",
"hostname": "test-hostname",
"ip": "192.168.1.1",
"port": "8080",
}
lbls := PrepareSampleLabelsForRule(sampleLabels, thresholdName)
assert.True(t, lbls.Has(LabelThresholdName), "LabelThresholdName not found in labels")
alertAllHashes[lbls.Hash()] = struct{}{}
}
t.Logf("Total hashes: %d", len(alertAllHashes))
// there should be only one hash for all the samples
assert.Equal(t, 1, len(alertAllHashes), "Expected only one hash for all the samples")
}