Compare commits

7 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 16e0ef2515 |  |
|  | fecf6667a3 |  |
|  | bda2316377 |  |
|  | fff7f8fc76 |  |
|  | 8cfeef4521 |  |
|  | d85a1a21ac |  |
|  | 6404e7388e |  |
@@ -1,5 +1,6 @@
import { render, screen } from '@testing-library/react';
import ROUTES from 'constants/routes';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import MockQueryClientProvider from 'providers/test/MockQueryClientProvider';
import { DataSource } from 'types/common/queryBuilder';

@@ -52,11 +53,32 @@ jest.mock('hooks/saveViews/useDeleteView', () => ({
  })),
}));

// Mock usePreferenceSync
jest.mock('providers/preferences/sync/usePreferenceSync', () => ({
  usePreferenceSync: (): any => ({
    preferences: {
      columns: [],
      formatting: {
        maxLines: 2,
        format: 'table',
        fontSize: 'small',
        version: 1,
      },
    },
    loading: false,
    error: null,
    updateColumns: jest.fn(),
    updateFormatting: jest.fn(),
  }),
}));

describe('ExplorerCard', () => {
  it('renders a card with a title and a description', () => {
    render(
      <MockQueryClientProvider>
        <ExplorerCard sourcepage={DataSource.TRACES}>child</ExplorerCard>
        <PreferenceContextProvider>
          <ExplorerCard sourcepage={DataSource.TRACES}>child</ExplorerCard>
        </PreferenceContextProvider>
      </MockQueryClientProvider>,
    );
    expect(screen.queryByText('Query Builder')).not.toBeInTheDocument();

@@ -65,7 +87,9 @@ describe('ExplorerCard', () => {
  it('renders a save view button', () => {
    render(
      <MockQueryClientProvider>
        <ExplorerCard sourcepage={DataSource.TRACES}>child</ExplorerCard>
        <PreferenceContextProvider>
          <ExplorerCard sourcepage={DataSource.TRACES}>child</ExplorerCard>
        </PreferenceContextProvider>
      </MockQueryClientProvider>,
    );
    expect(screen.queryByText('Save view')).not.toBeInTheDocument();
@@ -6,6 +6,7 @@ import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
import { mapQueryDataFromApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataFromApi';
import isEqual from 'lodash-es/isEqual';
import { Query } from 'types/api/queryBuilder/queryBuilderData';
import { DataSource } from 'types/common/queryBuilder';

import {
  DeleteViewHandlerProps,

@@ -106,7 +107,11 @@ export const isQueryUpdatedInView = ({
    !isEqual(
      options?.selectColumns,
      extraData && JSON.parse(extraData)?.selectColumns,
    )
  ) ||
  (stagedQuery?.builder?.queryData?.[0]?.dataSource === DataSource.LOGS &&
    (!isEqual(options?.format, extraData && JSON.parse(extraData)?.format) ||
      !isEqual(options?.maxLines, extraData && JSON.parse(extraData)?.maxLines) ||
      !isEqual(options?.fontSize, extraData && JSON.parse(extraData)?.fontSize)))
  );
};
@@ -74,6 +74,7 @@ const formatMap = {
  'MM/dd HH:mm': DATE_TIME_FORMATS.SLASH_SHORT,
  'MM/DD': DATE_TIME_FORMATS.DATE_SHORT,
  'YY-MM': DATE_TIME_FORMATS.YEAR_MONTH,
  'MMM d, yyyy, h:mm:ss aaaa': DATE_TIME_FORMATS.DASH_DATETIME,
  YY: DATE_TIME_FORMATS.YEAR_SHORT,
};
@@ -54,6 +54,7 @@ import {
  X,
} from 'lucide-react';
import { useAppContext } from 'providers/App/App';
import { FormattingOptions } from 'providers/preferences/types';
import {
  CSSProperties,
  Dispatch,

@@ -270,17 +271,26 @@ function ExplorerOptions({
  const getUpdatedExtraData = (
    extraData: string | undefined,
    newSelectedColumns: BaseAutocompleteData[],
    formattingOptions?: FormattingOptions,
  ): string => {
    let updatedExtraData;

    if (extraData) {
      const parsedExtraData = JSON.parse(extraData);
      parsedExtraData.selectColumns = newSelectedColumns;
      if (formattingOptions) {
        parsedExtraData.format = formattingOptions.format;
        parsedExtraData.maxLines = formattingOptions.maxLines;
        parsedExtraData.fontSize = formattingOptions.fontSize;
      }
      updatedExtraData = JSON.stringify(parsedExtraData);
    } else {
      updatedExtraData = JSON.stringify({
        color: Color.BG_SIENNA_500,
        selectColumns: newSelectedColumns,
        format: formattingOptions?.format,
        maxLines: formattingOptions?.maxLines,
        fontSize: formattingOptions?.fontSize,
      });
    }
    return updatedExtraData;

@@ -289,6 +299,14 @@ function ExplorerOptions({
  const updatedExtraData = getUpdatedExtraData(
    extraData,
    options?.selectColumns,
    // pass this only for logs
    sourcepage === DataSource.LOGS
      ? {
          format: options?.format,
          maxLines: options?.maxLines,
          fontSize: options?.fontSize,
        }
      : undefined,
  );

  const {

@@ -517,6 +535,14 @@ function ExplorerOptions({
      color,
      selectColumns: options.selectColumns,
      version: 1,
      ...// pass this only for logs
      (sourcepage === DataSource.LOGS
        ? {
            format: options?.format,
            maxLines: options?.maxLines,
            fontSize: options?.fontSize,
          }
        : {}),
    }),
    notifications,
    panelType: panelType || PANEL_TYPES.LIST,
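Note: the reworked `getUpdatedExtraData` above folds log formatting options into a saved view's `extraData` JSON. A minimal usage sketch follows; the sample values and the pre-existing `extraData` shape are assumptions for illustration, not code from this changeset.

```tsx
// Hypothetical call mirroring the logs-only branch in the hunk above.
const existingExtraData = JSON.stringify({ color: '#B86B4B', selectColumns: [] });

const updatedExtraData = getUpdatedExtraData(
  existingExtraData,
  options?.selectColumns ?? [],
  // formatting is only forwarded when sourcepage === DataSource.LOGS
  { format: 'table', maxLines: 2, fontSize: 'small' },
);
// updatedExtraData now carries selectColumns plus format/maxLines/fontSize
// alongside whatever the view already stored (e.g. its color).
```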
@@ -114,7 +114,6 @@ function LogsExplorerViews({

  // Context
  const {
    initialDataSource,
    currentQuery,
    stagedQuery,
    panelType,

@@ -144,7 +143,7 @@ function LogsExplorerViews({

  const { options, config } = useOptionsMenu({
    storageKey: LOCALSTORAGE.LOGS_LIST_OPTIONS,
    dataSource: initialDataSource || DataSource.LOGS,
    dataSource: DataSource.LOGS,
    aggregateOperator: listQuery?.aggregateOperator || StringOperators.NOOP,
  });
@@ -5,6 +5,7 @@ import { logsQueryRangeSuccessResponse } from 'mocks-server/__mockdata__/logs_qu
import { server } from 'mocks-server/server';
import { rest } from 'msw';
import { SELECTED_VIEWS } from 'pages/LogsExplorer/utils';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { QueryBuilderContext } from 'providers/QueryBuilder';
import { VirtuosoMockContext } from 'react-virtuoso';
import { fireEvent, render, RenderResult } from 'tests/test-utils';

@@ -87,6 +88,25 @@ jest.mock('hooks/useSafeNavigate', () => ({
  }),
}));

// Mock usePreferenceSync
jest.mock('providers/preferences/sync/usePreferenceSync', () => ({
  usePreferenceSync: (): any => ({
    preferences: {
      columns: [],
      formatting: {
        maxLines: 2,
        format: 'table',
        fontSize: 'small',
        version: 1,
      },
    },
    loading: false,
    error: null,
    updateColumns: jest.fn(),
    updateFormatting: jest.fn(),
  }),
}));

jest.mock('hooks/logs/useCopyLogLink', () => ({
  useCopyLogLink: jest.fn().mockReturnValue({
    activeLogId: ACTIVE_LOG_ID,

@@ -105,13 +125,15 @@ const renderer = (): RenderResult =>
    <VirtuosoMockContext.Provider
      value={{ viewportHeight: 300, itemHeight: 100 }}
    >
      <LogsExplorerViews
        selectedView={SELECTED_VIEWS.SEARCH}
        showFrequencyChart
        setIsLoadingQueries={(): void => {}}
        listQueryKeyRef={{ current: {} }}
        chartQueryKeyRef={{ current: {} }}
      />
      <PreferenceContextProvider>
        <LogsExplorerViews
          selectedView={SELECTED_VIEWS.SEARCH}
          showFrequencyChart
          setIsLoadingQueries={(): void => {}}
          listQueryKeyRef={{ current: {} }}
          chartQueryKeyRef={{ current: {} }}
        />
      </PreferenceContextProvider>
    </VirtuosoMockContext.Provider>,
  );

@@ -184,13 +206,15 @@ describe('LogsExplorerViews -', () => {
    lodsQueryServerRequest();
    render(
      <QueryBuilderContext.Provider value={mockQueryBuilderContextValue}>
        <LogsExplorerViews
          selectedView={SELECTED_VIEWS.SEARCH}
          showFrequencyChart
          setIsLoadingQueries={(): void => {}}
          listQueryKeyRef={{ current: {} }}
          chartQueryKeyRef={{ current: {} }}
        />
        <PreferenceContextProvider>
          <LogsExplorerViews
            selectedView={SELECTED_VIEWS.SEARCH}
            showFrequencyChart
            setIsLoadingQueries={(): void => {}}
            listQueryKeyRef={{ current: {} }}
            chartQueryKeyRef={{ current: {} }}
          />
        </PreferenceContextProvider>
      </QueryBuilderContext.Provider>,
    );
@@ -5,6 +5,7 @@ import { logsPaginationQueryRangeSuccessResponse } from 'mocks-server/__mockdata
import { server } from 'mocks-server/server';
import { rest } from 'msw';
import { DashboardProvider } from 'providers/Dashboard/Dashboard';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { I18nextProvider } from 'react-i18next';
import i18n from 'ReactI18';
import { act, fireEvent, render, screen, waitFor } from 'tests/test-utils';

@@ -108,11 +109,13 @@ describe('LogsPanelComponent', () => {
    render(
      <I18nextProvider i18n={i18n}>
        <DashboardProvider>
          <NewWidget
            selectedGraph={PANEL_TYPES.LIST}
            fillSpans={undefined}
            yAxisUnit={undefined}
          />
          <PreferenceContextProvider>
            <NewWidget
              selectedGraph={PANEL_TYPES.LIST}
              fillSpans={undefined}
              yAxisUnit={undefined}
            />
          </PreferenceContextProvider>
        </DashboardProvider>
      </I18nextProvider>,
    );
@@ -1,7 +1,4 @@
|
||||
import getFromLocalstorage from 'api/browser/localstorage/get';
|
||||
import setToLocalstorage from 'api/browser/localstorage/set';
|
||||
import { getAggregateKeys } from 'api/queryBuilder/getAttributeKeys';
|
||||
import { LOCALSTORAGE } from 'constants/localStorage';
|
||||
import { LogViewMode } from 'container/LogsTable';
|
||||
import { useGetAggregateKeys } from 'hooks/queryBuilder/useGetAggregateKeys';
|
||||
import useDebounce from 'hooks/useDebounce';
|
||||
@@ -11,6 +8,7 @@ import {
|
||||
AllTraceFilterKeys,
|
||||
AllTraceFilterKeyValue,
|
||||
} from 'pages/TracesExplorer/Filter/filterUtils';
|
||||
import { usePreferenceContext } from 'providers/preferences/context/PreferenceContextProvider';
|
||||
import { useCallback, useEffect, useMemo, useState } from 'react';
|
||||
import { useQueries } from 'react-query';
|
||||
import { ErrorResponse, SuccessResponse } from 'types/api';
|
||||
@@ -35,10 +33,10 @@ import {
|
||||
import { getOptionsFromKeys } from './utils';
|
||||
|
||||
interface UseOptionsMenuProps {
|
||||
storageKey?: string;
|
||||
dataSource: DataSource;
|
||||
aggregateOperator: string;
|
||||
initialOptions?: InitialOptions;
|
||||
storageKey: LOCALSTORAGE;
|
||||
}
|
||||
|
||||
interface UseOptionsMenu {
|
||||
@@ -48,22 +46,21 @@ interface UseOptionsMenu {
|
||||
}
|
||||
|
||||
const useOptionsMenu = ({
|
||||
storageKey,
|
||||
dataSource,
|
||||
aggregateOperator,
|
||||
initialOptions = {},
|
||||
}: UseOptionsMenuProps): UseOptionsMenu => {
|
||||
const { notifications } = useNotifications();
|
||||
const {
|
||||
preferences,
|
||||
updateColumns,
|
||||
updateFormatting,
|
||||
} = usePreferenceContext();
|
||||
|
||||
const [searchText, setSearchText] = useState<string>('');
|
||||
const [isFocused, setIsFocused] = useState<boolean>(false);
|
||||
const debouncedSearchText = useDebounce(searchText, 300);
|
||||
|
||||
const localStorageOptionsQuery = useMemo(
|
||||
() => getFromLocalstorage(storageKey),
|
||||
[storageKey],
|
||||
);
|
||||
|
||||
const initialQueryParams = useMemo(
|
||||
() => ({
|
||||
searchText: '',
|
||||
@@ -77,7 +74,6 @@ const useOptionsMenu = ({
|
||||
|
||||
const {
|
||||
query: optionsQuery,
|
||||
queryData: optionsQueryData,
|
||||
redirectWithQuery: redirectWithOptionsData,
|
||||
} = useUrlQueryData<OptionsQuery>(URL_OPTIONS, defaultOptionsQuery);
|
||||
|
||||
@@ -105,7 +101,9 @@ const useOptionsMenu = ({
|
||||
);
|
||||
|
||||
const initialSelectedColumns = useMemo(() => {
|
||||
if (!isFetchedInitialAttributes) return [];
|
||||
if (!isFetchedInitialAttributes) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const attributesData = initialAttributesResult?.reduce(
|
||||
(acc, attributeResponse) => {
|
||||
@@ -142,14 +140,12 @@ const useOptionsMenu = ({
|
||||
})
|
||||
.filter(Boolean) as BaseAutocompleteData[];
|
||||
|
||||
// this is the last point where we can set the default columns and if uptil now also we have an empty array then we will set the default columns
|
||||
if (!initialSelected || !initialSelected?.length) {
|
||||
initialSelected = defaultTraceSelectedColumns;
|
||||
}
|
||||
}
|
||||
|
||||
return initialSelected || [];
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [
|
||||
isFetchedInitialAttributes,
|
||||
initialOptions?.selectColumns,
|
||||
@@ -171,7 +167,6 @@ const useOptionsMenu = ({
|
||||
const searchedAttributeKeys = useMemo(() => {
|
||||
if (searchedAttributesData?.payload?.attributeKeys?.length) {
|
||||
if (dataSource === DataSource.LOGS) {
|
||||
// add timestamp and body to the list of attributes
|
||||
return [
|
||||
...defaultLogsSelectedColumns,
|
||||
...searchedAttributesData.payload.attributeKeys.filter(
|
||||
@@ -188,32 +183,35 @@ const useOptionsMenu = ({
|
||||
return [];
|
||||
}, [dataSource, searchedAttributesData?.payload?.attributeKeys]);
|
||||
|
||||
const initialOptionsQuery: OptionsQuery = useMemo(
|
||||
() => ({
|
||||
const initialOptionsQuery: OptionsQuery = useMemo(() => {
|
||||
let defaultColumns = defaultOptionsQuery.selectColumns;
|
||||
if (dataSource === DataSource.TRACES) {
|
||||
defaultColumns = defaultTraceSelectedColumns;
|
||||
} else if (dataSource === DataSource.LOGS) {
|
||||
defaultColumns = defaultLogsSelectedColumns;
|
||||
}
|
||||
|
||||
const finalSelectColumns = initialOptions?.selectColumns
|
||||
? initialSelectedColumns
|
||||
: defaultColumns;
|
||||
|
||||
return {
|
||||
...defaultOptionsQuery,
|
||||
...initialOptions,
|
||||
// eslint-disable-next-line no-nested-ternary
|
||||
selectColumns: initialOptions?.selectColumns
|
||||
? initialSelectedColumns
|
||||
: dataSource === DataSource.TRACES
|
||||
? defaultTraceSelectedColumns
|
||||
: defaultOptionsQuery.selectColumns,
|
||||
}),
|
||||
[dataSource, initialOptions, initialSelectedColumns],
|
||||
);
|
||||
selectColumns: finalSelectColumns,
|
||||
};
|
||||
}, [dataSource, initialOptions, initialSelectedColumns]);
|
||||
|
||||
const selectedColumnKeys = useMemo(
|
||||
() => optionsQueryData?.selectColumns?.map(({ id }) => id) || [],
|
||||
[optionsQueryData],
|
||||
() => preferences?.columns?.map(({ id }) => id) || [],
|
||||
[preferences?.columns],
|
||||
);
|
||||
|
||||
const optionsFromAttributeKeys = useMemo(() => {
|
||||
const filteredAttributeKeys = searchedAttributeKeys.filter((item) => {
|
||||
// For other data sources, only filter out 'body' if it exists
|
||||
if (dataSource !== DataSource.LOGS) {
|
||||
return item.key !== 'body';
|
||||
}
|
||||
// For LOGS, keep all keys
|
||||
return true;
|
||||
});
|
||||
|
||||
@@ -223,10 +221,8 @@ const useOptionsMenu = ({
|
||||
const handleRedirectWithOptionsData = useCallback(
|
||||
(newQueryData: OptionsQuery) => {
|
||||
redirectWithOptionsData(newQueryData);
|
||||
|
||||
setToLocalstorage(storageKey, JSON.stringify(newQueryData));
|
||||
},
|
||||
[storageKey, redirectWithOptionsData],
|
||||
[redirectWithOptionsData],
|
||||
);
|
||||
|
||||
const handleSelectColumns = useCallback(
|
||||
@@ -235,7 +231,7 @@ const useOptionsMenu = ({
|
||||
const newSelectedColumns = newSelectedColumnKeys.reduce((acc, key) => {
|
||||
const column = [
|
||||
...searchedAttributeKeys,
|
||||
...optionsQueryData.selectColumns,
|
||||
...(preferences?.columns || []),
|
||||
].find(({ id }) => id === key);
|
||||
|
||||
if (!column) return acc;
|
||||
@@ -243,75 +239,116 @@ const useOptionsMenu = ({
|
||||
}, [] as BaseAutocompleteData[]);
|
||||
|
||||
const optionsData: OptionsQuery = {
|
||||
...optionsQueryData,
|
||||
...defaultOptionsQuery,
|
||||
selectColumns: newSelectedColumns,
|
||||
format: preferences?.formatting?.format || defaultOptionsQuery.format,
|
||||
maxLines: preferences?.formatting?.maxLines || defaultOptionsQuery.maxLines,
|
||||
fontSize: preferences?.formatting?.fontSize || defaultOptionsQuery.fontSize,
|
||||
};
|
||||
|
||||
updateColumns(newSelectedColumns);
|
||||
handleRedirectWithOptionsData(optionsData);
|
||||
},
|
||||
[
|
||||
searchedAttributeKeys,
|
||||
selectedColumnKeys,
|
||||
optionsQueryData,
|
||||
preferences,
|
||||
handleRedirectWithOptionsData,
|
||||
updateColumns,
|
||||
],
|
||||
);
|
||||
|
||||
const handleRemoveSelectedColumn = useCallback(
|
||||
(columnKey: string) => {
|
||||
const newSelectedColumns = optionsQueryData?.selectColumns?.filter(
|
||||
const newSelectedColumns = preferences?.columns?.filter(
|
||||
({ id }) => id !== columnKey,
|
||||
);
|
||||
|
||||
if (!newSelectedColumns.length && dataSource !== DataSource.LOGS) {
|
||||
if (!newSelectedColumns?.length && dataSource !== DataSource.LOGS) {
|
||||
notifications.error({
|
||||
message: 'There must be at least one selected column',
|
||||
});
|
||||
} else {
|
||||
const optionsData: OptionsQuery = {
|
||||
...optionsQueryData,
|
||||
selectColumns: newSelectedColumns,
|
||||
...defaultOptionsQuery,
|
||||
selectColumns: newSelectedColumns || [],
|
||||
format: preferences?.formatting?.format || defaultOptionsQuery.format,
|
||||
maxLines:
|
||||
preferences?.formatting?.maxLines || defaultOptionsQuery.maxLines,
|
||||
fontSize:
|
||||
preferences?.formatting?.fontSize || defaultOptionsQuery.fontSize,
|
||||
};
|
||||
|
||||
updateColumns(newSelectedColumns || []);
|
||||
handleRedirectWithOptionsData(optionsData);
|
||||
}
|
||||
},
|
||||
[dataSource, notifications, optionsQueryData, handleRedirectWithOptionsData],
|
||||
[
|
||||
dataSource,
|
||||
notifications,
|
||||
preferences,
|
||||
handleRedirectWithOptionsData,
|
||||
updateColumns,
|
||||
],
|
||||
);
|
||||
|
||||
const handleFormatChange = useCallback(
|
||||
(value: LogViewMode) => {
|
||||
const optionsData: OptionsQuery = {
|
||||
...optionsQueryData,
|
||||
...defaultOptionsQuery,
|
||||
selectColumns: preferences?.columns || [],
|
||||
format: value,
|
||||
maxLines: preferences?.formatting?.maxLines || defaultOptionsQuery.maxLines,
|
||||
fontSize: preferences?.formatting?.fontSize || defaultOptionsQuery.fontSize,
|
||||
};
|
||||
|
||||
updateFormatting({
|
||||
maxLines: preferences?.formatting?.maxLines || defaultOptionsQuery.maxLines,
|
||||
format: value,
|
||||
fontSize: preferences?.formatting?.fontSize || defaultOptionsQuery.fontSize,
|
||||
});
|
||||
handleRedirectWithOptionsData(optionsData);
|
||||
},
|
||||
[handleRedirectWithOptionsData, optionsQueryData],
|
||||
[handleRedirectWithOptionsData, preferences, updateFormatting],
|
||||
);
|
||||
|
||||
const handleMaxLinesChange = useCallback(
|
||||
(value: string | number | null) => {
|
||||
const optionsData: OptionsQuery = {
|
||||
...optionsQueryData,
|
||||
...defaultOptionsQuery,
|
||||
selectColumns: preferences?.columns || [],
|
||||
format: preferences?.formatting?.format || defaultOptionsQuery.format,
|
||||
maxLines: value as number,
|
||||
fontSize: preferences?.formatting?.fontSize || defaultOptionsQuery.fontSize,
|
||||
};
|
||||
|
||||
updateFormatting({
|
||||
maxLines: value as number,
|
||||
format: preferences?.formatting?.format || defaultOptionsQuery.format,
|
||||
fontSize: preferences?.formatting?.fontSize || defaultOptionsQuery.fontSize,
|
||||
});
|
||||
handleRedirectWithOptionsData(optionsData);
|
||||
},
|
||||
[handleRedirectWithOptionsData, optionsQueryData],
|
||||
[handleRedirectWithOptionsData, preferences, updateFormatting],
|
||||
);
|
||||
|
||||
const handleFontSizeChange = useCallback(
|
||||
(value: FontSize) => {
|
||||
const optionsData: OptionsQuery = {
|
||||
...optionsQueryData,
|
||||
...defaultOptionsQuery,
|
||||
selectColumns: preferences?.columns || [],
|
||||
format: preferences?.formatting?.format || defaultOptionsQuery.format,
|
||||
maxLines: preferences?.formatting?.maxLines || defaultOptionsQuery.maxLines,
|
||||
fontSize: value,
|
||||
};
|
||||
|
||||
updateFormatting({
|
||||
maxLines: preferences?.formatting?.maxLines || defaultOptionsQuery.maxLines,
|
||||
format: preferences?.formatting?.format || defaultOptionsQuery.format,
|
||||
fontSize: value,
|
||||
});
|
||||
handleRedirectWithOptionsData(optionsData);
|
||||
},
|
||||
[handleRedirectWithOptionsData, optionsQueryData],
|
||||
[handleRedirectWithOptionsData, preferences, updateFormatting],
|
||||
);
|
||||
|
||||
const handleSearchAttribute = useCallback((value: string) => {
|
||||
@@ -331,7 +368,7 @@ const useOptionsMenu = ({
|
||||
() => ({
|
||||
addColumn: {
|
||||
isFetching: isSearchedAttributesFetching,
|
||||
value: optionsQueryData?.selectColumns || defaultOptionsQuery.selectColumns,
|
||||
value: preferences?.columns || defaultOptionsQuery.selectColumns,
|
||||
options: optionsFromAttributeKeys || [],
|
||||
onFocus: handleFocus,
|
||||
onBlur: handleBlur,
|
||||
@@ -340,24 +377,21 @@ const useOptionsMenu = ({
|
||||
onSearch: handleSearchAttribute,
|
||||
},
|
||||
format: {
|
||||
value: optionsQueryData.format || defaultOptionsQuery.format,
|
||||
value: preferences?.formatting?.format || defaultOptionsQuery.format,
|
||||
onChange: handleFormatChange,
|
||||
},
|
||||
maxLines: {
|
||||
value: optionsQueryData.maxLines || defaultOptionsQuery.maxLines,
|
||||
value: preferences?.formatting?.maxLines || defaultOptionsQuery.maxLines,
|
||||
onChange: handleMaxLinesChange,
|
||||
},
|
||||
fontSize: {
|
||||
value: optionsQueryData?.fontSize || defaultOptionsQuery.fontSize,
|
||||
value: preferences?.formatting?.fontSize || defaultOptionsQuery.fontSize,
|
||||
onChange: handleFontSizeChange,
|
||||
},
|
||||
}),
|
||||
[
|
||||
isSearchedAttributesFetching,
|
||||
optionsQueryData?.selectColumns,
|
||||
optionsQueryData.format,
|
||||
optionsQueryData.maxLines,
|
||||
optionsQueryData?.fontSize,
|
||||
preferences,
|
||||
optionsFromAttributeKeys,
|
||||
handleSelectColumns,
|
||||
handleRemoveSelectedColumn,
|
||||
@@ -369,23 +403,25 @@ const useOptionsMenu = ({
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
if (optionsQuery || !isFetchedInitialAttributes) return;
|
||||
if (optionsQuery || !isFetchedInitialAttributes) {
|
||||
return;
|
||||
}
|
||||
|
||||
const nextOptionsQuery = localStorageOptionsQuery
|
||||
? JSON.parse(localStorageOptionsQuery)
|
||||
: initialOptionsQuery;
|
||||
|
||||
redirectWithOptionsData(nextOptionsQuery);
|
||||
redirectWithOptionsData(initialOptionsQuery);
|
||||
}, [
|
||||
isFetchedInitialAttributes,
|
||||
optionsQuery,
|
||||
initialOptionsQuery,
|
||||
localStorageOptionsQuery,
|
||||
redirectWithOptionsData,
|
||||
]);
|
||||
|
||||
return {
|
||||
options: optionsQueryData,
|
||||
options: {
|
||||
selectColumns: preferences?.columns || [],
|
||||
format: preferences?.formatting?.format || defaultOptionsQuery.format,
|
||||
maxLines: preferences?.formatting?.maxLines || defaultOptionsQuery.maxLines,
|
||||
fontSize: preferences?.formatting?.fontSize || defaultOptionsQuery.fontSize,
|
||||
},
|
||||
config: optionsMenuConfig,
|
||||
handleOptionsChange: handleRedirectWithOptionsData,
|
||||
};
|
||||
|
||||
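The `useOptionsMenu` hunks above appear to move the hook's source of truth from the URL/localStorage options query to the preference context (`usePreferenceContext`), with `storageKey` becoming optional. A minimal consumer-side sketch under those assumptions; the call site below is illustrative, not lifted from this PR:

```tsx
// Hedged sketch of consuming the reworked hook.
const { options, config, handleOptionsChange } = useOptionsMenu({
  dataSource: DataSource.LOGS, // illustrative; traces works the same way
  aggregateOperator: StringOperators.NOOP,
});

// options.selectColumns / format / maxLines / fontSize are now derived from
// the preference context (falling back to defaultOptionsQuery), and column or
// formatting changes go through updateColumns / updateFormatting internally.
```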
@@ -1,5 +1,6 @@
|
||||
/* eslint-disable sonarjs/no-duplicate-string */
|
||||
import { screen } from '@testing-library/react';
|
||||
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
|
||||
import { findByText, fireEvent, render, waitFor } from 'tests/test-utils';
|
||||
|
||||
import { pipelineApiResponseMockData } from '../mocks/pipeline';
|
||||
@@ -19,6 +20,18 @@ jest.mock('uplot', () => {
|
||||
};
|
||||
});
|
||||
|
||||
// Mock useUrlQuery hook
|
||||
const mockUrlQuery = {
|
||||
get: jest.fn(),
|
||||
set: jest.fn(),
|
||||
toString: jest.fn(() => ''),
|
||||
};
|
||||
|
||||
jest.mock('hooks/useUrlQuery', () => ({
|
||||
__esModule: true,
|
||||
default: jest.fn(() => mockUrlQuery),
|
||||
}));
|
||||
|
||||
const samplePipelinePreviewResponse = {
|
||||
isLoading: false,
|
||||
logs: [
|
||||
@@ -57,17 +70,38 @@ jest.mock(
|
||||
}),
|
||||
);
|
||||
|
||||
// Mock usePreferenceSync
|
||||
jest.mock('providers/preferences/sync/usePreferenceSync', () => ({
|
||||
usePreferenceSync: (): any => ({
|
||||
preferences: {
|
||||
columns: [],
|
||||
formatting: {
|
||||
maxLines: 2,
|
||||
format: 'table',
|
||||
fontSize: 'small',
|
||||
version: 1,
|
||||
},
|
||||
},
|
||||
loading: false,
|
||||
error: null,
|
||||
updateColumns: jest.fn(),
|
||||
updateFormatting: jest.fn(),
|
||||
}),
|
||||
}));
|
||||
|
||||
describe('PipelinePage container test', () => {
|
||||
it('should render PipelineListsView section', () => {
|
||||
const { getByText, container } = render(
|
||||
<PipelineListsView
|
||||
setActionType={jest.fn()}
|
||||
isActionMode="viewing-mode"
|
||||
setActionMode={jest.fn()}
|
||||
pipelineData={pipelineApiResponseMockData}
|
||||
isActionType=""
|
||||
refetchPipelineLists={jest.fn()}
|
||||
/>,
|
||||
<PreferenceContextProvider>
|
||||
<PipelineListsView
|
||||
setActionType={jest.fn()}
|
||||
isActionMode="viewing-mode"
|
||||
setActionMode={jest.fn()}
|
||||
pipelineData={pipelineApiResponseMockData}
|
||||
isActionType=""
|
||||
refetchPipelineLists={jest.fn()}
|
||||
/>
|
||||
</PreferenceContextProvider>,
|
||||
);
|
||||
|
||||
// table headers assertions
|
||||
@@ -91,14 +125,16 @@ describe('PipelinePage container test', () => {
|
||||
|
||||
it('should render expanded content and edit mode correctly', async () => {
|
||||
const { getByText } = render(
|
||||
<PipelineListsView
|
||||
setActionType={jest.fn()}
|
||||
isActionMode="editing-mode"
|
||||
setActionMode={jest.fn()}
|
||||
pipelineData={pipelineApiResponseMockData}
|
||||
isActionType=""
|
||||
refetchPipelineLists={jest.fn()}
|
||||
/>,
|
||||
<PreferenceContextProvider>
|
||||
<PipelineListsView
|
||||
setActionType={jest.fn()}
|
||||
isActionMode="editing-mode"
|
||||
setActionMode={jest.fn()}
|
||||
pipelineData={pipelineApiResponseMockData}
|
||||
isActionType=""
|
||||
refetchPipelineLists={jest.fn()}
|
||||
/>
|
||||
</PreferenceContextProvider>,
|
||||
);
|
||||
|
||||
// content assertion
|
||||
@@ -122,14 +158,16 @@ describe('PipelinePage container test', () => {
|
||||
|
||||
it('should be able to perform actions and edit on expanded view content', async () => {
|
||||
render(
|
||||
<PipelineListsView
|
||||
setActionType={jest.fn()}
|
||||
isActionMode="editing-mode"
|
||||
setActionMode={jest.fn()}
|
||||
pipelineData={pipelineApiResponseMockData}
|
||||
isActionType=""
|
||||
refetchPipelineLists={jest.fn()}
|
||||
/>,
|
||||
<PreferenceContextProvider>
|
||||
<PipelineListsView
|
||||
setActionType={jest.fn()}
|
||||
isActionMode="editing-mode"
|
||||
setActionMode={jest.fn()}
|
||||
pipelineData={pipelineApiResponseMockData}
|
||||
isActionType=""
|
||||
refetchPipelineLists={jest.fn()}
|
||||
/>
|
||||
</PreferenceContextProvider>,
|
||||
);
|
||||
|
||||
// content assertion
|
||||
@@ -180,14 +218,16 @@ describe('PipelinePage container test', () => {
|
||||
|
||||
it('should be able to toggle and delete pipeline', async () => {
|
||||
const { getByText } = render(
|
||||
<PipelineListsView
|
||||
setActionType={jest.fn()}
|
||||
isActionMode="editing-mode"
|
||||
setActionMode={jest.fn()}
|
||||
pipelineData={pipelineApiResponseMockData}
|
||||
isActionType=""
|
||||
refetchPipelineLists={jest.fn()}
|
||||
/>,
|
||||
<PreferenceContextProvider>
|
||||
<PipelineListsView
|
||||
setActionType={jest.fn()}
|
||||
isActionMode="editing-mode"
|
||||
setActionMode={jest.fn()}
|
||||
pipelineData={pipelineApiResponseMockData}
|
||||
isActionType=""
|
||||
refetchPipelineLists={jest.fn()}
|
||||
/>
|
||||
</PreferenceContextProvider>,
|
||||
);
|
||||
|
||||
const addNewPipelineBtn = getByText('add_new_pipeline');
|
||||
@@ -247,14 +287,16 @@ describe('PipelinePage container test', () => {
|
||||
|
||||
it('should have populated form fields when edit pipeline is clicked', async () => {
|
||||
render(
|
||||
<PipelineListsView
|
||||
setActionType={jest.fn()}
|
||||
isActionMode="editing-mode"
|
||||
setActionMode={jest.fn()}
|
||||
pipelineData={pipelineApiResponseMockData}
|
||||
isActionType="edit-pipeline"
|
||||
refetchPipelineLists={jest.fn()}
|
||||
/>,
|
||||
<PreferenceContextProvider>
|
||||
<PipelineListsView
|
||||
setActionType={jest.fn()}
|
||||
isActionMode="editing-mode"
|
||||
setActionMode={jest.fn()}
|
||||
pipelineData={pipelineApiResponseMockData}
|
||||
isActionType="edit-pipeline"
|
||||
refetchPipelineLists={jest.fn()}
|
||||
/>
|
||||
</PreferenceContextProvider>,
|
||||
);
|
||||
|
||||
// content assertion
|
||||
|
||||
@@ -324,7 +324,7 @@ export const Query = memo(function Query({
  ]);

  const disableOperatorSelector =
    !query?.aggregateAttribute.key || query?.aggregateAttribute.key === '';
    !query?.aggregateAttribute?.key || query?.aggregateAttribute?.key === '';

  const isVersionV4 = version && version === ENTITY_VERSION_V4;
@@ -1037,7 +1037,9 @@ function QueryBuilderSearchV2(
          );
        })}
      </Select>
      {!hideSpanScopeSelector && <SpanScopeSelector queryName={query.queryName} />}
      {!hideSpanScopeSelector && (
        <SpanScopeSelector query={query} onChange={onChange} />
      )}
    </div>
  );
}
@@ -2,7 +2,11 @@ import { Select } from 'antd';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { cloneDeep } from 'lodash-es';
import { useEffect, useState } from 'react';
import { TagFilterItem } from 'types/api/queryBuilder/queryBuilderData';
import {
  IBuilderQuery,
  TagFilter,
  TagFilterItem,
} from 'types/api/queryBuilder/queryBuilderData';
import { v4 as uuid } from 'uuid';

enum SpanScope {

@@ -17,7 +21,8 @@ interface SpanFilterConfig {
}

interface SpanScopeSelectorProps {
  queryName: string;
  onChange?: (value: TagFilter) => void;
  query?: IBuilderQuery;
}

const SPAN_FILTER_CONFIG: Record<SpanScope, SpanFilterConfig | null> = {

@@ -50,7 +55,10 @@ const SELECT_OPTIONS = [
  { value: SpanScope.ENTRYPOINT_SPANS, label: 'Entrypoint Spans' },
];

function SpanScopeSelector({ queryName }: SpanScopeSelectorProps): JSX.Element {
function SpanScopeSelector({
  onChange,
  query,
}: SpanScopeSelectorProps): JSX.Element {
  const { currentQuery, redirectWithQueryBuilderData } = useQueryBuilder();
  const [selectedScope, setSelectedScope] = useState<SpanScope>(
    SpanScope.ALL_SPANS,

@@ -60,7 +68,7 @@ function SpanScopeSelector({ queryName }: SpanScopeSelectorProps): JSX.Element {
    filters: TagFilterItem[] = [],
  ): SpanScope => {
    const hasFilter = (key: string): boolean =>
      filters.some(
      filters?.some(
        (filter) =>
          filter.key?.type === 'spanSearchScope' &&
          filter.key.key === key &&

@@ -71,15 +79,19 @@ function SpanScopeSelector({ queryName }: SpanScopeSelectorProps): JSX.Element {
    if (hasFilter('isEntryPoint')) return SpanScope.ENTRYPOINT_SPANS;
    return SpanScope.ALL_SPANS;
  };

  useEffect(() => {
    const queryData = (currentQuery?.builder?.queryData || [])?.find(
      (item) => item.queryName === queryName,
    let queryData = (currentQuery?.builder?.queryData || [])?.find(
      (item) => item.queryName === query?.queryName,
    );

    if (onChange && query) {
      queryData = query;
    }

    const filters = queryData?.filters?.items;
    const currentScope = getCurrentScopeFromFilters(filters);
    setSelectedScope(currentScope);
  }, [currentQuery, queryName]);
  }, [currentQuery, onChange, query]);

  const handleScopeChange = (newScope: SpanScope): void => {
    const newQuery = cloneDeep(currentQuery);

@@ -108,14 +120,28 @@ function SpanScopeSelector({ queryName }: SpanScopeSelectorProps): JSX.Element {
      ...item,
      filters: {
        ...item.filters,
        items: getUpdatedFilters(item.filters?.items, item.queryName === queryName),
        items: getUpdatedFilters(
          item.filters?.items,
          item.queryName === query?.queryName,
        ),
      },
    }));

    redirectWithQueryBuilderData(newQuery);
    if (onChange && query) {
      onChange({
        ...query.filters,
        items: getUpdatedFilters(
          [...query.filters.items, ...newQuery.builder.queryData[0].filters.items],
          true,
        ),
      });

      setSelectedScope(newScope);
    } else {
      redirectWithQueryBuilderData(newQuery);
    }
  };

  //
  return (
    <Select
      value={selectedScope}

@@ -127,4 +153,9 @@ function SpanScopeSelector({ queryName }: SpanScopeSelectorProps): JSX.Element {
  );
}

SpanScopeSelector.defaultProps = {
  onChange: undefined,
  query: undefined,
};

export default SpanScopeSelector;
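The `SpanScopeSelector` changes above add an optional controlled mode: when both `query` and `onChange` are provided, a scope change is reported through `onChange` as an updated `TagFilter` instead of redirecting with rewritten query-builder state. A minimal usage sketch; the parent handler and variable names are assumptions for illustration:

```tsx
// Hedged sketch of the new controlled usage.
<SpanScopeSelector
  query={builderQuery} // an IBuilderQuery whose filters seed the selected scope
  onChange={(updatedFilters: TagFilter): void => setFilters(updatedFilters)}
/>

// Without these props the selector still falls back to
// redirectWithQueryBuilderData, as in the else branch of handleScopeChange.
```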
@@ -6,7 +6,12 @@ import {
|
||||
} from '@testing-library/react';
|
||||
import { initialQueriesMap } from 'constants/queryBuilder';
|
||||
import { QueryBuilderContext } from 'providers/QueryBuilder';
|
||||
import { Query, TagFilterItem } from 'types/api/queryBuilder/queryBuilderData';
|
||||
import {
|
||||
IBuilderQuery,
|
||||
Query,
|
||||
TagFilter,
|
||||
TagFilterItem,
|
||||
} from 'types/api/queryBuilder/queryBuilderData';
|
||||
|
||||
import SpanScopeSelector from '../SpanScopeSelector';
|
||||
|
||||
@@ -23,6 +28,13 @@ const createSpanScopeFilter = (key: string): TagFilterItem => ({
|
||||
value: 'true',
|
||||
});
|
||||
|
||||
const createNonScopeFilter = (key: string, value: string): TagFilterItem => ({
|
||||
id: `non-scope-${key}`,
|
||||
key: { key, isColumn: false, type: 'tag' },
|
||||
op: '=',
|
||||
value,
|
||||
});
|
||||
|
||||
const defaultQuery = {
|
||||
...initialQueriesMap.traces,
|
||||
builder: {
|
||||
@@ -36,6 +48,12 @@ const defaultQuery = {
|
||||
},
|
||||
};
|
||||
|
||||
const defaultQueryBuilderQuery: IBuilderQuery = {
|
||||
...initialQueriesMap.traces.builder.queryData[0],
|
||||
queryName: 'A',
|
||||
filters: { items: [], op: 'AND' },
|
||||
};
|
||||
|
||||
// Helper to create query with filters
|
||||
const createQueryWithFilters = (filters: TagFilterItem[]): Query => ({
|
||||
...defaultQuery,
|
||||
@@ -44,6 +62,7 @@ const createQueryWithFilters = (filters: TagFilterItem[]): Query => ({
|
||||
queryData: [
|
||||
{
|
||||
...defaultQuery.builder.queryData[0],
|
||||
queryName: 'A',
|
||||
filters: {
|
||||
items: filters,
|
||||
op: 'AND',
|
||||
@@ -54,8 +73,9 @@ const createQueryWithFilters = (filters: TagFilterItem[]): Query => ({
|
||||
});
|
||||
|
||||
const renderWithContext = (
|
||||
queryName = 'A',
|
||||
initialQuery = defaultQuery,
|
||||
onChangeProp?: (value: TagFilter) => void,
|
||||
queryProp?: IBuilderQuery,
|
||||
): RenderResult =>
|
||||
render(
|
||||
<QueryBuilderContext.Provider
|
||||
@@ -67,10 +87,24 @@ const renderWithContext = (
|
||||
} as any
|
||||
}
|
||||
>
|
||||
<SpanScopeSelector queryName={queryName} />
|
||||
<SpanScopeSelector onChange={onChangeProp} query={queryProp} />
|
||||
</QueryBuilderContext.Provider>,
|
||||
);
|
||||
|
||||
const selectOption = async (optionText: string): Promise<void> => {
|
||||
const selector = screen.getByRole('combobox');
|
||||
fireEvent.mouseDown(selector);
|
||||
|
||||
// Wait for dropdown to appear
|
||||
await screen.findByRole('listbox');
|
||||
|
||||
// Find the option by its content text and click it
|
||||
const option = await screen.findByText(optionText, {
|
||||
selector: '.ant-select-item-option-content',
|
||||
});
|
||||
fireEvent.click(option);
|
||||
};
|
||||
|
||||
describe('SpanScopeSelector', () => {
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
@@ -82,13 +116,6 @@ describe('SpanScopeSelector', () => {
|
||||
});
|
||||
|
||||
describe('when selecting different options', () => {
|
||||
const selectOption = (optionText: string): void => {
|
||||
const selector = screen.getByRole('combobox');
|
||||
fireEvent.mouseDown(selector);
|
||||
const option = screen.getByText(optionText);
|
||||
fireEvent.click(option);
|
||||
};
|
||||
|
||||
const assertFilterAdded = (
|
||||
updatedQuery: Query,
|
||||
expectedKey: string,
|
||||
@@ -106,13 +133,13 @@ describe('SpanScopeSelector', () => {
|
||||
);
|
||||
};
|
||||
|
||||
it('should remove span scope filters when selecting ALL_SPANS', () => {
|
||||
it('should remove span scope filters when selecting ALL_SPANS', async () => {
|
||||
const queryWithSpanScope = createQueryWithFilters([
|
||||
createSpanScopeFilter('isRoot'),
|
||||
]);
|
||||
renderWithContext('A', queryWithSpanScope);
|
||||
renderWithContext(queryWithSpanScope, undefined, defaultQueryBuilderQuery);
|
||||
|
||||
selectOption('All Spans');
|
||||
await selectOption('All Spans');
|
||||
|
||||
expect(mockRedirectWithQueryBuilderData).toHaveBeenCalled();
|
||||
const updatedQuery = mockRedirectWithQueryBuilderData.mock.calls[0][0];
|
||||
@@ -125,7 +152,8 @@ describe('SpanScopeSelector', () => {
|
||||
});
|
||||
|
||||
it('should add isRoot filter when selecting ROOT_SPANS', async () => {
|
||||
renderWithContext();
|
||||
renderWithContext(defaultQuery, undefined, defaultQueryBuilderQuery);
|
||||
// eslint-disable-next-line sonarjs/no-duplicate-string
|
||||
await selectOption('Root Spans');
|
||||
|
||||
expect(mockRedirectWithQueryBuilderData).toHaveBeenCalled();
|
||||
@@ -135,9 +163,10 @@ describe('SpanScopeSelector', () => {
|
||||
);
|
||||
});
|
||||
|
||||
it('should add isEntryPoint filter when selecting ENTRYPOINT_SPANS', () => {
|
||||
renderWithContext();
|
||||
selectOption('Entrypoint Spans');
|
||||
it('should add isEntryPoint filter when selecting ENTRYPOINT_SPANS', async () => {
|
||||
renderWithContext(defaultQuery, undefined, defaultQueryBuilderQuery);
|
||||
// eslint-disable-next-line sonarjs/no-duplicate-string
|
||||
await selectOption('Entrypoint Spans');
|
||||
|
||||
expect(mockRedirectWithQueryBuilderData).toHaveBeenCalled();
|
||||
assertFilterAdded(
|
||||
@@ -157,9 +186,180 @@ describe('SpanScopeSelector', () => {
|
||||
const queryWithFilter = createQueryWithFilters([
|
||||
createSpanScopeFilter(filterKey),
|
||||
]);
|
||||
renderWithContext('A', queryWithFilter);
|
||||
renderWithContext(queryWithFilter, undefined, defaultQueryBuilderQuery);
|
||||
expect(await screen.findByText(expectedText)).toBeInTheDocument();
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
describe('when onChange and query props are provided', () => {
|
||||
const mockOnChange = jest.fn();
|
||||
|
||||
const createLocalQuery = (
|
||||
filterItems: TagFilterItem[] = [],
|
||||
op: 'AND' | 'OR' = 'AND',
|
||||
): IBuilderQuery => ({
|
||||
...defaultQueryBuilderQuery,
|
||||
filters: { items: filterItems, op },
|
||||
});
|
||||
|
||||
const assertOnChangePayload = (
|
||||
callNumber: number, // To handle multiple calls if needed, usually 0 for single interaction
|
||||
expectedScopeKey: string | null,
|
||||
expectedNonScopeItems: TagFilterItem[] = [],
|
||||
): void => {
|
||||
expect(mockOnChange).toHaveBeenCalled();
|
||||
const onChangeArg = mockOnChange.mock.calls[callNumber][0] as TagFilter;
|
||||
const { items } = onChangeArg;
|
||||
|
||||
// Check for preservation of specific non-scope items
|
||||
expectedNonScopeItems.forEach((nonScopeItem) => {
|
||||
expect(items).toContainEqual(nonScopeItem);
|
||||
});
|
||||
|
||||
const scopeFiltersInPayload = items.filter(
|
||||
(filter) => filter.key?.type === 'spanSearchScope',
|
||||
);
|
||||
|
||||
if (expectedScopeKey) {
|
||||
expect(scopeFiltersInPayload.length).toBe(1);
|
||||
expect(scopeFiltersInPayload[0].key?.key).toBe(expectedScopeKey);
|
||||
expect(scopeFiltersInPayload[0].value).toBe('true');
|
||||
expect(scopeFiltersInPayload[0].op).toBe('=');
|
||||
} else {
|
||||
expect(scopeFiltersInPayload.length).toBe(0);
|
||||
}
|
||||
|
||||
const expectedTotalFilters =
|
||||
expectedNonScopeItems.length + (expectedScopeKey ? 1 : 0);
|
||||
expect(items.length).toBe(expectedTotalFilters);
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
mockOnChange.mockClear();
|
||||
mockRedirectWithQueryBuilderData.mockClear();
|
||||
});
|
||||
|
||||
it('should initialize with ALL_SPANS if query prop has no scope filters', async () => {
|
||||
const localQuery = createLocalQuery();
|
||||
renderWithContext(defaultQuery, mockOnChange, localQuery);
|
||||
expect(await screen.findByText('All Spans')).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('should initialize with ROOT_SPANS if query prop has isRoot filter', async () => {
|
||||
const localQuery = createLocalQuery([createSpanScopeFilter('isRoot')]);
|
||||
renderWithContext(defaultQuery, mockOnChange, localQuery);
|
||||
expect(await screen.findByText('Root Spans')).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('should initialize with ENTRYPOINT_SPANS if query prop has isEntryPoint filter', async () => {
|
||||
const localQuery = createLocalQuery([createSpanScopeFilter('isEntryPoint')]);
|
||||
renderWithContext(defaultQuery, mockOnChange, localQuery);
|
||||
expect(await screen.findByText('Entrypoint Spans')).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('should call onChange and not redirect when selecting ROOT_SPANS (from ALL_SPANS)', async () => {
|
||||
const localQuery = createLocalQuery(); // Initially All Spans
|
||||
const { container } = renderWithContext(
|
||||
defaultQuery,
|
||||
mockOnChange,
|
||||
localQuery,
|
||||
);
|
||||
expect(await screen.findByText('All Spans')).toBeInTheDocument();
|
||||
|
||||
await selectOption('Root Spans');
|
||||
|
||||
expect(mockRedirectWithQueryBuilderData).not.toHaveBeenCalled();
|
||||
assertOnChangePayload(0, 'isRoot', []);
|
||||
expect(
|
||||
container.querySelector('span[title="Root Spans"]'),
|
||||
).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('should call onChange with removed scope when selecting ALL_SPANS (from ROOT_SPANS)', async () => {
|
||||
const initialRootFilter = createSpanScopeFilter('isRoot');
|
||||
const localQuery = createLocalQuery([initialRootFilter]);
|
||||
const { container } = renderWithContext(
|
||||
defaultQuery,
|
||||
mockOnChange,
|
||||
localQuery,
|
||||
);
|
||||
expect(await screen.findByText('Root Spans')).toBeInTheDocument();
|
||||
|
||||
await selectOption('All Spans');
|
||||
|
||||
expect(mockRedirectWithQueryBuilderData).not.toHaveBeenCalled();
|
||||
assertOnChangePayload(0, null, []);
|
||||
|
||||
expect(
|
||||
container.querySelector('span[title="All Spans"]'),
|
||||
).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('should call onChange, replacing isRoot with isEntryPoint', async () => {
|
||||
const initialRootFilter = createSpanScopeFilter('isRoot');
|
||||
const localQuery = createLocalQuery([initialRootFilter]);
|
||||
const { container } = renderWithContext(
|
||||
defaultQuery,
|
||||
mockOnChange,
|
||||
localQuery,
|
||||
);
|
||||
expect(await screen.findByText('Root Spans')).toBeInTheDocument();
|
||||
|
||||
await selectOption('Entrypoint Spans');
|
||||
|
||||
expect(mockRedirectWithQueryBuilderData).not.toHaveBeenCalled();
|
||||
assertOnChangePayload(0, 'isEntryPoint', []);
|
||||
expect(
|
||||
container.querySelector('span[title="Entrypoint Spans"]'),
|
||||
).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('should preserve non-scope filters from query prop when changing scope', async () => {
|
||||
const nonScopeItem = createNonScopeFilter('customTag', 'customValue');
|
||||
const initialRootFilter = createSpanScopeFilter('isRoot');
|
||||
const localQuery = createLocalQuery([nonScopeItem, initialRootFilter], 'OR');
|
||||
|
||||
const { container } = renderWithContext(
|
||||
defaultQuery,
|
||||
mockOnChange,
|
||||
localQuery,
|
||||
);
|
||||
expect(await screen.findByText('Root Spans')).toBeInTheDocument();
|
||||
|
||||
await selectOption('Entrypoint Spans');
|
||||
|
||||
expect(mockRedirectWithQueryBuilderData).not.toHaveBeenCalled();
|
||||
assertOnChangePayload(0, 'isEntryPoint', [nonScopeItem]);
|
||||
expect(
|
||||
container.querySelector('span[title="Entrypoint Spans"]'),
|
||||
).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('should preserve non-scope filters when changing to ALL_SPANS', async () => {
|
||||
const nonScopeItem1 = createNonScopeFilter('service', 'checkout');
|
||||
const nonScopeItem2 = createNonScopeFilter('version', 'v1');
|
||||
const initialEntryFilter = createSpanScopeFilter('isEntryPoint');
|
||||
const localQuery = createLocalQuery([
|
||||
nonScopeItem1,
|
||||
initialEntryFilter,
|
||||
nonScopeItem2,
|
||||
]);
|
||||
|
||||
const { container } = renderWithContext(
|
||||
defaultQuery,
|
||||
mockOnChange,
|
||||
localQuery,
|
||||
);
|
||||
expect(await screen.findByText('Entrypoint Spans')).toBeInTheDocument();
|
||||
|
||||
await selectOption('All Spans');
|
||||
|
||||
expect(mockRedirectWithQueryBuilderData).not.toHaveBeenCalled();
|
||||
assertOnChangePayload(0, null, [nonScopeItem1, nonScopeItem2]);
|
||||
expect(
|
||||
container.querySelector('span[title="All Spans"]'),
|
||||
).toBeInTheDocument();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -30,14 +30,15 @@ export const getChartData = (
  };
  const chartLabels: ChartData<'line'>['labels'] = [];

  Object.keys(allDataPoints ?? {}).forEach((timestamp) => {
    const key = allDataPoints[timestamp];
    if (key.value) {
      chartDataset.data.push(key.value);
      const date = dayjs(key.timestamp / 1000000);
      chartLabels.push(date.toDate().getTime());
    }
  });
  if (allDataPoints && typeof allDataPoints === 'object')
    Object.keys(allDataPoints).forEach((timestamp) => {
      const key = allDataPoints[timestamp];
      if (key.value) {
        chartDataset.data.push(key.value);
        const date = dayjs(key.timestamp / 1000000);
        chartLabels.push(date.toDate().getTime());
      }
    });

  return {
    datasets: [
@@ -136,8 +136,12 @@ function Filters({
  return (
    <div className="filter-row">
      <QueryBuilderSearchV2
        query={BASE_FILTER_QUERY}
        query={{
          ...BASE_FILTER_QUERY,
          filters,
        }}
        onChange={handleFilterChange}
        hideSpanScopeSelector={false}
      />
      {filteredSpanIds.length > 0 && (
        <div className="pre-next-toggle">
@@ -4,6 +4,7 @@ import LiveLogsContainer from 'container/LiveLogs/LiveLogsContainer';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { useShareBuilderUrl } from 'hooks/queryBuilder/useShareBuilderUrl';
import { EventSourceProvider } from 'providers/EventSource';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { useEffect } from 'react';
import { DataSource } from 'types/common/queryBuilder';

@@ -17,7 +18,9 @@ function LiveLogs(): JSX.Element {

  return (
    <EventSourceProvider>
      <LiveLogsContainer />
      <PreferenceContextProvider>
        <LiveLogsContainer />
      </PreferenceContextProvider>
    </EventSourceProvider>
  );
}
@@ -8,6 +8,7 @@ import { noop } from 'lodash-es';
|
||||
import { logsQueryRangeSuccessResponse } from 'mocks-server/__mockdata__/logs_query_range';
|
||||
import { server } from 'mocks-server/server';
|
||||
import { rest } from 'msw';
|
||||
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
|
||||
import { QueryBuilderContext } from 'providers/QueryBuilder';
|
||||
// https://virtuoso.dev/mocking-in-tests/
|
||||
import { VirtuosoMockContext } from 'react-virtuoso';
|
||||
@@ -73,6 +74,25 @@ jest.mock('hooks/useSafeNavigate', () => ({
|
||||
}),
|
||||
}));
|
||||
|
||||
// Mock usePreferenceSync
|
||||
jest.mock('providers/preferences/sync/usePreferenceSync', () => ({
|
||||
usePreferenceSync: (): any => ({
|
||||
preferences: {
|
||||
columns: [],
|
||||
formatting: {
|
||||
maxLines: 2,
|
||||
format: 'table',
|
||||
fontSize: 'small',
|
||||
version: 1,
|
||||
},
|
||||
},
|
||||
loading: false,
|
||||
error: null,
|
||||
updateColumns: jest.fn(),
|
||||
updateFormatting: jest.fn(),
|
||||
}),
|
||||
}));
|
||||
|
||||
const logsQueryServerRequest = (): void =>
|
||||
server.use(
|
||||
rest.post(queryRangeURL, (req, res, ctx) =>
|
||||
@@ -88,7 +108,11 @@ describe('Logs Explorer Tests', () => {
|
||||
queryByText,
|
||||
getByTestId,
|
||||
queryByTestId,
|
||||
} = render(<LogsExplorer />);
|
||||
} = render(
|
||||
<PreferenceContextProvider>
|
||||
<LogsExplorer />
|
||||
</PreferenceContextProvider>,
|
||||
);
|
||||
|
||||
// check the presence of frequency chart content
|
||||
expect(getByText(frequencyChartContent)).toBeInTheDocument();
|
||||
@@ -124,11 +148,13 @@ describe('Logs Explorer Tests', () => {
|
||||
// mocking the query range API to return the logs
|
||||
logsQueryServerRequest();
|
||||
const { queryByText, queryByTestId } = render(
|
||||
<VirtuosoMockContext.Provider
|
||||
value={{ viewportHeight: 300, itemHeight: 100 }}
|
||||
>
|
||||
<LogsExplorer />
|
||||
</VirtuosoMockContext.Provider>,
|
||||
<PreferenceContextProvider>
|
||||
<VirtuosoMockContext.Provider
|
||||
value={{ viewportHeight: 300, itemHeight: 100 }}
|
||||
>
|
||||
<LogsExplorer />
|
||||
</VirtuosoMockContext.Provider>
|
||||
</PreferenceContextProvider>,
|
||||
);
|
||||
|
||||
// check for loading state to be not present
|
||||
@@ -192,11 +218,13 @@ describe('Logs Explorer Tests', () => {
|
||||
isStagedQueryUpdated: (): boolean => false,
|
||||
}}
|
||||
>
|
||||
<VirtuosoMockContext.Provider
|
||||
value={{ viewportHeight: 300, itemHeight: 100 }}
|
||||
>
|
||||
<LogsExplorer />
|
||||
</VirtuosoMockContext.Provider>
|
||||
<PreferenceContextProvider>
|
||||
<VirtuosoMockContext.Provider
|
||||
value={{ viewportHeight: 300, itemHeight: 100 }}
|
||||
>
|
||||
<LogsExplorer />
|
||||
</VirtuosoMockContext.Provider>
|
||||
</PreferenceContextProvider>
|
||||
</QueryBuilderContext.Provider>,
|
||||
);
|
||||
|
||||
@@ -213,7 +241,11 @@ describe('Logs Explorer Tests', () => {
|
||||
});
|
||||
|
||||
test('frequency chart visibility and switch toggle', async () => {
|
||||
const { getByRole, queryByText } = render(<LogsExplorer />);
|
||||
const { getByRole, queryByText } = render(
|
||||
<PreferenceContextProvider>
|
||||
<LogsExplorer />
|
||||
</PreferenceContextProvider>,
|
||||
);
|
||||
|
||||
// check the presence of Frequency Chart
|
||||
expect(queryByText('Frequency chart')).toBeInTheDocument();
|
||||
|
||||
@@ -23,6 +23,7 @@ import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import useUrlQueryData from 'hooks/useUrlQueryData';
import { isEqual, isNull } from 'lodash-es';
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
import { usePreferenceContext } from 'providers/preferences/context/PreferenceContextProvider';
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { DataSource } from 'types/common/queryBuilder';

@@ -35,6 +36,8 @@ function LogsExplorer(): JSX.Element {
  const [selectedView, setSelectedView] = useState<SELECTED_VIEWS>(
    SELECTED_VIEWS.SEARCH,
  );
  const { preferences, loading: preferencesLoading } = usePreferenceContext();

  const [showFilters, setShowFilters] = useState<boolean>(() => {
    const localStorageValue = getLocalStorageKey(
      LOCALSTORAGE.SHOW_LOGS_QUICK_FILTERS,

@@ -83,7 +86,6 @@ function LogsExplorer(): JSX.Element {
  }, [currentQuery.builder.queryData, currentQuery.builder.queryData.length]);

  const {
    queryData: optionsQueryData,
    redirectWithQuery: redirectWithOptionsData,
  } = useUrlQueryData<OptionsQuery>(URL_OPTIONS, defaultOptionsQuery);

@@ -164,12 +166,34 @@ function LogsExplorer(): JSX.Element {
  );

  useEffect(() => {
    const migratedQuery = migrateOptionsQuery(optionsQueryData);
    if (!preferences || preferencesLoading) {
      return;
    }
    const migratedQuery = migrateOptionsQuery({
      selectColumns: preferences.columns || defaultLogsSelectedColumns,
      maxLines: preferences.formatting?.maxLines || defaultOptionsQuery.maxLines,
      format: preferences.formatting?.format || defaultOptionsQuery.format,
      fontSize: preferences.formatting?.fontSize || defaultOptionsQuery.fontSize,
      version: preferences.formatting?.version,
    });
    // Only redirect if the query was actually modified
    if (!isEqual(migratedQuery, optionsQueryData)) {
    if (
      !isEqual(migratedQuery, {
        selectColumns: preferences?.columns,
        maxLines: preferences?.formatting?.maxLines,
        format: preferences?.formatting?.format,
        fontSize: preferences?.formatting?.fontSize,
        version: preferences?.formatting?.version,
      })
    ) {
      redirectWithOptionsData(migratedQuery);
    }
  }, [migrateOptionsQuery, optionsQueryData, redirectWithOptionsData]);
  }, [
    migrateOptionsQuery,
    preferences,
    redirectWithOptionsData,
    preferencesLoading,
  ]);

  const isMultipleQueries = useMemo(
    () =>
@@ -4,9 +4,14 @@ import { Compass, TowerControl, Workflow } from 'lucide-react';
import LogsExplorer from 'pages/LogsExplorer';
import Pipelines from 'pages/Pipelines';
import SaveView from 'pages/SaveView';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';

export const logsExplorer: TabRoutes = {
  Component: LogsExplorer,
  Component: (): JSX.Element => (
    <PreferenceContextProvider>
      <LogsExplorer />
    </PreferenceContextProvider>
  ),
  name: (
    <div className="tab-item">
      <Compass size={16} /> Explorer
@@ -4,6 +4,7 @@ import ExplorerPage from 'container/MetricsExplorer/Explorer';
import SummaryPage from 'container/MetricsExplorer/Summary';
import { BarChart2, Compass, TowerControl } from 'lucide-react';
import SaveView from 'pages/SaveView';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';

export const Summary: TabRoutes = {
  Component: SummaryPage,

@@ -17,7 +18,11 @@ export const Summary: TabRoutes = {
};

export const Explorer: TabRoutes = {
  Component: ExplorerPage,
  Component: (): JSX.Element => (
    <PreferenceContextProvider>
      <ExplorerPage />
    </PreferenceContextProvider>
  ),
  name: (
    <div className="tab-item">
      <Compass size={16} /> Explorer
@@ -75,7 +75,7 @@ function TracesExplorer(): JSX.Element {

  const isGroupByExist = useMemo(() => {
    const groupByCount: number = currentQuery.builder.queryData.reduce<number>(
      (acc, query) => acc + query.groupBy.length,
      (acc, query) => acc + (query?.groupBy?.length || 0),
      0,
    );
@@ -5,10 +5,15 @@ import SaveView from 'pages/SaveView';
import TracesExplorer from 'pages/TracesExplorer';
import TracesFunnelDetails from 'pages/TracesFunnelDetails';
import TracesFunnels from 'pages/TracesFunnels';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { matchPath } from 'react-router-dom';

export const tracesExplorer: TabRoutes = {
  Component: TracesExplorer,
  Component: (): JSX.Element => (
    <PreferenceContextProvider>
      <TracesExplorer />
    </PreferenceContextProvider>
  ),
  name: (
    <div className="tab-item">
      <Compass size={16} /> Explorer
@@ -0,0 +1,154 @@
|
||||
/* eslint-disable sonarjs/no-identical-functions */
|
||||
import { render, screen } from '@testing-library/react';
|
||||
import {
|
||||
FormattingOptions,
|
||||
PreferenceMode,
|
||||
Preferences,
|
||||
} from 'providers/preferences/types';
|
||||
import { MemoryRouter, Route, Switch } from 'react-router-dom';
|
||||
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
|
||||
import {
|
||||
PreferenceContextProvider,
|
||||
usePreferenceContext,
|
||||
} from '../context/PreferenceContextProvider';
|
||||
|
||||
// Mock the usePreferenceSync hook
|
||||
jest.mock('../sync/usePreferenceSync', () => ({
|
||||
usePreferenceSync: jest.fn().mockReturnValue({
|
||||
preferences: {
|
||||
columns: [] as BaseAutocompleteData[],
|
||||
formatting: {
|
||||
maxLines: 2,
|
||||
format: 'table',
|
||||
fontSize: 'small',
|
||||
version: 1,
|
||||
} as FormattingOptions,
|
||||
} as Preferences,
|
||||
loading: false,
|
||||
error: null,
|
||||
updateColumns: jest.fn(),
|
||||
updateFormatting: jest.fn(),
|
||||
}),
|
||||
}));
|
||||
|
||||
// Test component that consumes the context
|
||||
function TestConsumer(): JSX.Element {
|
||||
const context = usePreferenceContext();
|
||||
return (
|
||||
<div>
|
||||
<div data-testid="mode">{context.mode}</div>
|
||||
<div data-testid="dataSource">{context.dataSource}</div>
|
||||
<div data-testid="loading">{String(context.loading)}</div>
|
||||
<div data-testid="error">{String(context.error)}</div>
|
||||
<div data-testid="savedViewId">{context.savedViewId || 'no-view-id'}</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
describe('PreferenceContextProvider', () => {
|
||||
it('should provide context with direct mode when no viewKey is present', () => {
|
||||
render(
|
||||
<MemoryRouter initialEntries={['/logs']}>
|
||||
<Switch>
|
||||
<Route
|
||||
path="/logs"
|
||||
component={(): JSX.Element => (
|
||||
<PreferenceContextProvider>
|
||||
<TestConsumer />
|
||||
</PreferenceContextProvider>
|
||||
)}
|
||||
/>
|
||||
</Switch>
|
||||
</MemoryRouter>,
|
||||
);
|
||||
|
||||
expect(screen.getByTestId('mode')).toHaveTextContent(PreferenceMode.DIRECT);
|
||||
expect(screen.getByTestId('dataSource')).toHaveTextContent('logs');
|
||||
expect(screen.getByTestId('loading')).toHaveTextContent('false');
|
||||
expect(screen.getByTestId('error')).toHaveTextContent('null');
|
||||
expect(screen.getByTestId('savedViewId')).toHaveTextContent('no-view-id');
|
||||
});
|
||||
|
||||
it('should provide context with savedView mode when viewKey is present', () => {
|
||||
render(
|
||||
<MemoryRouter initialEntries={['/logs?viewKey="test-view-id"']}>
|
||||
<Switch>
|
||||
<Route
|
||||
path="/logs"
|
||||
component={(): JSX.Element => (
|
||||
<PreferenceContextProvider>
|
||||
<TestConsumer />
|
||||
</PreferenceContextProvider>
|
||||
)}
|
||||
/>
|
||||
</Switch>
|
||||
</MemoryRouter>,
|
||||
);
|
||||
|
||||
expect(screen.getByTestId('mode')).toHaveTextContent('savedView');
|
||||
expect(screen.getByTestId('dataSource')).toHaveTextContent('logs');
|
||||
expect(screen.getByTestId('savedViewId')).toHaveTextContent('test-view-id');
|
||||
});
|
||||
|
||||
it('should set traces dataSource when pathname includes traces', () => {
|
||||
render(
|
||||
<MemoryRouter initialEntries={['/traces']}>
|
||||
<Switch>
|
||||
<Route
|
||||
path="/traces"
|
||||
component={(): JSX.Element => (
|
||||
<PreferenceContextProvider>
|
||||
<TestConsumer />
|
||||
</PreferenceContextProvider>
|
||||
)}
|
||||
/>
|
||||
</Switch>
|
||||
</MemoryRouter>,
|
||||
);
|
||||
|
||||
expect(screen.getByTestId('dataSource')).toHaveTextContent('traces');
|
||||
});
|
||||
|
||||
it('should handle invalid viewKey JSON gracefully', () => {
|
||||
// Mock console.error to avoid test output clutter
|
||||
const originalConsoleError = console.error;
|
||||
console.error = jest.fn();
|
||||
|
||||
render(
|
||||
<MemoryRouter initialEntries={['/logs?viewKey=invalid-json']}>
|
||||
<Switch>
|
||||
<Route
|
||||
path="/logs"
|
||||
component={(): JSX.Element => (
|
||||
<PreferenceContextProvider>
|
||||
<TestConsumer />
|
||||
</PreferenceContextProvider>
|
||||
)}
|
||||
/>
|
||||
</Switch>
|
||||
</MemoryRouter>,
|
||||
);
|
||||
|
||||
expect(screen.getByTestId('mode')).toHaveTextContent(PreferenceMode.DIRECT);
|
||||
expect(console.error).toHaveBeenCalled();
|
||||
|
||||
// Restore console.error
|
||||
console.error = originalConsoleError;
|
||||
});
|
||||
|
||||
it('should throw error when usePreferenceContext is used outside provider', () => {
|
||||
// Suppress the error output for this test
|
||||
const originalConsoleError = console.error;
|
||||
console.error = jest.fn();
|
||||
|
||||
expect(() => {
|
||||
render(<TestConsumer />);
|
||||
}).toThrow(
|
||||
'usePreferenceContext must be used within PreferenceContextProvider',
|
||||
);
|
||||
|
||||
// Restore console.error
|
||||
console.error = originalConsoleError;
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,162 @@
|
||||
import { LOCALSTORAGE } from 'constants/localStorage';
|
||||
import { LogViewMode } from 'container/LogsTable';
|
||||
import { defaultLogsSelectedColumns } from 'container/OptionsMenu/constants';
|
||||
import { FontSize } from 'container/OptionsMenu/types';
|
||||
import { FormattingOptions } from 'providers/preferences/types';
|
||||
import {
|
||||
BaseAutocompleteData,
|
||||
DataTypes,
|
||||
} from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
|
||||
import logsLoaderConfig from '../configs/logsLoaderConfig';
|
||||
|
||||
// Mock localStorage
|
||||
const mockLocalStorage: Record<string, string> = {};
|
||||
|
||||
jest.mock('api/browser/localstorage/get', () => ({
|
||||
__esModule: true,
|
||||
default: jest.fn((key: string) => mockLocalStorage[key] || null),
|
||||
}));
|
||||
|
||||
describe('logsLoaderConfig', () => {
|
||||
// Save original location object
|
||||
const originalWindowLocation = window.location;
|
||||
let mockedLocation: Partial<Location>;
|
||||
|
||||
beforeEach(() => {
|
||||
// Setup a mocked location object
|
||||
mockedLocation = {
|
||||
...originalWindowLocation,
|
||||
search: '',
|
||||
};
|
||||
|
||||
// Mock the window.location property
|
||||
Object.defineProperty(window, 'location', {
|
||||
configurable: true,
|
||||
value: mockedLocation,
|
||||
writable: true,
|
||||
});
|
||||
|
||||
// Clear mocked localStorage
|
||||
Object.keys(mockLocalStorage).forEach((key) => {
|
||||
delete mockLocalStorage[key];
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Restore original location
|
||||
Object.defineProperty(window, 'location', {
|
||||
configurable: true,
|
||||
value: originalWindowLocation,
|
||||
writable: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('should have priority order: local, url, default', () => {
|
||||
expect(logsLoaderConfig.priority).toEqual(['local', 'url', 'default']);
|
||||
});
|
||||
|
||||
it('should load from localStorage when available', async () => {
|
||||
const mockColumns: BaseAutocompleteData[] = [
|
||||
{
|
||||
key: 'test-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
];
|
||||
|
||||
// Set up localStorage mock data with the correct key from LOCALSTORAGE enum
|
||||
mockLocalStorage[LOCALSTORAGE.LOGS_LIST_OPTIONS] = JSON.stringify({
|
||||
selectColumns: mockColumns,
|
||||
maxLines: 10,
|
||||
format: 'json',
|
||||
fontSize: 'large',
|
||||
version: 2,
|
||||
});
|
||||
|
||||
const result = await logsLoaderConfig.local();
|
||||
|
||||
expect(result).toEqual({
|
||||
columns: mockColumns,
|
||||
formatting: {
|
||||
maxLines: 10,
|
||||
format: 'json' as LogViewMode,
|
||||
fontSize: 'large' as FontSize,
|
||||
version: 2,
|
||||
} as FormattingOptions,
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle invalid localStorage data gracefully', async () => {
|
||||
// Set up invalid localStorage mock data
|
||||
mockLocalStorage[LOCALSTORAGE.LOGS_LIST_OPTIONS] = 'invalid-json';
|
||||
|
||||
const result = await logsLoaderConfig.local();
|
||||
|
||||
expect(result).toEqual({
|
||||
columns: [] as BaseAutocompleteData[],
|
||||
formatting: undefined,
|
||||
});
|
||||
});
|
||||
|
||||
it('should load from URL when available', async () => {
|
||||
const mockColumns: BaseAutocompleteData[] = [
|
||||
{
|
||||
key: 'url-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
];
|
||||
|
||||
// Set up URL search params
|
||||
mockedLocation.search = `?options=${encodeURIComponent(
|
||||
JSON.stringify({
|
||||
selectColumns: mockColumns,
|
||||
maxLines: 5,
|
||||
format: 'raw',
|
||||
fontSize: 'medium',
|
||||
version: 1,
|
||||
}),
|
||||
)}`;
|
||||
|
||||
const result = await logsLoaderConfig.url();
|
||||
|
||||
expect(result).toEqual({
|
||||
columns: mockColumns,
|
||||
formatting: {
|
||||
maxLines: 5,
|
||||
format: 'raw' as LogViewMode,
|
||||
fontSize: 'medium' as FontSize,
|
||||
version: 1,
|
||||
} as FormattingOptions,
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle invalid URL data gracefully', async () => {
|
||||
// Set up invalid URL search params
|
||||
mockedLocation.search = '?options=invalid-json';
|
||||
|
||||
const result = await logsLoaderConfig.url();
|
||||
|
||||
expect(result).toEqual({
|
||||
columns: [] as BaseAutocompleteData[],
|
||||
formatting: undefined,
|
||||
});
|
||||
});
|
||||
|
||||
it('should provide default values when no other source is available', async () => {
|
||||
const result = await logsLoaderConfig.default();
|
||||
|
||||
expect(result).toEqual({
|
||||
columns: defaultLogsSelectedColumns as BaseAutocompleteData[],
|
||||
formatting: {
|
||||
maxLines: 2,
|
||||
format: 'table' as LogViewMode,
|
||||
fontSize: 'small' as FontSize,
|
||||
version: 1,
|
||||
} as FormattingOptions,
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,261 @@
|
||||
import { LOCALSTORAGE } from 'constants/localStorage';
|
||||
import { LogViewMode } from 'container/LogsTable';
|
||||
import { defaultOptionsQuery } from 'container/OptionsMenu/constants';
|
||||
import { FontSize } from 'container/OptionsMenu/types';
|
||||
import {
|
||||
FormattingOptions,
|
||||
PreferenceMode,
|
||||
Preferences,
|
||||
} from 'providers/preferences/types';
|
||||
import {
|
||||
BaseAutocompleteData,
|
||||
DataTypes,
|
||||
} from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
|
||||
import getLogsUpdaterConfig from '../configs/logsUpdaterConfig';
|
||||
|
||||
// Mock localStorage
|
||||
const mockLocalStorage: Record<string, string> = {};
|
||||
|
||||
jest.mock('api/browser/localstorage/set', () => ({
|
||||
__esModule: true,
|
||||
default: jest.fn((key: string, value: string) => {
|
||||
mockLocalStorage[key] = value;
|
||||
}),
|
||||
}));
|
||||
|
||||
// Mock localStorage.getItem
|
||||
Object.defineProperty(window, 'localStorage', {
|
||||
value: {
|
||||
getItem: jest.fn((key: string) => mockLocalStorage[key] || null),
|
||||
setItem: jest.fn((key: string, value: string) => {
|
||||
mockLocalStorage[key] = value;
|
||||
}),
|
||||
},
|
||||
writable: true,
|
||||
});
|
||||
|
||||
describe('logsUpdaterConfig', () => {
|
||||
// Mock redirectWithOptionsData and setSavedViewPreferences
|
||||
const redirectWithOptionsData = jest.fn();
|
||||
const setSavedViewPreferences = jest.fn();
|
||||
|
||||
const mockPreferences: Preferences = {
|
||||
columns: [],
|
||||
formatting: {
|
||||
maxLines: 2,
|
||||
format: 'table' as LogViewMode,
|
||||
fontSize: 'small' as FontSize,
|
||||
version: 1,
|
||||
},
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
// Clear mocked localStorage
|
||||
Object.keys(mockLocalStorage).forEach((key) => {
|
||||
delete mockLocalStorage[key];
|
||||
});
|
||||
});
|
||||
|
||||
it('should update columns in localStorage for direct mode', () => {
|
||||
const logsUpdater = getLogsUpdaterConfig(
|
||||
mockPreferences,
|
||||
redirectWithOptionsData,
|
||||
setSavedViewPreferences,
|
||||
);
|
||||
|
||||
const newColumns: BaseAutocompleteData[] = [
|
||||
{
|
||||
key: 'new-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
];
|
||||
|
||||
// Set initial localStorage data
|
||||
mockLocalStorage[LOCALSTORAGE.LOGS_LIST_OPTIONS] = JSON.stringify({
|
||||
selectColumns: [
|
||||
{
|
||||
key: 'old-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
],
|
||||
maxLines: 2,
|
||||
});
|
||||
|
||||
logsUpdater.updateColumns(newColumns, PreferenceMode.DIRECT);
|
||||
|
||||
// Should update URL
|
||||
expect(redirectWithOptionsData).toHaveBeenCalledWith({
|
||||
...defaultOptionsQuery,
|
||||
...mockPreferences.formatting,
|
||||
selectColumns: newColumns,
|
||||
});
|
||||
|
||||
// Should update localStorage
|
||||
const storedData = JSON.parse(
|
||||
mockLocalStorage[LOCALSTORAGE.LOGS_LIST_OPTIONS],
|
||||
);
|
||||
expect(storedData.selectColumns).toEqual(newColumns);
|
||||
expect(storedData.maxLines).toBe(2); // Should preserve other fields
|
||||
|
||||
// Should not update saved view preferences
|
||||
expect(setSavedViewPreferences).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should update columns in savedViewPreferences for savedView mode', () => {
|
||||
const logsUpdater = getLogsUpdaterConfig(
|
||||
mockPreferences,
|
||||
redirectWithOptionsData,
|
||||
setSavedViewPreferences,
|
||||
);
|
||||
|
||||
const newColumns: BaseAutocompleteData[] = [
|
||||
{
|
||||
key: 'new-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
];
|
||||
|
||||
logsUpdater.updateColumns(newColumns, PreferenceMode.SAVED_VIEW);
|
||||
|
||||
// Should not update URL in savedView mode
|
||||
expect(redirectWithOptionsData).not.toHaveBeenCalled();
|
||||
|
||||
// Should not update localStorage in savedView mode
|
||||
expect(mockLocalStorage[LOCALSTORAGE.LOGS_LIST_OPTIONS]).toBeUndefined();
|
||||
|
||||
// Should update saved view preferences
|
||||
expect(setSavedViewPreferences).toHaveBeenCalledWith(expect.any(Function));
|
||||
});
|
||||
|
||||
it('should update formatting options in localStorage for direct mode', () => {
|
||||
const logsUpdater = getLogsUpdaterConfig(
|
||||
mockPreferences,
|
||||
redirectWithOptionsData,
|
||||
setSavedViewPreferences,
|
||||
);
|
||||
|
||||
const newFormatting: FormattingOptions = {
|
||||
maxLines: 5,
|
||||
format: 'json' as LogViewMode,
|
||||
fontSize: 'large' as FontSize,
|
||||
version: 1,
|
||||
};
|
||||
|
||||
// Set initial localStorage data
|
||||
mockLocalStorage[LOCALSTORAGE.LOGS_LIST_OPTIONS] = JSON.stringify({
|
||||
selectColumns: [
|
||||
{
|
||||
key: 'column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
],
|
||||
maxLines: 2,
|
||||
format: 'table',
|
||||
});
|
||||
|
||||
logsUpdater.updateFormatting(newFormatting, PreferenceMode.DIRECT);
|
||||
|
||||
// Should always update URL for both modes
|
||||
expect(redirectWithOptionsData).toHaveBeenCalledWith({
|
||||
...defaultOptionsQuery,
|
||||
...mockPreferences.formatting,
|
||||
...newFormatting,
|
||||
});
|
||||
|
||||
// Should update localStorage in direct mode
|
||||
const storedData = JSON.parse(
|
||||
mockLocalStorage[LOCALSTORAGE.LOGS_LIST_OPTIONS],
|
||||
);
|
||||
expect(storedData.maxLines).toBe(5);
|
||||
expect(storedData.format).toBe('json');
|
||||
expect(storedData.fontSize).toBe('large');
|
||||
expect(storedData.version).toBe(1);
|
||||
expect(storedData.selectColumns).toEqual([
|
||||
{
|
||||
key: 'column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
]); // Should preserve columns
|
||||
});
|
||||
|
||||
it('should not update localStorage for savedView mode in updateFormatting', () => {
|
||||
const logsUpdater = getLogsUpdaterConfig(
|
||||
mockPreferences,
|
||||
redirectWithOptionsData,
|
||||
setSavedViewPreferences,
|
||||
);
|
||||
|
||||
const newFormatting: FormattingOptions = {
|
||||
maxLines: 5,
|
||||
format: 'json' as LogViewMode,
|
||||
fontSize: 'large' as FontSize,
|
||||
version: 1,
|
||||
};
|
||||
|
||||
// Set initial localStorage data
|
||||
mockLocalStorage[LOCALSTORAGE.LOGS_LIST_OPTIONS] = JSON.stringify({
|
||||
selectColumns: [
|
||||
{
|
||||
key: 'column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
],
|
||||
maxLines: 2,
|
||||
format: 'table',
|
||||
});
|
||||
|
||||
logsUpdater.updateFormatting(newFormatting, PreferenceMode.SAVED_VIEW);
|
||||
|
||||
// Should not override localStorage in savedView mode
|
||||
const storedData = JSON.parse(
|
||||
mockLocalStorage[LOCALSTORAGE.LOGS_LIST_OPTIONS],
|
||||
);
|
||||
expect(storedData.maxLines).toBe(2); // Should remain the same
|
||||
expect(storedData.format).toBe('table'); // Should remain the same
|
||||
|
||||
// Should update saved view preferences
|
||||
expect(setSavedViewPreferences).toHaveBeenCalledWith(expect.any(Function));
|
||||
});
|
||||
|
||||
it('should initialize localStorage if it does not exist', () => {
|
||||
const logsUpdater = getLogsUpdaterConfig(
|
||||
mockPreferences,
|
||||
redirectWithOptionsData,
|
||||
setSavedViewPreferences,
|
||||
);
|
||||
|
||||
const newFormatting: FormattingOptions = {
|
||||
maxLines: 5,
|
||||
format: 'json' as LogViewMode,
|
||||
fontSize: 'large' as FontSize,
|
||||
version: 1,
|
||||
};
|
||||
|
||||
// No initial localStorage data
|
||||
|
||||
logsUpdater.updateFormatting(newFormatting, PreferenceMode.DIRECT);
|
||||
|
||||
// Should create localStorage entry
|
||||
const storedData = JSON.parse(
|
||||
mockLocalStorage[LOCALSTORAGE.LOGS_LIST_OPTIONS],
|
||||
);
|
||||
expect(storedData.maxLines).toBe(5);
|
||||
expect(storedData.format).toBe('json');
|
||||
expect(storedData.fontSize).toBe('large');
|
||||
expect(storedData.version).toBe(1);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,131 @@
|
||||
import { LOCALSTORAGE } from 'constants/localStorage';
|
||||
import { defaultTraceSelectedColumns } from 'container/OptionsMenu/constants';
|
||||
import {
|
||||
BaseAutocompleteData,
|
||||
DataTypes,
|
||||
} from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
|
||||
import tracesLoaderConfig from '../configs/tracesLoaderConfig';
|
||||
|
||||
// Mock localStorage
|
||||
const mockLocalStorage: Record<string, string> = {};
|
||||
|
||||
jest.mock('api/browser/localstorage/get', () => ({
|
||||
__esModule: true,
|
||||
default: jest.fn((key: string) => mockLocalStorage[key] || null),
|
||||
}));
|
||||
|
||||
describe('tracesLoaderConfig', () => {
|
||||
// Save original location object
|
||||
const originalWindowLocation = window.location;
|
||||
let mockedLocation: Partial<Location>;
|
||||
|
||||
beforeEach(() => {
|
||||
// Setup a mocked location object
|
||||
mockedLocation = {
|
||||
...originalWindowLocation,
|
||||
search: '',
|
||||
};
|
||||
|
||||
// Mock the window.location property
|
||||
Object.defineProperty(window, 'location', {
|
||||
configurable: true,
|
||||
value: mockedLocation,
|
||||
writable: true,
|
||||
});
|
||||
|
||||
// Clear mocked localStorage
|
||||
Object.keys(mockLocalStorage).forEach((key) => {
|
||||
delete mockLocalStorage[key];
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Restore original location
|
||||
Object.defineProperty(window, 'location', {
|
||||
configurable: true,
|
||||
value: originalWindowLocation,
|
||||
writable: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('should have priority order: local, url, default', () => {
|
||||
expect(tracesLoaderConfig.priority).toEqual(['local', 'url', 'default']);
|
||||
});
|
||||
|
||||
it('should load from localStorage when available', async () => {
|
||||
const mockColumns: BaseAutocompleteData[] = [
|
||||
{
|
||||
key: 'test-trace-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
];
|
||||
|
||||
// Set up localStorage mock data with the correct key from LOCALSTORAGE enum
|
||||
mockLocalStorage[LOCALSTORAGE.TRACES_LIST_OPTIONS] = JSON.stringify({
|
||||
selectColumns: mockColumns,
|
||||
});
|
||||
|
||||
const result = await tracesLoaderConfig.local();
|
||||
|
||||
expect(result).toEqual({
|
||||
columns: mockColumns,
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle invalid localStorage data gracefully', async () => {
|
||||
// Set up invalid localStorage mock data
|
||||
mockLocalStorage[LOCALSTORAGE.TRACES_LIST_OPTIONS] = 'invalid-json';
|
||||
|
||||
const result = await tracesLoaderConfig.local();
|
||||
|
||||
expect(result).toEqual({
|
||||
columns: [] as BaseAutocompleteData[],
|
||||
});
|
||||
});
|
||||
|
||||
it('should load from URL when available', async () => {
|
||||
const mockColumns: BaseAutocompleteData[] = [
|
||||
{
|
||||
key: 'url-trace-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
];
|
||||
|
||||
// Set up URL search params
|
||||
mockedLocation.search = `?options=${encodeURIComponent(
|
||||
JSON.stringify({
|
||||
selectColumns: mockColumns,
|
||||
}),
|
||||
)}`;
|
||||
|
||||
const result = await tracesLoaderConfig.url();
|
||||
|
||||
expect(result).toEqual({
|
||||
columns: mockColumns,
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle invalid URL data gracefully', async () => {
|
||||
// Set up invalid URL search params
|
||||
mockedLocation.search = '?options=invalid-json';
|
||||
|
||||
const result = await tracesLoaderConfig.url();
|
||||
|
||||
expect(result).toEqual({
|
||||
columns: [] as BaseAutocompleteData[],
|
||||
});
|
||||
});
|
||||
|
||||
it('should provide default values when no other source is available', async () => {
|
||||
const result = await tracesLoaderConfig.default();
|
||||
|
||||
expect(result).toEqual({
|
||||
columns: defaultTraceSelectedColumns as BaseAutocompleteData[],
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,142 @@
|
||||
import { LOCALSTORAGE } from 'constants/localStorage';
|
||||
import { defaultOptionsQuery } from 'container/OptionsMenu/constants';
|
||||
import {
|
||||
BaseAutocompleteData,
|
||||
DataTypes,
|
||||
} from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
|
||||
import getTracesUpdaterConfig from '../configs/tracesUpdaterConfig';
|
||||
import { PreferenceMode } from '../types';
|
||||
|
||||
// Mock setLocalStorageKey
|
||||
const mockSetLocalStorageKey = jest.fn();
|
||||
jest.mock('api/browser/localstorage/set', () => ({
|
||||
__esModule: true,
|
||||
default: (key: string, value: string): void =>
|
||||
mockSetLocalStorageKey(key, value),
|
||||
}));
|
||||
|
||||
// Mock localStorage
|
||||
let mockLocalStorage: Record<string, string> = {};
|
||||
Object.defineProperty(global, 'localStorage', {
|
||||
value: {
|
||||
getItem: jest.fn((key: string) => mockLocalStorage[key] || null),
|
||||
setItem: jest.fn((key: string, value: string) => {
|
||||
mockLocalStorage[key] = value;
|
||||
}),
|
||||
},
|
||||
writable: true,
|
||||
});
|
||||
|
||||
describe('tracesUpdaterConfig', () => {
|
||||
// Mock functions
|
||||
const mockRedirectWithOptionsData = jest.fn();
|
||||
const mockSetSavedViewPreferences = jest.fn();
|
||||
|
||||
// Test data
|
||||
const mockColumns: BaseAutocompleteData[] = [
|
||||
{
|
||||
key: 'test-trace-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
];
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
// Reset mockLocalStorage
|
||||
mockLocalStorage = {};
|
||||
});
|
||||
|
||||
it('should update columns in localStorage and redirect with options in direct mode', () => {
|
||||
const tracesUpdaterConfig = getTracesUpdaterConfig(
|
||||
mockRedirectWithOptionsData,
|
||||
mockSetSavedViewPreferences,
|
||||
);
|
||||
|
||||
tracesUpdaterConfig.updateColumns(mockColumns, PreferenceMode.DIRECT);
|
||||
|
||||
// Should redirect with the updated columns
|
||||
expect(mockRedirectWithOptionsData).toHaveBeenCalledWith({
|
||||
...defaultOptionsQuery,
|
||||
selectColumns: mockColumns,
|
||||
});
|
||||
|
||||
// Should set localStorage with the updated columns
|
||||
expect(mockSetLocalStorageKey).toHaveBeenCalledWith(
|
||||
LOCALSTORAGE.TRACES_LIST_OPTIONS,
|
||||
JSON.stringify({ selectColumns: mockColumns }),
|
||||
);
|
||||
});
|
||||
|
||||
it('should merge with existing localStorage data in direct mode', () => {
|
||||
// Setup existing localStorage data
|
||||
mockLocalStorage[LOCALSTORAGE.TRACES_LIST_OPTIONS] = JSON.stringify({
|
||||
selectColumns: [
|
||||
{
|
||||
key: 'existing-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
],
|
||||
otherProp: 'value',
|
||||
});
|
||||
|
||||
const tracesUpdaterConfig = getTracesUpdaterConfig(
|
||||
mockRedirectWithOptionsData,
|
||||
mockSetSavedViewPreferences,
|
||||
);
|
||||
|
||||
tracesUpdaterConfig.updateColumns(mockColumns, PreferenceMode.DIRECT);
|
||||
|
||||
// Should set localStorage with the updated columns while preserving other props
|
||||
expect(mockSetLocalStorageKey).toHaveBeenCalledWith(
|
||||
LOCALSTORAGE.TRACES_LIST_OPTIONS,
|
||||
JSON.stringify({
|
||||
selectColumns: mockColumns,
|
||||
otherProp: 'value',
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should update savedViewPreferences in savedView mode', () => {
|
||||
const tracesUpdaterConfig = getTracesUpdaterConfig(
|
||||
mockRedirectWithOptionsData,
|
||||
mockSetSavedViewPreferences,
|
||||
);
|
||||
|
||||
tracesUpdaterConfig.updateColumns(mockColumns, PreferenceMode.SAVED_VIEW);
|
||||
|
||||
// Should not redirect or modify localStorage in savedView mode
|
||||
expect(mockRedirectWithOptionsData).not.toHaveBeenCalled();
|
||||
expect(mockSetLocalStorageKey).not.toHaveBeenCalled();
|
||||
|
||||
// Should update savedViewPreferences
|
||||
expect(mockSetSavedViewPreferences).toHaveBeenCalledWith({
|
||||
columns: mockColumns,
|
||||
formatting: {
|
||||
maxLines: 2,
|
||||
format: 'table',
|
||||
fontSize: 'small',
|
||||
version: 1,
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it('should have a no-op updateFormatting method', () => {
|
||||
const tracesUpdaterConfig = getTracesUpdaterConfig(
|
||||
mockRedirectWithOptionsData,
|
||||
mockSetSavedViewPreferences,
|
||||
);
|
||||
|
||||
// Call updateFormatting and verify it does nothing
|
||||
tracesUpdaterConfig.updateFormatting();
|
||||
|
||||
// No API calls should be made
|
||||
expect(mockRedirectWithOptionsData).not.toHaveBeenCalled();
|
||||
expect(mockSetLocalStorageKey).not.toHaveBeenCalled();
|
||||
expect(mockSetSavedViewPreferences).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,152 @@
|
||||
/* eslint-disable sonarjs/no-identical-functions */
|
||||
/* eslint-disable sonarjs/no-duplicate-string */
|
||||
import { renderHook, waitFor } from '@testing-library/react';
|
||||
import { DataSource } from 'types/common/queryBuilder';
|
||||
|
||||
import logsLoaderConfig from '../configs/logsLoaderConfig';
|
||||
import { usePreferenceLoader } from '../loader/usePreferenceLoader';
|
||||
|
||||
// Mock the config loaders
|
||||
jest.mock('../configs/logsLoaderConfig', () => ({
|
||||
__esModule: true,
|
||||
default: {
|
||||
priority: ['local', 'url', 'default'],
|
||||
local: jest.fn().mockResolvedValue({
|
||||
columns: [{ name: 'local-column' }],
|
||||
formatting: { maxLines: 5, format: 'table', fontSize: 'medium', version: 1 },
|
||||
}),
|
||||
url: jest.fn().mockResolvedValue({
|
||||
columns: [{ name: 'url-column' }],
|
||||
formatting: { maxLines: 3, format: 'table', fontSize: 'small', version: 1 },
|
||||
}),
|
||||
default: jest.fn().mockResolvedValue({
|
||||
columns: [{ name: 'default-column' }],
|
||||
formatting: { maxLines: 2, format: 'table', fontSize: 'small', version: 1 },
|
||||
}),
|
||||
},
|
||||
}));
|
||||
|
||||
jest.mock('../configs/tracesLoaderConfig', () => ({
|
||||
__esModule: true,
|
||||
default: {
|
||||
priority: ['local', 'url', 'default'],
|
||||
local: jest.fn().mockResolvedValue({
|
||||
columns: [{ name: 'local-trace-column' }],
|
||||
}),
|
||||
url: jest.fn().mockResolvedValue({
|
||||
columns: [{ name: 'url-trace-column' }],
|
||||
}),
|
||||
default: jest.fn().mockResolvedValue({
|
||||
columns: [{ name: 'default-trace-column' }],
|
||||
}),
|
||||
},
|
||||
}));
|
||||
|
||||
describe('usePreferenceLoader', () => {
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
it('should load logs preferences based on priority order', async () => {
|
||||
const setReSync = jest.fn();
|
||||
const { result } = renderHook(() =>
|
||||
usePreferenceLoader({
|
||||
dataSource: DataSource.LOGS,
|
||||
reSync: false,
|
||||
setReSync,
|
||||
}),
|
||||
);
|
||||
|
||||
// Initially it should be loading
|
||||
expect(result.current.loading).toBe(true);
|
||||
expect(result.current.preferences).toBe(null);
|
||||
expect(result.current.error).toBe(null);
|
||||
|
||||
// Wait for the loader to complete
|
||||
await waitFor(() => {
|
||||
expect(result.current.loading).toBe(false);
|
||||
});
|
||||
|
||||
// Should have loaded from local storage (highest priority)
|
||||
expect(result.current.preferences).toEqual({
|
||||
columns: [{ name: 'local-column' }],
|
||||
formatting: { maxLines: 5, format: 'table', fontSize: 'medium', version: 1 },
|
||||
});
|
||||
expect(result.current.error).toBe(null);
|
||||
expect(setReSync).not.toHaveBeenCalled(); // Should not call setReSync when reSync is false
|
||||
});
|
||||
|
||||
it('should load traces preferences', async () => {
|
||||
const setReSync = jest.fn();
|
||||
const { result } = renderHook(() =>
|
||||
usePreferenceLoader({
|
||||
dataSource: DataSource.TRACES,
|
||||
reSync: false,
|
||||
setReSync,
|
||||
}),
|
||||
);
|
||||
|
||||
// Wait for the loader to complete
|
||||
await waitFor(() => {
|
||||
expect(result.current.loading).toBe(false);
|
||||
});
|
||||
|
||||
// Should have loaded trace columns
|
||||
expect(result.current.preferences).toEqual({
|
||||
columns: [{ name: 'local-trace-column' }],
|
||||
});
|
||||
expect(setReSync).not.toHaveBeenCalled(); // Should not call setReSync when reSync is false
|
||||
});
|
||||
|
||||
it('should call setReSync when reSync is true', async () => {
|
||||
const setReSync = jest.fn();
|
||||
|
||||
// Test that the hook calls setReSync(false) when reSync is true
|
||||
// We'll unmount quickly to avoid the infinite loop
|
||||
const { unmount } = renderHook(() =>
|
||||
usePreferenceLoader({
|
||||
dataSource: DataSource.LOGS,
|
||||
reSync: true,
|
||||
setReSync,
|
||||
}),
|
||||
);
|
||||
// Wait for the effect to run
|
||||
await waitFor(() => {
|
||||
expect(setReSync).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
// Unmount to stop the effect
|
||||
unmount();
|
||||
|
||||
// Should have called setReSync(false) to reset the reSync flag
|
||||
expect(setReSync).toHaveBeenCalledWith(false);
|
||||
});
|
||||
|
||||
it('should handle errors during loading', async () => {
|
||||
// Mock an error in the loader using jest.spyOn
|
||||
const localSpy = jest.spyOn(logsLoaderConfig, 'local');
|
||||
localSpy.mockRejectedValueOnce(new Error('Loading failed'));
|
||||
|
||||
const setReSync = jest.fn();
|
||||
const { result } = renderHook(() =>
|
||||
usePreferenceLoader({
|
||||
dataSource: DataSource.LOGS,
|
||||
reSync: false,
|
||||
setReSync,
|
||||
}),
|
||||
);
|
||||
|
||||
// Wait for the loader to complete
|
||||
await waitFor(() => {
|
||||
expect(result.current.loading).toBe(false);
|
||||
});
|
||||
|
||||
// Should have set the error
|
||||
expect(result.current.error).toBeInstanceOf(Error);
|
||||
expect(result.current.error?.message).toBe('Loading failed');
|
||||
expect(result.current.preferences).toBe(null);
|
||||
|
||||
// Restore original implementation
|
||||
localSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,240 @@
|
||||
/* eslint-disable sonarjs/no-identical-functions */
|
||||
import { renderHook } from '@testing-library/react';
|
||||
import { LogViewMode } from 'container/LogsTable';
|
||||
import { FontSize } from 'container/OptionsMenu/types';
|
||||
import {
|
||||
FormattingOptions,
|
||||
PreferenceMode,
|
||||
Preferences,
|
||||
} from 'providers/preferences/types';
|
||||
import { act } from 'react-dom/test-utils';
|
||||
import {
|
||||
BaseAutocompleteData,
|
||||
DataTypes,
|
||||
} from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
import { DataSource } from 'types/common/queryBuilder';
|
||||
|
||||
import { usePreferenceUpdater } from '../updater/usePreferenceUpdater';
|
||||
|
||||
// Mock the config updaters
|
||||
const mockUpdateColumns = jest.fn();
|
||||
const mockUpdateFormatting = jest.fn();
|
||||
|
||||
jest.mock('../configs/logsUpdaterConfig', () => ({
|
||||
__esModule: true,
|
||||
default: jest.fn().mockImplementation(() => ({
|
||||
updateColumns: mockUpdateColumns,
|
||||
updateFormatting: mockUpdateFormatting,
|
||||
})),
|
||||
}));
|
||||
|
||||
jest.mock('../configs/tracesUpdaterConfig', () => ({
|
||||
__esModule: true,
|
||||
default: jest.fn().mockImplementation(() => ({
|
||||
updateColumns: mockUpdateColumns,
|
||||
updateFormatting: mockUpdateFormatting,
|
||||
})),
|
||||
}));
|
||||
|
||||
// Mock the URL query hook
|
||||
jest.mock('hooks/useUrlQueryData', () => ({
|
||||
__esModule: true,
|
||||
default: jest.fn().mockReturnValue({
|
||||
redirectWithQuery: jest.fn(),
|
||||
}),
|
||||
}));
|
||||
|
||||
describe('usePreferenceUpdater', () => {
|
||||
const mockPreferences: Preferences = {
|
||||
columns: [],
|
||||
formatting: {
|
||||
maxLines: 2,
|
||||
format: 'table' as LogViewMode,
|
||||
fontSize: 'small' as FontSize,
|
||||
version: 1,
|
||||
},
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
it('should return updateColumns and updateFormatting functions', () => {
|
||||
const setReSync = jest.fn();
|
||||
const setSavedViewPreferences = jest.fn();
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
usePreferenceUpdater({
|
||||
dataSource: DataSource.LOGS,
|
||||
mode: PreferenceMode.DIRECT,
|
||||
preferences: mockPreferences,
|
||||
setReSync,
|
||||
setSavedViewPreferences,
|
||||
}),
|
||||
);
|
||||
|
||||
// Should return the update functions
|
||||
expect(typeof result.current.updateColumns).toBe('function');
|
||||
expect(typeof result.current.updateFormatting).toBe('function');
|
||||
});
|
||||
|
||||
it('should call the logs updater for updateColumns with logs dataSource', () => {
|
||||
const setReSync = jest.fn();
|
||||
const setSavedViewPreferences = jest.fn();
|
||||
const newColumns: BaseAutocompleteData[] = [
|
||||
{
|
||||
key: 'new-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
];
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
usePreferenceUpdater({
|
||||
dataSource: DataSource.LOGS,
|
||||
mode: PreferenceMode.DIRECT,
|
||||
preferences: mockPreferences,
|
||||
setReSync,
|
||||
setSavedViewPreferences,
|
||||
}),
|
||||
);
|
||||
|
||||
act(() => {
|
||||
result.current.updateColumns(newColumns);
|
||||
});
|
||||
|
||||
// Should call the logs updater
|
||||
expect(mockUpdateColumns).toHaveBeenCalledWith(
|
||||
newColumns,
|
||||
PreferenceMode.DIRECT,
|
||||
);
|
||||
expect(setReSync).toHaveBeenCalledWith(true);
|
||||
});
|
||||
|
||||
it('should call the logs updater for updateFormatting with logs dataSource', () => {
|
||||
const setReSync = jest.fn();
|
||||
const setSavedViewPreferences = jest.fn();
|
||||
const newFormatting: FormattingOptions = {
|
||||
maxLines: 10,
|
||||
format: 'table' as LogViewMode,
|
||||
fontSize: 'large' as FontSize,
|
||||
version: 1,
|
||||
};
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
usePreferenceUpdater({
|
||||
dataSource: DataSource.LOGS,
|
||||
mode: PreferenceMode.DIRECT,
|
||||
preferences: mockPreferences,
|
||||
setReSync,
|
||||
setSavedViewPreferences,
|
||||
}),
|
||||
);
|
||||
|
||||
act(() => {
|
||||
result.current.updateFormatting(newFormatting);
|
||||
});
|
||||
|
||||
// Should call the logs updater
|
||||
expect(mockUpdateFormatting).toHaveBeenCalledWith(
|
||||
newFormatting,
|
||||
PreferenceMode.DIRECT,
|
||||
);
|
||||
expect(setReSync).toHaveBeenCalledWith(true);
|
||||
});
|
||||
|
||||
it('should call the traces updater for updateColumns with traces dataSource', () => {
|
||||
const setReSync = jest.fn();
|
||||
const setSavedViewPreferences = jest.fn();
|
||||
const newColumns: BaseAutocompleteData[] = [
|
||||
{
|
||||
key: 'new-trace-column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
];
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
usePreferenceUpdater({
|
||||
dataSource: DataSource.TRACES,
|
||||
mode: PreferenceMode.DIRECT,
|
||||
preferences: mockPreferences,
|
||||
setReSync,
|
||||
setSavedViewPreferences,
|
||||
}),
|
||||
);
|
||||
|
||||
act(() => {
|
||||
result.current.updateColumns(newColumns);
|
||||
});
|
||||
|
||||
// Should call the traces updater
|
||||
expect(mockUpdateColumns).toHaveBeenCalledWith(
|
||||
newColumns,
|
||||
PreferenceMode.DIRECT,
|
||||
);
|
||||
expect(setReSync).toHaveBeenCalledWith(true);
|
||||
});
|
||||
|
||||
it('should call the traces updater for updateFormatting with traces dataSource', () => {
|
||||
const setReSync = jest.fn();
|
||||
const setSavedViewPreferences = jest.fn();
|
||||
const newFormatting: FormattingOptions = {
|
||||
maxLines: 10,
|
||||
format: 'table' as LogViewMode,
|
||||
fontSize: 'large' as FontSize,
|
||||
version: 1,
|
||||
};
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
usePreferenceUpdater({
|
||||
dataSource: DataSource.TRACES,
|
||||
mode: PreferenceMode.DIRECT,
|
||||
preferences: mockPreferences,
|
||||
setReSync,
|
||||
setSavedViewPreferences,
|
||||
}),
|
||||
);
|
||||
|
||||
act(() => {
|
||||
result.current.updateFormatting(newFormatting);
|
||||
});
|
||||
|
||||
// Should call the traces updater
|
||||
expect(mockUpdateFormatting).toHaveBeenCalledWith(
|
||||
newFormatting,
|
||||
PreferenceMode.DIRECT,
|
||||
);
|
||||
expect(setReSync).toHaveBeenCalledWith(true);
|
||||
});
|
||||
|
||||
it('should increment reSync counter when updates are called', () => {
|
||||
const setReSync = jest.fn();
|
||||
const setSavedViewPreferences = jest.fn();
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
usePreferenceUpdater({
|
||||
dataSource: DataSource.LOGS,
|
||||
mode: PreferenceMode.DIRECT,
|
||||
preferences: mockPreferences,
|
||||
setReSync,
|
||||
setSavedViewPreferences,
|
||||
}),
|
||||
);
|
||||
|
||||
act(() => {
|
||||
result.current.updateColumns([
|
||||
{
|
||||
key: 'column',
|
||||
type: 'tag',
|
||||
dataType: DataTypes.String,
|
||||
isColumn: true,
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
expect(setReSync).toHaveBeenCalledWith(true);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,67 @@
|
||||
/* eslint-disable no-empty */
|
||||
import getLocalStorageKey from 'api/browser/localstorage/get';
|
||||
import { LOCALSTORAGE } from 'constants/localStorage';
|
||||
import { defaultLogsSelectedColumns } from 'container/OptionsMenu/constants';
|
||||
import { FontSize } from 'container/OptionsMenu/types';
|
||||
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
|
||||
import { FormattingOptions } from '../types';
|
||||
|
||||
// --- LOGS preferences loader config ---
|
||||
const logsLoaders = {
|
||||
local: async (): Promise<{
|
||||
columns: BaseAutocompleteData[];
|
||||
formatting: FormattingOptions;
|
||||
}> => {
|
||||
const local = getLocalStorageKey(LOCALSTORAGE.LOGS_LIST_OPTIONS);
|
||||
if (local) {
|
||||
try {
|
||||
const parsed = JSON.parse(local);
|
||||
return {
|
||||
columns: parsed.selectColumns || [],
|
||||
formatting: {
|
||||
maxLines: parsed.maxLines ?? 2,
|
||||
format: parsed.format ?? 'table',
|
||||
fontSize: parsed.fontSize ?? 'small',
|
||||
version: parsed.version ?? 1,
|
||||
},
|
||||
};
|
||||
} catch {}
|
||||
}
|
||||
return { columns: [], formatting: undefined } as any;
|
||||
},
|
||||
url: async (): Promise<{
|
||||
columns: BaseAutocompleteData[];
|
||||
formatting: FormattingOptions;
|
||||
}> => {
|
||||
const urlParams = new URLSearchParams(window.location.search);
|
||||
try {
|
||||
const options = JSON.parse(urlParams.get('options') || '{}');
|
||||
return {
|
||||
columns: options.selectColumns || [],
|
||||
formatting: {
|
||||
maxLines: options.maxLines ?? 2,
|
||||
format: options.format ?? 'table',
|
||||
fontSize: options.fontSize ?? 'small',
|
||||
version: options.version ?? 1,
|
||||
},
|
||||
};
|
||||
} catch {}
|
||||
return { columns: [], formatting: undefined } as any;
|
||||
},
|
||||
default: async (): Promise<{
|
||||
columns: BaseAutocompleteData[];
|
||||
formatting: FormattingOptions;
|
||||
}> => ({
|
||||
columns: defaultLogsSelectedColumns as BaseAutocompleteData[],
|
||||
formatting: {
|
||||
maxLines: 2,
|
||||
format: 'table',
|
||||
fontSize: 'small' as FontSize,
|
||||
version: 1,
|
||||
},
|
||||
}),
|
||||
priority: ['local', 'url', 'default'] as const,
|
||||
};
|
||||
|
||||
export default logsLoaders;
|
||||
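For context, the url() loader above reads the same `options` search param that the explorer writes when it redirects. A minimal sketch of that round trip, assuming the field shape shown in the loader (the column literal below is illustrative only, not taken from this diff):

// Sketch: building the `options` search param that logsLoaders.url() can parse back
const illustrativeOptions = {
	selectColumns: [
		{ key: 'body', type: 'tag', dataType: 'string', isColumn: true },
	],
	maxLines: 5,
	format: 'raw',
	fontSize: 'medium',
	version: 1,
};
const search = `?options=${encodeURIComponent(JSON.stringify(illustrativeOptions))}`;
// logsLoaders.url() decodes this back into { columns, formatting } with the same fields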
@@ -0,0 +1,85 @@
|
||||
import setLocalStorageKey from 'api/browser/localstorage/set';
|
||||
import { LOCALSTORAGE } from 'constants/localStorage';
|
||||
import { defaultOptionsQuery } from 'container/OptionsMenu/constants';
|
||||
import { FontSize, OptionsQuery } from 'container/OptionsMenu/types';
|
||||
import { Dispatch, SetStateAction } from 'react';
|
||||
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
|
||||
import { FormattingOptions, PreferenceMode, Preferences } from '../types';
|
||||
|
||||
// --- LOGS preferences updater config ---
|
||||
const getLogsUpdaterConfig = (
|
||||
preferences: Preferences | null,
|
||||
redirectWithOptionsData: (options: OptionsQuery) => void,
|
||||
setSavedViewPreferences: Dispatch<SetStateAction<Preferences | null>>,
|
||||
): {
|
||||
updateColumns: (newColumns: BaseAutocompleteData[], mode: string) => void;
|
||||
updateFormatting: (newFormatting: FormattingOptions, mode: string) => void;
|
||||
} => ({
|
||||
updateColumns: (newColumns: BaseAutocompleteData[], mode: string): void => {
|
||||
if (mode === PreferenceMode.SAVED_VIEW) {
|
||||
setSavedViewPreferences((prev) => {
|
||||
if (!prev) {
|
||||
return {
|
||||
columns: newColumns,
|
||||
formatting: {
|
||||
maxLines: 2,
|
||||
format: 'table',
|
||||
fontSize: 'small' as FontSize,
|
||||
version: 1,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
...prev,
|
||||
columns: newColumns,
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
if (mode === PreferenceMode.DIRECT) {
|
||||
// In direct mode, update only the columns; formatting is carried over from current preferences
|
||||
redirectWithOptionsData({
|
||||
...defaultOptionsQuery,
|
||||
...preferences?.formatting,
|
||||
selectColumns: newColumns,
|
||||
});
|
||||
|
||||
// Also update local storage
|
||||
const local = JSON.parse(
|
||||
localStorage.getItem(LOCALSTORAGE.LOGS_LIST_OPTIONS) || '{}',
|
||||
);
|
||||
local.selectColumns = newColumns;
|
||||
setLocalStorageKey(LOCALSTORAGE.LOGS_LIST_OPTIONS, JSON.stringify(local));
|
||||
}
|
||||
},
|
||||
updateFormatting: (newFormatting: FormattingOptions, mode: string): void => {
|
||||
if (mode === PreferenceMode.SAVED_VIEW) {
|
||||
setSavedViewPreferences((prev) => {
|
||||
if (!prev) return { columns: [], formatting: newFormatting };
|
||||
return {
|
||||
...prev,
|
||||
formatting: newFormatting,
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
if (mode === PreferenceMode.DIRECT) {
|
||||
redirectWithOptionsData({
|
||||
...defaultOptionsQuery,
|
||||
...preferences?.formatting,
|
||||
...newFormatting,
|
||||
});
|
||||
|
||||
// Also update local storage
|
||||
const local = JSON.parse(
|
||||
localStorage.getItem(LOCALSTORAGE.LOGS_LIST_OPTIONS) || '{}',
|
||||
);
|
||||
Object.assign(local, newFormatting);
|
||||
setLocalStorageKey(LOCALSTORAGE.LOGS_LIST_OPTIONS, JSON.stringify(local));
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
export default getLogsUpdaterConfig;
|
||||
@@ -0,0 +1,43 @@
|
||||
/* eslint-disable no-empty */
|
||||
import getLocalStorageKey from 'api/browser/localstorage/get';
|
||||
import { LOCALSTORAGE } from 'constants/localStorage';
|
||||
import { defaultTraceSelectedColumns } from 'container/OptionsMenu/constants';
|
||||
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
|
||||
// --- TRACES preferences loader config ---
|
||||
const tracesLoaders = {
|
||||
local: async (): Promise<{
|
||||
columns: BaseAutocompleteData[];
|
||||
}> => {
|
||||
const local = getLocalStorageKey(LOCALSTORAGE.TRACES_LIST_OPTIONS);
|
||||
if (local) {
|
||||
try {
|
||||
const parsed = JSON.parse(local);
|
||||
return {
|
||||
columns: parsed.selectColumns || [],
|
||||
};
|
||||
} catch {}
|
||||
}
|
||||
return { columns: [] };
|
||||
},
|
||||
url: async (): Promise<{
|
||||
columns: BaseAutocompleteData[];
|
||||
}> => {
|
||||
const urlParams = new URLSearchParams(window.location.search);
|
||||
try {
|
||||
const options = JSON.parse(urlParams.get('options') || '{}');
|
||||
return {
|
||||
columns: options.selectColumns || [],
|
||||
};
|
||||
} catch {}
|
||||
return { columns: [] };
|
||||
},
|
||||
default: async (): Promise<{
|
||||
columns: BaseAutocompleteData[];
|
||||
}> => ({
|
||||
columns: defaultTraceSelectedColumns as BaseAutocompleteData[],
|
||||
}),
|
||||
priority: ['local', 'url', 'default'] as const,
|
||||
};
|
||||
|
||||
export default tracesLoaders;
|
||||
@@ -0,0 +1,49 @@
|
||||
import setLocalStorageKey from 'api/browser/localstorage/set';
|
||||
import { LOCALSTORAGE } from 'constants/localStorage';
|
||||
import { defaultOptionsQuery } from 'container/OptionsMenu/constants';
|
||||
import { FontSize, OptionsQuery } from 'container/OptionsMenu/types';
|
||||
import { Dispatch, SetStateAction } from 'react';
|
||||
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
|
||||
import { PreferenceMode, Preferences } from '../types';
|
||||
|
||||
// --- TRACES preferences updater config ---
|
||||
const getTracesUpdaterConfig = (
|
||||
redirectWithOptionsData: (options: OptionsQuery) => void,
|
||||
setSavedViewPreferences: Dispatch<SetStateAction<Preferences | null>>,
|
||||
): {
|
||||
updateColumns: (newColumns: BaseAutocompleteData[], mode: string) => void;
|
||||
updateFormatting: () => void;
|
||||
} => ({
|
||||
updateColumns: (newColumns: BaseAutocompleteData[], mode: string): void => {
|
||||
// Traces only persist column selection; formatting falls back to defaults
|
||||
if (mode === PreferenceMode.SAVED_VIEW) {
|
||||
setSavedViewPreferences({
|
||||
columns: newColumns,
|
||||
formatting: {
|
||||
maxLines: 2,
|
||||
format: 'table',
|
||||
fontSize: 'small' as FontSize,
|
||||
version: 1,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
if (mode === PreferenceMode.DIRECT) {
|
||||
// In direct mode, update only the columns (traces have no formatting preferences to carry over)
|
||||
redirectWithOptionsData({
|
||||
...defaultOptionsQuery,
|
||||
selectColumns: newColumns,
|
||||
});
|
||||
|
||||
const local = JSON.parse(
|
||||
localStorage.getItem(LOCALSTORAGE.TRACES_LIST_OPTIONS) || '{}',
|
||||
);
|
||||
local.selectColumns = newColumns;
|
||||
setLocalStorageKey(LOCALSTORAGE.TRACES_LIST_OPTIONS, JSON.stringify(local));
|
||||
}
|
||||
},
|
||||
updateFormatting: (): void => {}, // no-op for traces
|
||||
});
|
||||
|
||||
export default getTracesUpdaterConfig;
|
||||
@@ -0,0 +1,84 @@
import useUrlQuery from 'hooks/useUrlQuery';
import {
	PreferenceContextValue,
	PreferenceMode,
} from 'providers/preferences/types';
import React, { createContext, useContext, useMemo } from 'react';
import { useLocation } from 'react-router-dom';
import { DataSource } from 'types/common/queryBuilder';

import { usePreferenceSync } from '../sync/usePreferenceSync';

const PreferenceContext = createContext<PreferenceContextValue | undefined>(
	undefined,
);

export function PreferenceContextProvider({
	children,
}: {
	children: React.ReactNode;
}): JSX.Element {
	const location = useLocation();
	const params = useUrlQuery();

	let savedViewId = '';
	const viewKeyParam = params.get('viewKey');
	if (viewKeyParam) {
		try {
			savedViewId = JSON.parse(viewKeyParam);
		} catch (e) {
			console.error(e);
		}
	}
	let dataSource: DataSource = DataSource.LOGS;
	if (location.pathname.includes('traces')) dataSource = DataSource.TRACES;

	const {
		preferences,
		loading,
		error,
		updateColumns,
		updateFormatting,
	} = usePreferenceSync({
		mode: savedViewId ? PreferenceMode.SAVED_VIEW : PreferenceMode.DIRECT,
		savedViewId: savedViewId || undefined,
		dataSource,
	});

	const value = useMemo<PreferenceContextValue>(
		() => ({
			preferences,
			loading,
			error,
			mode: savedViewId ? PreferenceMode.SAVED_VIEW : PreferenceMode.DIRECT,
			savedViewId: savedViewId || undefined,
			dataSource,
			updateColumns,
			updateFormatting,
		}),
		[
			savedViewId,
			dataSource,
			preferences,
			loading,
			error,
			updateColumns,
			updateFormatting,
		],
	);

	return (
		<PreferenceContext.Provider value={value}>
			{children}
		</PreferenceContext.Provider>
	);
}

export function usePreferenceContext(): PreferenceContextValue {
	const ctx = useContext(PreferenceContext);
	if (!ctx)
		throw new Error(
			'usePreferenceContext must be used within PreferenceContextProvider',
		);
	return ctx;
}
|
||||
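The TabRoutes changes earlier in this diff wrap LogsExplorer, TracesExplorer, and the metrics Explorer page in this provider. A minimal sketch of a consumer rendered anywhere under it (the component name and markup are illustrative only):

// Sketch: reading and updating preferences from the context
function ColumnsSummary(): JSX.Element {
	const { preferences, loading, updateColumns } = usePreferenceContext();
	if (loading || !preferences) return <div>Loading preferences…</div>;
	return (
		<button type="button" onClick={(): void => updateColumns([])}>
			{preferences.columns.length} columns selected
		</button>
	);
}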
frontend/src/providers/preferences/loader/usePreferenceLoader.ts (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
/* eslint-disable sonarjs/cognitive-complexity */
|
||||
/* eslint-disable no-empty */
|
||||
import { useEffect, useState } from 'react';
|
||||
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
import { DataSource } from 'types/common/queryBuilder';
|
||||
|
||||
import logsLoaderConfig from '../configs/logsLoaderConfig';
|
||||
import tracesLoaderConfig from '../configs/tracesLoaderConfig';
|
||||
import { FormattingOptions, Preferences } from '../types';
|
||||
|
||||
// Generic preferences loader that works with any config
|
||||
async function preferencesLoader<T>(config: {
|
||||
priority: readonly string[];
|
||||
[key: string]: any;
|
||||
}): Promise<T> {
|
||||
const findValidLoader = async (): Promise<T> => {
|
||||
// Try each loader in priority order
|
||||
const results = await Promise.all(
|
||||
config.priority.map(async (source) => ({
|
||||
source,
|
||||
result: await config[source](),
|
||||
})),
|
||||
);
|
||||
|
||||
// Find valid columns and formatting independently
|
||||
const validColumnsResult = results.find(
|
||||
({ result }) => result.columns?.length,
|
||||
);
|
||||
const validFormattingResult = results.find(({ result }) => result.formatting);
|
||||
|
||||
// Combine valid results or fallback to default
|
||||
const finalResult = {
|
||||
columns: validColumnsResult?.result.columns || config.default().columns,
|
||||
formatting:
|
||||
validFormattingResult?.result.formatting || config.default().formatting,
|
||||
};
|
||||
|
||||
return finalResult as T;
|
||||
};
|
||||
|
||||
return findValidLoader();
|
||||
}
|
||||
|
||||
// Use the generic loader with specific configs
|
||||
async function logsPreferencesLoader(): Promise<{
|
||||
columns: BaseAutocompleteData[];
|
||||
formatting: FormattingOptions;
|
||||
}> {
|
||||
return preferencesLoader(logsLoaderConfig);
|
||||
}
|
||||
|
||||
async function tracesPreferencesLoader(): Promise<{
|
||||
columns: BaseAutocompleteData[];
|
||||
}> {
|
||||
return preferencesLoader(tracesLoaderConfig);
|
||||
}
|
||||
|
||||
export function usePreferenceLoader({
|
||||
dataSource,
|
||||
reSync,
|
||||
setReSync,
|
||||
}: {
|
||||
dataSource: DataSource;
|
||||
reSync: boolean;
|
||||
setReSync: (value: boolean) => void;
|
||||
}): {
|
||||
preferences: Preferences | null;
|
||||
loading: boolean;
|
||||
error: Error | null;
|
||||
} {
|
||||
const [preferences, setPreferences] = useState<Preferences | null>(null);
|
||||
const [loading, setLoading] = useState(true);
|
||||
const [error, setError] = useState<Error | null>(null);
|
||||
|
||||
useEffect((): void => {
|
||||
async function loadPreferences(): Promise<void> {
|
||||
setLoading(true);
|
||||
setError(null);
|
||||
|
||||
try {
|
||||
if (dataSource === DataSource.LOGS) {
|
||||
const { columns, formatting } = await logsPreferencesLoader();
|
||||
setPreferences({ columns, formatting });
|
||||
}
|
||||
|
||||
if (dataSource === DataSource.TRACES) {
|
||||
const { columns } = await tracesPreferencesLoader();
|
||||
setPreferences({ columns });
|
||||
}
|
||||
} catch (e) {
|
||||
setError(e as Error);
|
||||
} finally {
|
||||
setLoading(false);
|
||||
// Reset reSync back to false after loading is complete
|
||||
if (reSync) {
|
||||
setReSync(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Only load preferences on initial mount or when reSync is true
|
||||
if (loading || reSync) {
|
||||
loadPreferences();
|
||||
}
|
||||
}, [dataSource, reSync, setReSync, loading]);
|
||||
|
||||
return { preferences, loading, error };
|
||||
}
|
||||
frontend/src/providers/preferences/sync/usePreferenceSync.ts (new file, 84 lines)
@@ -0,0 +1,84 @@
import { defaultLogsSelectedColumns } from 'container/OptionsMenu/constants';
import { defaultSelectedColumns as defaultTracesSelectedColumns } from 'container/TracesExplorer/ListView/configs';
import { useGetAllViews } from 'hooks/saveViews/useGetAllViews';
import { useEffect, useState } from 'react';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { DataSource } from 'types/common/queryBuilder';

import { usePreferenceLoader } from '../loader/usePreferenceLoader';
import { FormattingOptions, PreferenceMode, Preferences } from '../types';
import { usePreferenceUpdater } from '../updater/usePreferenceUpdater';

export function usePreferenceSync({
	mode,
	dataSource,
	savedViewId,
}: {
	mode: PreferenceMode;
	dataSource: DataSource;
	savedViewId: string | undefined;
}): {
	preferences: Preferences | null;
	loading: boolean;
	error: Error | null;
	updateColumns: (newColumns: BaseAutocompleteData[]) => void;
	updateFormatting: (newFormatting: FormattingOptions) => void;
} {
	const { data: viewsData } = useGetAllViews(dataSource);

	const [
		savedViewPreferences,
		setSavedViewPreferences,
	] = useState<Preferences | null>(null);

	useEffect(() => {
		const extraData = viewsData?.data?.data?.find(
			(view) => view.id === savedViewId,
		)?.extraData;

		const parsedExtraData = JSON.parse(extraData || '{}');
		let columns: BaseAutocompleteData[] = [];
		let formatting: FormattingOptions | undefined;
		if (dataSource === DataSource.LOGS) {
			columns = parsedExtraData?.selectColumns || defaultLogsSelectedColumns;
			formatting = {
				maxLines: parsedExtraData?.maxLines ?? 2,
				format: parsedExtraData?.format ?? 'table',
				fontSize: parsedExtraData?.fontSize ?? 'small',
				version: parsedExtraData?.version ?? 1,
			};
		}
		if (dataSource === DataSource.TRACES) {
			columns = parsedExtraData?.selectColumns || defaultTracesSelectedColumns;
		}
		setSavedViewPreferences({ columns, formatting });
	}, [viewsData, dataSource, savedViewId, mode]);

	// We are using a reSync state because we have URL updates as well as local storage updates
	// and we want to make sure we are always using the latest preferences
	const [reSync, setReSync] = useState(false);
	const { preferences, loading, error } = usePreferenceLoader({
		dataSource,
		reSync,
		setReSync,
	});

	const { updateColumns, updateFormatting } = usePreferenceUpdater({
		dataSource,
		mode,
		preferences,
		setReSync,
		setSavedViewPreferences,
	});

	return {
		preferences:
			mode === PreferenceMode.SAVED_VIEW && savedViewId
				? savedViewPreferences
				: preferences,
		loading,
		error,
		updateColumns,
		updateFormatting,
	};
}
32
frontend/src/providers/preferences/types/index.ts
Normal file
@@ -0,0 +1,32 @@
import { LogViewMode } from 'container/LogsTable';
import { FontSize } from 'container/OptionsMenu/types';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { DataSource } from 'types/common/queryBuilder';

export enum PreferenceMode {
	SAVED_VIEW = 'savedView',
	DIRECT = 'direct',
}

export interface PreferenceContextValue {
	preferences: Preferences | null;
	loading: boolean;
	error: Error | null;
	mode: PreferenceMode;
	savedViewId?: string;
	dataSource: DataSource;
	updateColumns: (newColumns: BaseAutocompleteData[]) => void;
	updateFormatting: (newFormatting: FormattingOptions) => void;
}

export interface FormattingOptions {
	maxLines?: number;
	format?: LogViewMode;
	fontSize?: FontSize;
	version?: number;
}

export interface Preferences {
	columns: BaseAutocompleteData[];
	formatting?: FormattingOptions;
}
@@ -0,0 +1,78 @@
import {
	defaultOptionsQuery,
	URL_OPTIONS,
} from 'container/OptionsMenu/constants';
import { OptionsQuery } from 'container/OptionsMenu/types';
import useUrlQueryData from 'hooks/useUrlQueryData';
import { Dispatch, SetStateAction } from 'react';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { DataSource } from 'types/common/queryBuilder';

import getLogsUpdaterConfig from '../configs/logsUpdaterConfig';
import getTracesUpdaterConfig from '../configs/tracesUpdaterConfig';
import { FormattingOptions, Preferences } from '../types';

const metricsUpdater = {
	updateColumns: (): void => {}, // no-op for metrics
	updateFormatting: (): void => {}, // no-op for metrics
};

const getUpdaterConfig = (
	preferences: Preferences | null,
	redirectWithOptionsData: (options: OptionsQuery) => void,
	setSavedViewPreferences: Dispatch<SetStateAction<Preferences | null>>,
): Record<
	DataSource,
	{
		updateColumns: (newColumns: BaseAutocompleteData[], mode: string) => void;
		updateFormatting: (newFormatting: FormattingOptions, mode: string) => void;
	}
> => ({
	[DataSource.LOGS]: getLogsUpdaterConfig(
		preferences,
		redirectWithOptionsData,
		setSavedViewPreferences,
	),
	[DataSource.TRACES]: getTracesUpdaterConfig(
		redirectWithOptionsData,
		setSavedViewPreferences,
	),
	[DataSource.METRICS]: metricsUpdater,
});

export function usePreferenceUpdater({
	dataSource,
	mode,
	preferences,
	setReSync,
	setSavedViewPreferences,
}: {
	dataSource: DataSource;
	mode: string;
	preferences: Preferences | null;
	setReSync: Dispatch<SetStateAction<boolean>>;
	setSavedViewPreferences: Dispatch<SetStateAction<Preferences | null>>;
}): {
	updateColumns: (newColumns: BaseAutocompleteData[]) => void;
	updateFormatting: (newFormatting: FormattingOptions) => void;
} {
	const {
		redirectWithQuery: redirectWithOptionsData,
	} = useUrlQueryData<OptionsQuery>(URL_OPTIONS, defaultOptionsQuery);
	const updater = getUpdaterConfig(
		preferences,
		redirectWithOptionsData,
		setSavedViewPreferences,
	)[dataSource];

	return {
		updateColumns: (newColumns: BaseAutocompleteData[]): void => {
			updater.updateColumns(newColumns, mode);
			setReSync(true);
		},
		updateFormatting: (newFormatting: FormattingOptions): void => {
			updater.updateFormatting(newFormatting, mode);
			setReSync(true);
		},
	};
}
@@ -44,6 +44,7 @@ func Success(rw http.ResponseWriter, httpCode int, data interface{}) {
	}

	rw.Header().Set("Content-Type", "application/json")
	rw.WriteHeader(httpCode)
	_, _ = rw.Write(body)
}
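For context, net/http only sends headers that are set before WriteHeader is called; changes to the header map after that point are silently ignored, which is why Content-Type has to be written first. A minimal standalone handler showing the required ordering (illustrative only, not part of the diff):

package main

import "net/http"

func success(rw http.ResponseWriter, httpCode int, body []byte) {
	// Headers must be set before WriteHeader; afterwards they are ignored.
	rw.Header().Set("Content-Type", "application/json")
	rw.WriteHeader(httpCode)
	_, _ = rw.Write(body)
}

func main() {
	http.HandleFunc("/ok", func(rw http.ResponseWriter, _ *http.Request) {
		success(rw, http.StatusOK, []byte(`{"status":"success"}`))
	})
	_ = http.ListenAndServe(":8080", nil)
}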
@@ -33,6 +33,12 @@ func (a *API) QueryRange(rw http.ResponseWriter, req *http.Request) {
		return
	}

	// Validate the query request
	if err := queryRangeRequest.Validate(); err != nil {
		render.Error(rw, err)
		return
	}

	orgID, err := valuer.NewUUID(claims.OrgID)
	if err != nil {
		render.Error(rw, err)
@@ -70,7 +70,12 @@ func (bc *bucketCache) GetMissRanges(
	// Get query window
	startMs, endMs := q.Window()

	bc.logger.DebugContext(ctx, "getting miss ranges", "fingerprint", q.Fingerprint(), "start", startMs, "end", endMs)
	bc.logger.DebugContext(ctx, "getting miss ranges",
		"fingerprint", q.Fingerprint(),
		"start", startMs,
		"end", endMs,
		"start_time", time.UnixMilli(int64(startMs)).Format(time.RFC3339),
		"end_time", time.UnixMilli(int64(endMs)).Format(time.RFC3339))

	// Generate cache key
	cacheKey := bc.generateCacheKey(q)
@@ -117,7 +122,7 @@ func (bc *bucketCache) GetMissRanges(
}

// Put stores fresh query results in the cache
func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, fresh *qbtypes.Result) {
func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, step qbtypes.Step, fresh *qbtypes.Result) {
	// Get query window
	startMs, endMs := q.Window()

@@ -159,8 +164,51 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
		return
	}

	// Convert trimmed result to buckets
	freshBuckets := bc.resultToBuckets(ctx, trimmedResult, startMs, cachableEndMs)
	// Adjust start and end times to only cache complete intervals
	cachableStartMs := startMs
	stepMs := uint64(step.Duration.Milliseconds())

	// If we have a step interval, adjust boundaries to only cache complete intervals
	if stepMs > 0 {
		// If start is not aligned, round up to next step boundary (first complete interval)
		if startMs%stepMs != 0 {
			cachableStartMs = ((startMs / stepMs) + 1) * stepMs
		}

		// If end is not aligned, round down to previous step boundary (last complete interval)
		if cachableEndMs%stepMs != 0 {
			cachableEndMs = (cachableEndMs / stepMs) * stepMs
		}

		// If after adjustment we have no complete intervals, don't cache
		if cachableStartMs >= cachableEndMs {
			bc.logger.DebugContext(ctx, "no complete intervals to cache",
				"original_start", startMs,
				"original_end", endMs,
				"adjusted_start", cachableStartMs,
				"adjusted_end", cachableEndMs,
				"step", stepMs)
			return
		}
	}

	// Convert trimmed result to buckets with adjusted boundaries
	freshBuckets := bc.resultToBuckets(ctx, trimmedResult, cachableStartMs, cachableEndMs)

	// Debug: Log what we're about to cache
	if tsData, ok := trimmedResult.Value.(*qbtypes.TimeSeriesData); ok {
		totalSeries := 0
		for _, agg := range tsData.Aggregations {
			totalSeries += len(agg.Series)
		}
		bc.logger.DebugContext(ctx, "converting result to buckets",
			"total_series", totalSeries,
			"original_start", startMs,
			"original_end", endMs,
			"cachable_start", cachableStartMs,
			"cachable_end", cachableEndMs,
			"step", stepMs)
	}

	// If no fresh buckets and no existing data, don't cache
	if len(freshBuckets) == 0 && len(existingData.Buckets) == 0 {
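The boundary math above caches only step-complete intervals: an unaligned start is rounded up to the next step boundary, an unaligned end is rounded down to the previous one, and nothing is cached if no full interval remains. A minimal standalone sketch of that arithmetic, using the same timestamps as the step-alignment test below (the alignStepBounds helper name is ours, not part of the change):

package main

import "fmt"

// alignStepBounds rounds startMs up and endMs down to stepMs boundaries so
// only complete intervals are cached; ok is false when nothing remains.
func alignStepBounds(startMs, endMs, stepMs uint64) (uint64, uint64, bool) {
	if stepMs == 0 {
		return startMs, endMs, true // no step: cache the whole range
	}
	if startMs%stepMs != 0 {
		startMs = ((startMs / stepMs) + 1) * stepMs
	}
	if endMs%stepMs != 0 {
		endMs = (endMs / stepMs) * stepMs
	}
	return startMs, endMs, startMs < endMs
}

func main() {
	// 12:02 to 12:58 with a 5-minute step keeps only 12:05 to 12:55.
	s, e, ok := alignStepBounds(1672563720000, 1672567080000, 5*60*1000)
	fmt.Println(s, e, ok) // 1672563900000 1672566900000 true
}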
@@ -485,6 +533,12 @@ func (bc *bucketCache) mergeTimeSeriesValues(ctx context.Context, buckets []*cac
|
||||
}
|
||||
|
||||
if existingSeries, ok := seriesMap[key]; ok {
|
||||
// Merge values, avoiding duplicate timestamps
|
||||
timestampMap := make(map[int64]bool)
|
||||
for _, v := range existingSeries.Values {
|
||||
timestampMap[v.Timestamp] = true
|
||||
}
|
||||
|
||||
// Pre-allocate capacity for merged values
|
||||
newCap := len(existingSeries.Values) + len(series.Values)
|
||||
if cap(existingSeries.Values) < newCap {
|
||||
@@ -492,7 +546,13 @@ func (bc *bucketCache) mergeTimeSeriesValues(ctx context.Context, buckets []*cac
|
||||
copy(newValues, existingSeries.Values)
|
||||
existingSeries.Values = newValues
|
||||
}
|
||||
existingSeries.Values = append(existingSeries.Values, series.Values...)
|
||||
|
||||
// Only add values with new timestamps
|
||||
for _, v := range series.Values {
|
||||
if !timestampMap[v.Timestamp] {
|
||||
existingSeries.Values = append(existingSeries.Values, v)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// New series
|
||||
seriesMap[key] = series
|
||||
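The merge hunk above replaces a blind append with a duplicate-timestamp guard, so overlapping cached and fresh buckets no longer produce repeated points in the same series. A small standalone sketch of that guard (the point type and helper name are ours):

package main

import "fmt"

type point struct {
	Timestamp int64
	Value     float64
}

// mergeValues appends only points whose timestamps are not already present.
func mergeValues(existing, fresh []point) []point {
	seen := make(map[int64]bool, len(existing))
	for _, p := range existing {
		seen[p.Timestamp] = true
	}
	for _, p := range fresh {
		if !seen[p.Timestamp] {
			existing = append(existing, p)
			seen[p.Timestamp] = true
		}
	}
	return existing
}

func main() {
	merged := mergeValues(
		[]point{{1000, 1}, {2000, 2}},
		[]point{{2000, 2}, {3000, 3}}, // 2000 overlaps and is dropped
	)
	fmt.Println(len(merged)) // 3
}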
@@ -697,7 +757,7 @@ func (bc *bucketCache) trimResultToFluxBoundary(result *qbtypes.Result, fluxBoun
|
||||
switch result.Type {
|
||||
case qbtypes.RequestTypeTimeSeries:
|
||||
// Trim time series data
|
||||
if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok {
|
||||
if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok && tsData != nil {
|
||||
trimmedData := &qbtypes.TimeSeriesData{
|
||||
QueryName: tsData.QueryName,
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ func BenchmarkBucketCache_GetMissRanges(b *testing.B) {
|
||||
endMs: uint64((i + 1) * 10000),
|
||||
}
|
||||
result := createBenchmarkResult(query.startMs, query.endMs, 1000)
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
}
|
||||
|
||||
// Create test queries with varying cache hit patterns
|
||||
@@ -121,7 +121,7 @@ func BenchmarkBucketCache_Put(b *testing.B) {
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for j := 0; j < tc.numQueries; j++ {
|
||||
bc.Put(ctx, orgID, queries[j], results[j])
|
||||
bc.Put(ctx, orgID, queries[j], qbtypes.Step{}, results[j])
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -259,7 +259,7 @@ func BenchmarkBucketCache_ConcurrentOperations(b *testing.B) {
|
||||
endMs: uint64((i + 1) * 10000),
|
||||
}
|
||||
result := createBenchmarkResult(query.startMs, query.endMs, 1000)
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
@@ -284,7 +284,7 @@ func BenchmarkBucketCache_ConcurrentOperations(b *testing.B) {
|
||||
endMs: uint64((i + 1) * 10000),
|
||||
}
|
||||
result := createBenchmarkResult(query.startMs, query.endMs, 1000)
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
case 2: // Partial read
|
||||
query := &mockQuery{
|
||||
fingerprint: fmt.Sprintf("concurrent-query-%d", i%100),
|
||||
|
||||
117
pkg/querier/bucket_cache_step_test.go
Normal file
@@ -0,0 +1,117 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBucketCacheStepAlignment(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
orgID := valuer.UUID{}
|
||||
cache := createTestCache(t)
|
||||
bc := NewBucketCache(instrumentationtest.New().ToProviderSettings(), cache, time.Hour, 5*time.Minute)
|
||||
|
||||
// Test with 5-minute step
|
||||
step := qbtypes.Step{Duration: 5 * time.Minute}
|
||||
|
||||
// Query from 12:02 to 12:58 (both unaligned)
|
||||
// Complete intervals: 12:05 to 12:55
|
||||
query := &mockQuery{
|
||||
fingerprint: "test-step-alignment",
|
||||
startMs: 1672563720000, // 12:02
|
||||
endMs: 1672567080000, // 12:58
|
||||
}
|
||||
|
||||
result := &qbtypes.Result{
|
||||
Type: qbtypes.RequestTypeTimeSeries,
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "test"},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1672563720000, Value: 1, Partial: true}, // 12:02
|
||||
{Timestamp: 1672563900000, Value: 2}, // 12:05
|
||||
{Timestamp: 1672564200000, Value: 2.5}, // 12:10
|
||||
{Timestamp: 1672564500000, Value: 2.6}, // 12:15
|
||||
{Timestamp: 1672566600000, Value: 2.9}, // 12:50
|
||||
{Timestamp: 1672566900000, Value: 3}, // 12:55
|
||||
{Timestamp: 1672567080000, Value: 4, Partial: true}, // 12:58
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Put result in cache
|
||||
bc.Put(ctx, orgID, query, step, result)
|
||||
|
||||
// Get cached data
|
||||
cached, missing := bc.GetMissRanges(ctx, orgID, query, step)
|
||||
|
||||
// Should have cached data
|
||||
require.NotNil(t, cached)
|
||||
|
||||
// Log the missing ranges to debug
|
||||
t.Logf("Missing ranges: %v", missing)
|
||||
for i, r := range missing {
|
||||
t.Logf("Missing range %d: From=%d, To=%d", i, r.From, r.To)
|
||||
}
|
||||
|
||||
// Should have 2 missing ranges for partial intervals
|
||||
require.Len(t, missing, 2)
|
||||
|
||||
// First partial: 12:02 to 12:05
|
||||
assert.Equal(t, uint64(1672563720000), missing[0].From)
|
||||
assert.Equal(t, uint64(1672563900000), missing[0].To)
|
||||
|
||||
// Second partial: 12:55 to 12:58
|
||||
assert.Equal(t, uint64(1672566900000), missing[1].From, "Second missing range From")
|
||||
assert.Equal(t, uint64(1672567080000), missing[1].To, "Second missing range To")
|
||||
}
|
||||
|
||||
func TestBucketCacheNoStepInterval(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
orgID := valuer.UUID{}
|
||||
cache := createTestCache(t)
|
||||
bc := NewBucketCache(instrumentationtest.New().ToProviderSettings(), cache, time.Hour, 5*time.Minute)
|
||||
|
||||
// Test with no step (stepMs = 0)
|
||||
step := qbtypes.Step{Duration: 0}
|
||||
|
||||
query := &mockQuery{
|
||||
fingerprint: "test-no-step",
|
||||
startMs: 1672563720000,
|
||||
endMs: 1672567080000,
|
||||
}
|
||||
|
||||
result := &qbtypes.Result{
|
||||
Type: qbtypes.RequestTypeTimeSeries,
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{{Index: 0, Series: []*qbtypes.TimeSeries{}}},
|
||||
},
|
||||
}
|
||||
|
||||
// Should cache the entire range when step is 0
|
||||
bc.Put(ctx, orgID, query, step, result)
|
||||
|
||||
cached, missing := bc.GetMissRanges(ctx, orgID, query, step)
|
||||
assert.NotNil(t, cached)
|
||||
assert.Len(t, missing, 0)
|
||||
}
|
||||
@@ -159,7 +159,7 @@ func TestBucketCache_Put_And_Get(t *testing.T) {
|
||||
}
|
||||
|
||||
// Store in cache
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, result)
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)
|
||||
|
||||
// Wait a bit for cache to be written
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
@@ -193,7 +193,7 @@ func TestBucketCache_PartialHit(t *testing.T) {
|
||||
Type: qbtypes.RequestTypeTimeSeries,
|
||||
Value: createTestTimeSeries("A", 1000, 3000, 1000),
|
||||
}
|
||||
bc.Put(context.Background(), valuer.UUID{}, query1, result1)
|
||||
bc.Put(context.Background(), valuer.UUID{}, query1, qbtypes.Step{}, result1)
|
||||
|
||||
// Wait for cache write
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
@@ -226,7 +226,7 @@ func TestBucketCache_MultipleBuckets(t *testing.T) {
|
||||
startMs: 1000,
|
||||
endMs: 2000,
|
||||
}
|
||||
bc.Put(context.Background(), valuer.UUID{}, query1, &qbtypes.Result{
|
||||
bc.Put(context.Background(), valuer.UUID{}, query1, qbtypes.Step{}, &qbtypes.Result{
|
||||
Type: qbtypes.RequestTypeTimeSeries,
|
||||
Value: createTestTimeSeries("A", 1000, 2000, 100),
|
||||
})
|
||||
@@ -236,7 +236,7 @@ func TestBucketCache_MultipleBuckets(t *testing.T) {
|
||||
startMs: 3000,
|
||||
endMs: 4000,
|
||||
}
|
||||
bc.Put(context.Background(), valuer.UUID{}, query2, &qbtypes.Result{
|
||||
bc.Put(context.Background(), valuer.UUID{}, query2, qbtypes.Step{}, &qbtypes.Result{
|
||||
Type: qbtypes.RequestTypeTimeSeries,
|
||||
Value: createTestTimeSeries("A", 3000, 4000, 100),
|
||||
})
|
||||
@@ -284,7 +284,7 @@ func TestBucketCache_FluxInterval(t *testing.T) {
|
||||
}
|
||||
|
||||
// This should not be cached due to flux interval
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, result)
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)
|
||||
|
||||
// Wait a bit
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
@@ -354,7 +354,7 @@ func TestBucketCache_MergeTimeSeriesResults(t *testing.T) {
|
||||
startMs: 1000,
|
||||
endMs: 3000,
|
||||
}
|
||||
bc.Put(context.Background(), valuer.UUID{}, query1, &qbtypes.Result{
|
||||
bc.Put(context.Background(), valuer.UUID{}, query1, qbtypes.Step{}, &qbtypes.Result{
|
||||
Type: qbtypes.RequestTypeTimeSeries,
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "A",
|
||||
@@ -370,7 +370,7 @@ func TestBucketCache_MergeTimeSeriesResults(t *testing.T) {
|
||||
startMs: 3000,
|
||||
endMs: 5000,
|
||||
}
|
||||
bc.Put(context.Background(), valuer.UUID{}, query2, &qbtypes.Result{
|
||||
bc.Put(context.Background(), valuer.UUID{}, query2, qbtypes.Step{}, &qbtypes.Result{
|
||||
Type: qbtypes.RequestTypeTimeSeries,
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "A",
|
||||
@@ -445,7 +445,7 @@ func TestBucketCache_RawData(t *testing.T) {
|
||||
Value: rawData,
|
||||
}
|
||||
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, result)
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
|
||||
@@ -485,7 +485,7 @@ func TestBucketCache_ScalarData(t *testing.T) {
|
||||
Value: scalarData,
|
||||
}
|
||||
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, result)
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
|
||||
@@ -513,7 +513,7 @@ func TestBucketCache_EmptyFingerprint(t *testing.T) {
|
||||
Value: createTestTimeSeries("A", 1000, 5000, 1000),
|
||||
}
|
||||
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, result)
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Should still be able to retrieve
|
||||
@@ -568,7 +568,7 @@ func TestBucketCache_ConcurrentAccess(t *testing.T) {
|
||||
Type: qbtypes.RequestTypeTimeSeries,
|
||||
Value: createTestTimeSeries(fmt.Sprintf("Q%d", id), query.startMs, query.endMs, 100),
|
||||
}
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, result)
|
||||
bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)
|
||||
done <- true
|
||||
}(i)
|
||||
}
|
||||
@@ -628,7 +628,7 @@ func TestBucketCache_GetMissRanges_FluxInterval(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
bc.Put(ctx, orgID, query, cachedResult)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, cachedResult)
|
||||
|
||||
// Get miss ranges
|
||||
cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
|
||||
@@ -690,7 +690,7 @@ func TestBucketCache_Put_FluxIntervalTrimming(t *testing.T) {
|
||||
}
|
||||
|
||||
// Put the result
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
|
||||
// Retrieve cached data
|
||||
cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
|
||||
@@ -760,7 +760,7 @@ func TestBucketCache_Put_EntireRangeInFluxInterval(t *testing.T) {
|
||||
}
|
||||
|
||||
// Put the result - should not cache anything
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
|
||||
// Try to get cached data - should have no cached data
|
||||
cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
|
||||
@@ -878,7 +878,7 @@ func TestBucketCache_EmptyDataHandling(t *testing.T) {
|
||||
}
|
||||
|
||||
// Put the result
|
||||
bc.Put(ctx, orgID, query, tt.result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, tt.result)
|
||||
|
||||
// Wait a bit for cache to be written
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
@@ -944,7 +944,7 @@ func TestBucketCache_PartialValues(t *testing.T) {
|
||||
}
|
||||
|
||||
// Put the result
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
|
||||
// Wait for cache to be written
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
@@ -1014,7 +1014,7 @@ func TestBucketCache_AllPartialValues(t *testing.T) {
|
||||
}
|
||||
|
||||
// Put the result
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
|
||||
// Wait for cache to be written
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
@@ -1075,7 +1075,7 @@ func TestBucketCache_FilteredCachedResults(t *testing.T) {
|
||||
}
|
||||
|
||||
// Cache the wide range
|
||||
bc.Put(ctx, orgID, query1, result1)
|
||||
bc.Put(ctx, orgID, query1, qbtypes.Step{}, result1)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Now query for a smaller range (2000-3500ms)
|
||||
@@ -1246,7 +1246,7 @@ func TestBucketCache_PartialValueDetection(t *testing.T) {
|
||||
}
|
||||
|
||||
// Put the result
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Get cached data
|
||||
@@ -1300,7 +1300,7 @@ func TestBucketCache_PartialValueDetection(t *testing.T) {
|
||||
}
|
||||
|
||||
// Put the result
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Get cached data
|
||||
@@ -1352,7 +1352,7 @@ func TestBucketCache_PartialValueDetection(t *testing.T) {
|
||||
}
|
||||
|
||||
// Put the result
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Get cached data
|
||||
@@ -1409,7 +1409,7 @@ func TestBucketCache_NoCache(t *testing.T) {
|
||||
}
|
||||
|
||||
// Put the result in cache
|
||||
bc.Put(ctx, orgID, query, result)
|
||||
bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Verify data is cached
|
||||
|
||||
@@ -118,6 +118,10 @@ func (q *builderQuery[T]) Fingerprint() string {
		parts = append(parts, fmt.Sprintf("having=%s", q.spec.Having.Expression))
	}

	if q.spec.ShiftBy != 0 {
		parts = append(parts, fmt.Sprintf("shiftby=%d", q.spec.ShiftBy))
	}

	return strings.Join(parts, "&")
}
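Folding ShiftBy into the fingerprint means a time-shifted query gets its own cache key instead of sharing buckets with its unshifted counterpart. A rough illustration of the key construction (simplified; only the shiftby handling follows the hunk above, the rest is assumed):

package main

import (
	"fmt"
	"strings"
)

// fingerprint builds a cache key from the query parts; shiftby is appended
// only when non-zero, mirroring the change above.
func fingerprint(signal string, shiftBy int64) string {
	parts := []string{"signal=" + signal}
	if shiftBy != 0 {
		parts = append(parts, fmt.Sprintf("shiftby=%d", shiftBy))
	}
	return strings.Join(parts, "&")
}

func main() {
	fmt.Println(fingerprint("metrics", 0))    // signal=metrics
	fmt.Println(fingerprint("metrics", 3600)) // signal=metrics&shiftby=3600
}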
@@ -224,16 +228,18 @@ func (q *builderQuery[T]) executeWindowList(ctx context.Context) (*qbtypes.Resul
	isAsc := len(q.spec.Order) > 0 &&
		strings.ToLower(string(q.spec.Order[0].Direction.StringValue())) == "asc"

	fromMS, toMS := q.fromMS, q.toMS

	// Adjust [fromMS,toMS] window if a cursor was supplied
	if cur := strings.TrimSpace(q.spec.Cursor); cur != "" {
		if ts, err := decodeCursor(cur); err == nil {
			if isAsc {
				if uint64(ts) >= q.fromMS {
					q.fromMS = uint64(ts + 1)
				if uint64(ts) >= fromMS {
					fromMS = uint64(ts + 1)
				}
			} else { // DESC
				if uint64(ts) <= q.toMS {
					q.toMS = uint64(ts - 1)
				if uint64(ts) <= toMS {
					toMS = uint64(ts - 1)
				}
			}
		}
@@ -252,7 +258,16 @@ func (q *builderQuery[T]) executeWindowList(ctx context.Context) (*qbtypes.Resul
	totalBytes := uint64(0)
	start := time.Now()

	for _, r := range makeBuckets(q.fromMS, q.toMS) {
	// Get buckets and reverse them for ascending order
	buckets := makeBuckets(fromMS, toMS)
	if isAsc {
		// Reverse the buckets for ascending order
		for i, j := 0, len(buckets)-1; i < j; i, j = i+1, j-1 {
			buckets[i], buckets[j] = buckets[j], buckets[i]
		}
	}

	for _, r := range buckets {
		q.spec.Offset = 0
		q.spec.Limit = need
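The list path above also stops mutating q.fromMS/q.toMS directly, working on local copies instead, and flips the newest-first bucket order when the query is ascending so pages fill from the oldest bucket first. A small standalone sketch of the in-place reversal (the bucket type and helper name are ours):

package main

import "fmt"

type bucket struct{ fromMS, toMS uint64 }

// reverseIfAsc flips the newest-first bucket slice so an ascending query
// walks buckets oldest-first.
func reverseIfAsc(buckets []bucket, isAsc bool) []bucket {
	if !isAsc {
		return buckets
	}
	for i, j := 0, len(buckets)-1; i < j; i, j = i+1, j-1 {
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	return buckets
}

func main() {
	b := []bucket{{3000, 4000}, {2000, 3000}, {1000, 2000}} // newest first
	fmt.Println(reverseIfAsc(b, true))                      // oldest first
}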
133
pkg/querier/builder_query_test.go
Normal file
@@ -0,0 +1,133 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/querybuilder"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
||||
func TestBuilderQueryFingerprint(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
query *builderQuery[qbtypes.MetricAggregation]
|
||||
expectInKey []string
|
||||
notExpectInKey []string
|
||||
}{
|
||||
{
|
||||
name: "fingerprint includes shiftby when ShiftBy field is set",
|
||||
query: &builderQuery[qbtypes.MetricAggregation]{
|
||||
kind: qbtypes.RequestTypeTimeSeries,
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
ShiftBy: 3600,
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: "3600"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectInKey: []string{"shiftby=3600"},
|
||||
notExpectInKey: []string{"functions=", "timeshift", "absolute"},
|
||||
},
|
||||
{
|
||||
name: "fingerprint includes shiftby but not other functions",
|
||||
query: &builderQuery[qbtypes.MetricAggregation]{
|
||||
kind: qbtypes.RequestTypeTimeSeries,
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
ShiftBy: 3600,
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: "3600"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: qbtypes.FunctionNameAbsolute,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectInKey: []string{"shiftby=3600"},
|
||||
notExpectInKey: []string{"functions=", "absolute"},
|
||||
},
|
||||
{
|
||||
name: "no shiftby in fingerprint when ShiftBy is zero",
|
||||
query: &builderQuery[qbtypes.MetricAggregation]{
|
||||
kind: qbtypes.RequestTypeTimeSeries,
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
ShiftBy: 0,
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameAbsolute,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectInKey: []string{},
|
||||
notExpectInKey: []string{"shiftby=", "functions=", "absolute"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fingerprint := tt.query.Fingerprint()
|
||||
for _, expected := range tt.expectInKey {
|
||||
assert.True(t, strings.Contains(fingerprint, expected),
|
||||
"Expected fingerprint to contain '%s', got: %s", expected, fingerprint)
|
||||
}
|
||||
for _, notExpected := range tt.notExpectInKey {
|
||||
assert.False(t, strings.Contains(fingerprint, notExpected),
|
||||
"Expected fingerprint NOT to contain '%s', got: %s", notExpected, fingerprint)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
func TestMakeBucketsOrder(t *testing.T) {
|
||||
// Test that makeBuckets returns buckets in reverse chronological order by default
|
||||
// Using milliseconds as input - need > 1 hour range to get multiple buckets
|
||||
now := uint64(1700000000000) // Some timestamp in ms
|
||||
startMS := now
|
||||
endMS := now + uint64(10*60*60*1000) // 10 hours later
|
||||
|
||||
buckets := makeBuckets(startMS, endMS)
|
||||
|
||||
// Should have multiple buckets for a 10 hour range
|
||||
assert.True(t, len(buckets) > 1, "Should have multiple buckets for 10 hour range, got %d", len(buckets))
|
||||
|
||||
// Log buckets for debugging
|
||||
t.Logf("Generated %d buckets:", len(buckets))
|
||||
for i, b := range buckets {
|
||||
durationMs := (b.toNS - b.fromNS) / 1e6
|
||||
t.Logf("Bucket %d: duration=%dms", i, durationMs)
|
||||
}
|
||||
|
||||
// Verify buckets are in reverse chronological order (newest to oldest)
|
||||
for i := 0; i < len(buckets)-1; i++ {
|
||||
assert.True(t, buckets[i].toNS > buckets[i+1].toNS,
|
||||
"Bucket %d end should be after bucket %d end", i, i+1)
|
||||
assert.Equal(t, buckets[i].fromNS, buckets[i+1].toNS,
|
||||
"Bucket %d start should equal bucket %d end (continuous buckets)", i, i+1)
|
||||
}
|
||||
|
||||
// First bucket should end at endNS (converted to nanoseconds)
|
||||
expectedEndNS := querybuilder.ToNanoSecs(endMS)
|
||||
assert.Equal(t, expectedEndNS, buckets[0].toNS)
|
||||
|
||||
// Last bucket should start at startNS (converted to nanoseconds)
|
||||
expectedStartNS := querybuilder.ToNanoSecs(startMS)
|
||||
assert.Equal(t, expectedStartNS, buckets[len(buckets)-1].fromNS)
|
||||
}
|
||||
@@ -176,7 +176,7 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
			lblVals = append(lblVals, *val)
			lblObjs = append(lblObjs, &qbtypes.Label{
				Key:   telemetrytypes.TelemetryFieldKey{Name: name},
				Value: val,
				Value: *val, // Dereference to get the actual string value
			})

		default:
@@ -17,5 +17,5 @@ type BucketCache interface {
	// cached portion + list of gaps to fetch
	GetMissRanges(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, step qbtypes.Step) (cached *qbtypes.Result, missing []*qbtypes.TimeRange)
	// store fresh buckets for future hits
	Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, fresh *qbtypes.Result)
	Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, step qbtypes.Step, fresh *qbtypes.Result)
}
223
pkg/querier/merge_metadata_test.go
Normal file
@@ -0,0 +1,223 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMergeTimeSeriesResults_PreservesMetadata(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Create cached data with metadata
|
||||
cachedValue := &qbtypes.TimeSeriesData{
|
||||
QueryName: "testQuery",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "count_result",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "requests",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
{Timestamp: 2000, Value: 20},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Index: 1,
|
||||
Alias: "sum_result",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "bytes",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "backend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 100},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create fresh results with some overlapping and new data
|
||||
freshResults := []*qbtypes.Result{
|
||||
{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "testQuery",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "count_result", // Same alias
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "requests", // Same unit
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 3000, Value: 30},
|
||||
{Timestamp: 4000, Value: 40},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Index: 2, // New aggregation
|
||||
Alias: "avg_result",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "milliseconds",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "api",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 50},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Merge the results
|
||||
result := q.mergeTimeSeriesResults(cachedValue, freshResults)
|
||||
|
||||
// Verify the result
|
||||
require.NotNil(t, result)
|
||||
assert.Equal(t, "testQuery", result.QueryName)
|
||||
assert.Len(t, result.Aggregations, 3) // Should have 3 aggregations
|
||||
|
||||
// Check each aggregation
|
||||
for _, agg := range result.Aggregations {
|
||||
switch agg.Index {
|
||||
case 0:
|
||||
assert.Equal(t, "count_result", agg.Alias)
|
||||
assert.Equal(t, "requests", agg.Meta.Unit)
|
||||
assert.Len(t, agg.Series, 1)
|
||||
// Should have merged values
|
||||
assert.Len(t, agg.Series[0].Values, 4)
|
||||
case 1:
|
||||
assert.Equal(t, "sum_result", agg.Alias)
|
||||
assert.Equal(t, "bytes", agg.Meta.Unit)
|
||||
assert.Len(t, agg.Series, 1)
|
||||
assert.Len(t, agg.Series[0].Values, 1)
|
||||
case 2:
|
||||
assert.Equal(t, "avg_result", agg.Alias)
|
||||
assert.Equal(t, "milliseconds", agg.Meta.Unit)
|
||||
assert.Len(t, agg.Series, 1)
|
||||
assert.Len(t, agg.Series[0].Values, 1)
|
||||
default:
|
||||
t.Fatalf("Unexpected aggregation index: %d", agg.Index)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeTimeSeriesResults_HandlesEmptyMetadata(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Create cached data without metadata
|
||||
cachedValue := &qbtypes.TimeSeriesData{
|
||||
QueryName: "testQuery",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create fresh results with metadata
|
||||
freshResults := []*qbtypes.Result{
|
||||
{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "testQuery",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "new_alias",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "items",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 2000, Value: 20},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Merge the results
|
||||
result := q.mergeTimeSeriesResults(cachedValue, freshResults)
|
||||
|
||||
// Verify the metadata from fresh results is preserved
|
||||
require.NotNil(t, result)
|
||||
assert.Len(t, result.Aggregations, 1)
|
||||
assert.Equal(t, "new_alias", result.Aggregations[0].Alias)
|
||||
assert.Equal(t, "items", result.Aggregations[0].Meta.Unit)
|
||||
}
|
||||
333
pkg/querier/postprocess.go
Normal file
@@ -0,0 +1,333 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
)
|
||||
|
||||
// PostProcessResults applies postprocessing to query results
|
||||
func (q *querier) PostProcessResults(results map[string]any, req *qbtypes.QueryRangeRequest) (map[string]any, error) {
|
||||
// Convert results to typed format for processing
|
||||
typedResults := make(map[string]*qbtypes.Result)
|
||||
for name, result := range results {
|
||||
typedResults[name] = &qbtypes.Result{
|
||||
Value: result,
|
||||
}
|
||||
}
|
||||
|
||||
// Apply postprocessing based on query types
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
if result, ok := typedResults[spec.Name]; ok {
|
||||
result = postProcessBuilderQuery(q, result, spec, req)
|
||||
typedResults[spec.Name] = result
|
||||
}
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
if result, ok := typedResults[spec.Name]; ok {
|
||||
result = postProcessBuilderQuery(q, result, spec, req)
|
||||
typedResults[spec.Name] = result
|
||||
}
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
if result, ok := typedResults[spec.Name]; ok {
|
||||
result = postProcessMetricQuery(q, result, spec, req)
|
||||
typedResults[spec.Name] = result
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply formula calculations
|
||||
typedResults = q.applyFormulas(typedResults, req)
|
||||
|
||||
// Filter out disabled queries
|
||||
typedResults = q.filterDisabledQueries(typedResults, req)
|
||||
|
||||
// Apply fill gaps if requested
|
||||
if req.FormatOptions != nil && req.FormatOptions.FillGaps {
|
||||
typedResults = q.fillGaps(typedResults, req)
|
||||
}
|
||||
|
||||
// Apply table formatting for UI if requested
|
||||
if req.FormatOptions != nil && req.FormatOptions.FormatTableResultForUI && req.RequestType == qbtypes.RequestTypeScalar {
|
||||
// Format results as a table - this merges all queries into a single table
|
||||
tableResult := q.formatScalarResultsAsTable(typedResults, req)
|
||||
|
||||
// Return the table under the first query's name so it gets included in results
|
||||
if len(req.CompositeQuery.Queries) > 0 {
|
||||
var firstQueryName string
|
||||
switch spec := req.CompositeQuery.Queries[0].Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
firstQueryName = spec.Name
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
firstQueryName = spec.Name
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
firstQueryName = spec.Name
|
||||
}
|
||||
|
||||
if firstQueryName != "" && tableResult["table"] != nil {
|
||||
// Return table under first query name
|
||||
return map[string]any{firstQueryName: tableResult["table"]}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return tableResult, nil
|
||||
}
|
||||
|
||||
// Convert back to map[string]any
|
||||
finalResults := make(map[string]any)
|
||||
for name, result := range typedResults {
|
||||
finalResults[name] = result.Value
|
||||
}
|
||||
|
||||
return finalResults, nil
|
||||
}
|
||||
|
||||
// postProcessBuilderQuery applies postprocessing to a single builder query result
|
||||
func postProcessBuilderQuery[T any](
|
||||
q *querier,
|
||||
result *qbtypes.Result,
|
||||
query qbtypes.QueryBuilderQuery[T],
|
||||
req *qbtypes.QueryRangeRequest,
|
||||
) *qbtypes.Result {
|
||||
|
||||
// Apply functions
|
||||
if len(query.Functions) > 0 {
|
||||
result = q.applyFunctions(result, query.Functions)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// postProcessMetricQuery applies postprocessing to a metric query result
|
||||
func postProcessMetricQuery(
|
||||
q *querier,
|
||||
result *qbtypes.Result,
|
||||
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
|
||||
req *qbtypes.QueryRangeRequest,
|
||||
) *qbtypes.Result {
|
||||
// Apply having clause
|
||||
result = q.applyHavingClause(result, query.Having)
|
||||
|
||||
// Apply series limit
|
||||
if query.Limit > 0 {
|
||||
result = q.applySeriesLimit(result, query.Limit, query.Order)
|
||||
}
|
||||
|
||||
// Apply functions
|
||||
if len(query.Functions) > 0 {
|
||||
result = q.applyFunctions(result, query.Functions)
|
||||
}
|
||||
|
||||
// Apply reduce to for scalar request type
|
||||
if req.RequestType == qbtypes.RequestTypeScalar {
|
||||
// For metrics, prefer the ReduceTo field from first aggregation if set
|
||||
if len(query.Aggregations) > 0 && query.Aggregations[0].ReduceTo != qbtypes.ReduceToUnknown {
|
||||
result = q.applyMetricReduceTo(result, query.Aggregations[0].ReduceTo)
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// applyMetricReduceTo applies reduce to operation using the metric's ReduceTo field
|
||||
func (q *querier) applyMetricReduceTo(result *qbtypes.Result, reduceOp qbtypes.ReduceTo) *qbtypes.Result {
|
||||
tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
return result
|
||||
}
|
||||
|
||||
if tsData != nil {
|
||||
for _, agg := range tsData.Aggregations {
|
||||
for i, series := range agg.Series {
|
||||
// Use the FunctionReduceTo helper
|
||||
reducedSeries := qbtypes.FunctionReduceTo(series, reduceOp)
|
||||
agg.Series[i] = reducedSeries
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// applyHavingClause filters results based on having conditions
|
||||
func (q *querier) applyHavingClause(result *qbtypes.Result, having *qbtypes.Having) *qbtypes.Result {
|
||||
// TODO: Implement having clause evaluation once expression parser is available
|
||||
// For now, we skip having clause processing
|
||||
return result
|
||||
}
|
||||
|
||||
// evaluateHavingExpression evaluates a having expression
|
||||
// TODO: Implement this once we have an expression parser for having clauses
|
||||
func evaluateHavingExpression(value float64, expression string) bool {
|
||||
// For now, always return true (no filtering)
|
||||
return true
|
||||
}
|
||||
|
||||
// applySeriesLimit limits the number of series in the result
|
||||
func (q *querier) applySeriesLimit(result *qbtypes.Result, limit int, orderBy []qbtypes.OrderBy) *qbtypes.Result {
|
||||
tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
return result
|
||||
}
|
||||
|
||||
if tsData != nil {
|
||||
for _, agg := range tsData.Aggregations {
|
||||
if len(agg.Series) <= limit {
|
||||
continue
|
||||
}
|
||||
|
||||
// Sort series based on orderBy
|
||||
q.sortSeries(agg.Series, orderBy)
|
||||
|
||||
// Keep only the top 'limit' series
|
||||
agg.Series = agg.Series[:limit]
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// sortSeries sorts time series based on orderBy criteria
|
||||
func (q *querier) sortSeries(series []*qbtypes.TimeSeries, orderBy []qbtypes.OrderBy) {
|
||||
if len(orderBy) == 0 {
|
||||
// Default: sort by value (average) in descending order
|
||||
sort.SliceStable(series, func(i, j int) bool {
|
||||
avgI := calculateAverage(series[i].Values)
|
||||
avgJ := calculateAverage(series[j].Values)
|
||||
return avgI > avgJ
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Sort by specified criteria
|
||||
sort.SliceStable(series, func(i, j int) bool {
|
||||
for _, order := range orderBy {
|
||||
cmp := 0
|
||||
|
||||
if order.Key.Name == "#value" {
|
||||
// Sort by value
|
||||
avgI := calculateAverage(series[i].Values)
|
||||
avgJ := calculateAverage(series[j].Values)
|
||||
if avgI < avgJ {
|
||||
cmp = -1
|
||||
} else if avgI > avgJ {
|
||||
cmp = 1
|
||||
}
|
||||
} else {
|
||||
// Sort by label
|
||||
valI := getLabelValue(series[i].Labels, order.Key.Name)
|
||||
valJ := getLabelValue(series[j].Labels, order.Key.Name)
|
||||
cmp = strings.Compare(valI, valJ)
|
||||
}
|
||||
|
||||
if cmp != 0 {
|
||||
if order.Direction == qbtypes.OrderDirectionAsc {
|
||||
return cmp < 0
|
||||
}
|
||||
return cmp > 0
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
// calculateAverage calculates the average of time series values
|
||||
func calculateAverage(values []*qbtypes.TimeSeriesValue) float64 {
|
||||
if len(values) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
sum := 0.0
|
||||
count := 0
|
||||
for _, v := range values {
|
||||
if !math.IsNaN(v.Value) && !math.IsInf(v.Value, 0) {
|
||||
sum += v.Value
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
return sum / float64(count)
|
||||
}
|
||||
|
||||
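calculateAverage above skips NaN and Inf samples so that a series with a few bad points still ranks sensibly when applySeriesLimit orders series by value. A tiny standalone restatement of that behaviour (illustrative only):

package main

import (
	"fmt"
	"math"
)

// average ignores NaN/Inf samples, mirroring calculateAverage above.
func average(values []float64) float64 {
	sum, count := 0.0, 0
	for _, v := range values {
		if !math.IsNaN(v) && !math.IsInf(v, 0) {
			sum += v
			count++
		}
	}
	if count == 0 {
		return 0
	}
	return sum / float64(count)
}

func main() {
	fmt.Println(average([]float64{1, 2, math.NaN(), 3})) // 2
}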
// getLabelValue gets the value of a label by name
|
||||
func getLabelValue(labels []*qbtypes.Label, name string) string {
|
||||
for _, label := range labels {
|
||||
if label.Key.Name == name {
|
||||
return fmt.Sprintf("%v", label.Value)
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// applyFunctions applies functions to time series data
|
||||
func (q *querier) applyFunctions(result *qbtypes.Result, functions []qbtypes.Function) *qbtypes.Result {
|
||||
tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
return result
|
||||
}
|
||||
|
||||
if tsData != nil {
|
||||
for _, agg := range tsData.Aggregations {
|
||||
for i, series := range agg.Series {
|
||||
agg.Series[i] = qbtypes.ApplyFunctions(functions, series)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// applyReduceTo reduces time series to a single value
|
||||
func (q *querier) applyReduceTo(result *qbtypes.Result, secondaryAggs []qbtypes.SecondaryAggregation) *qbtypes.Result {
|
||||
tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
return result
|
||||
}
|
||||
|
||||
// For now, we'll use the first secondary aggregation's expression
|
||||
// In the future, this might need to handle multiple secondary aggregations
|
||||
expression := ""
|
||||
if len(secondaryAggs) > 0 {
|
||||
expression = secondaryAggs[0].Expression
|
||||
}
|
||||
|
||||
if expression == "" {
|
||||
return result
|
||||
}
|
||||
|
||||
// Map expression to reduce operation
|
||||
var reduceOp qbtypes.ReduceTo
|
||||
switch expression {
|
||||
case "last":
|
||||
reduceOp = qbtypes.ReduceToLast
|
||||
case "sum":
|
||||
reduceOp = qbtypes.ReduceToSum
|
||||
case "avg":
|
||||
reduceOp = qbtypes.ReduceToAvg
|
||||
case "min":
|
||||
reduceOp = qbtypes.ReduceToMin
|
||||
case "max":
|
||||
reduceOp = qbtypes.ReduceToMax
|
||||
default:
|
||||
// Unknown reduce operation, return as-is
|
||||
return result
|
||||
}
|
||||
|
||||
for _, agg := range tsData.Aggregations {
|
||||
for i, series := range agg.Series {
|
||||
// Use the FunctionReduceTo helper
|
||||
reducedSeries := qbtypes.FunctionReduceTo(series, reduceOp)
|
||||
agg.Series[i] = reducedSeries
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
125
pkg/querier/postprocess_formula.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
)
|
||||
|
||||
// applyFormulas processes formula queries in the composite query
|
||||
func (q *querier) applyFormulas(results map[string]*qbtypes.Result, req *qbtypes.QueryRangeRequest) map[string]*qbtypes.Result {
|
||||
// Collect formula queries
|
||||
formulaQueries := make(map[string]qbtypes.QueryBuilderFormula)
|
||||
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
if query.Type == qbtypes.QueryTypeFormula {
|
||||
if formula, ok := query.Spec.(qbtypes.QueryBuilderFormula); ok {
|
||||
formulaQueries[formula.Name] = formula
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process each formula
|
||||
for name, formula := range formulaQueries {
|
||||
// Prepare time series data for formula evaluation
|
||||
timeSeriesData := make(map[string]*qbtypes.TimeSeriesData)
|
||||
|
||||
// Extract time series data from results
|
||||
for queryName, result := range results {
|
||||
if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok {
|
||||
timeSeriesData[queryName] = tsData
|
||||
}
|
||||
}
|
||||
|
||||
// Create formula evaluator
|
||||
canDefaultZero := make(map[string]bool)
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
// Metrics can default to zero for rate/increase operations
|
||||
canDefaultZero[spec.Name] = true
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
canDefaultZero[spec.Name] = false
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
canDefaultZero[spec.Name] = false
|
||||
}
|
||||
}
|
||||
|
||||
evaluator, err := qbtypes.NewFormulaEvaluator(formula.Expression, canDefaultZero)
|
||||
if err != nil {
|
||||
q.logger.Error("failed to create formula evaluator", "error", err, "formula", name)
|
||||
continue
|
||||
}
|
||||
|
||||
// Evaluate the formula
|
||||
formulaSeries, err := evaluator.EvaluateFormula(timeSeriesData)
|
||||
if err != nil {
|
||||
q.logger.Error("failed to evaluate formula", "error", err, "formula", name)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create result for formula
|
||||
formulaResult := &qbtypes.TimeSeriesData{
|
||||
QueryName: name,
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Series: formulaSeries,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Apply functions if any
|
||||
if len(formula.Functions) > 0 {
|
||||
for _, agg := range formulaResult.Aggregations {
|
||||
for i, series := range agg.Series {
|
||||
agg.Series[i] = qbtypes.ApplyFunctions(formula.Functions, series)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
results[name] = &qbtypes.Result{
|
||||
Value: formulaResult,
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// filterDisabledQueries removes results for disabled queries
|
||||
func (q *querier) filterDisabledQueries(results map[string]*qbtypes.Result, req *qbtypes.QueryRangeRequest) map[string]*qbtypes.Result {
|
||||
filtered := make(map[string]*qbtypes.Result)
|
||||
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
var queryName string
|
||||
var disabled bool
|
||||
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
queryName = spec.Name
|
||||
disabled = spec.Disabled
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
queryName = spec.Name
|
||||
disabled = spec.Disabled
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
queryName = spec.Name
|
||||
disabled = spec.Disabled
|
||||
case qbtypes.QueryBuilderFormula:
|
||||
queryName = spec.Name
|
||||
// Formulas don't have a disabled flag, include them
|
||||
disabled = false
|
||||
case qbtypes.PromQuery:
|
||||
queryName = spec.Name
|
||||
disabled = spec.Disabled
|
||||
case qbtypes.ClickHouseQuery:
|
||||
queryName = spec.Name
|
||||
disabled = spec.Disabled
|
||||
}
|
||||
|
||||
if !disabled {
|
||||
if result, ok := results[queryName]; ok {
|
||||
filtered[queryName] = result
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
702
pkg/querier/postprocess_gaps.go
Normal file
@@ -0,0 +1,702 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
// fillGaps fills missing data points with zeros in time series data
|
||||
func (q *querier) fillGaps(results map[string]*qbtypes.Result, req *qbtypes.QueryRangeRequest) map[string]*qbtypes.Result {
|
||||
// Only fill gaps for time series data
|
||||
if req.RequestType != qbtypes.RequestTypeTimeSeries {
|
||||
return results
|
||||
}
|
||||
|
||||
// Get the step interval from the first query
|
||||
var step int64 = 60000 // Default to 1 minute in milliseconds
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
if spec.StepInterval.Duration > 0 {
|
||||
step = int64(spec.StepInterval.Duration) / int64(time.Millisecond)
|
||||
break
|
||||
}
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
if spec.StepInterval.Duration > 0 {
|
||||
step = int64(spec.StepInterval.Duration) / int64(time.Millisecond)
|
||||
break
|
||||
}
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
if spec.StepInterval.Duration > 0 {
|
||||
step = int64(spec.StepInterval.Duration) / int64(time.Millisecond)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
startMs := int64(req.Start)
|
||||
endMs := int64(req.End)
|
||||
|
||||
for name, result := range results {
|
||||
tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok || tsData == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// If no aggregations, create an empty one
|
||||
if len(tsData.Aggregations) == 0 {
|
||||
tsData.Aggregations = []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{},
|
||||
Values: fillGapForSeries(nil, startMs, endMs, step),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Fill gaps for each series
|
||||
for _, agg := range tsData.Aggregations {
|
||||
if len(agg.Series) == 0 {
|
||||
// Create empty series if none exist
|
||||
agg.Series = []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{},
|
||||
Values: fillGapForSeries(nil, startMs, endMs, step),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
// Fill gaps for existing series
|
||||
for _, series := range agg.Series {
|
||||
series.Values = fillGapForSeries(series.Values, startMs, endMs, step)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
results[name] = result
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// fillGapForSeries fills gaps in a single time series
|
||||
func fillGapForSeries(values []*qbtypes.TimeSeriesValue, startMs, endMs, step int64) []*qbtypes.TimeSeriesValue {
|
||||
// Safeguard against invalid step
|
||||
if step <= 0 {
|
||||
step = 60000 // Default to 1 minute
|
||||
}
|
||||
|
||||
// Create a map of existing values
|
||||
valueMap := make(map[int64]float64)
|
||||
for _, v := range values {
|
||||
if v != nil && !v.Partial {
|
||||
valueMap[v.Timestamp] = v.Value
|
||||
}
|
||||
}
|
||||
|
||||
// Generate all expected timestamps
|
||||
var filledValues []*qbtypes.TimeSeriesValue
|
||||
for ts := startMs; ts <= endMs; ts += step {
|
||||
value := 0.0
|
||||
if v, ok := valueMap[ts]; ok {
|
||||
value = v
|
||||
}
|
||||
|
||||
filledValues = append(filledValues, &qbtypes.TimeSeriesValue{
|
||||
Timestamp: ts,
|
||||
Value: value,
|
||||
})
|
||||
}
|
||||
|
||||
return filledValues
|
||||
}
|
||||
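As a concrete illustration of the gap filling above: over a 0 to 180000 ms window with a 60000 ms step, a series that only has points at 0 and 120000 comes back with zeros at 60000 and 180000, and partial points are dropped before filling. A simplified standalone restatement (the value type and helper name are ours):

package main

import "fmt"

type value struct {
	Timestamp int64
	Value     float64
	Partial   bool
}

// fillZeros mirrors fillGapForSeries above: known, non-partial points keep
// their value, every other expected timestamp becomes zero.
func fillZeros(values []value, startMs, endMs, step int64) []value {
	known := make(map[int64]float64)
	for _, v := range values {
		if !v.Partial {
			known[v.Timestamp] = v.Value
		}
	}
	var out []value
	for ts := startMs; ts <= endMs; ts += step {
		out = append(out, value{Timestamp: ts, Value: known[ts]})
	}
	return out
}

func main() {
	in := []value{{0, 5, false}, {120000, 7, false}}
	fmt.Println(fillZeros(in, 0, 180000, 60000))
	// [{0 5 false} {60000 0 false} {120000 7 false} {180000 0 false}]
}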
|
||||
// formatScalarResultsAsTable formats scalar results as a table for UI display
|
||||
func (q *querier) formatScalarResultsAsTable(results map[string]*qbtypes.Result, req *qbtypes.QueryRangeRequest) map[string]any {
|
||||
if len(results) == 0 {
|
||||
return map[string]any{"table": &qbtypes.ScalarData{}}
|
||||
}
|
||||
|
||||
// Convert all results to ScalarData first
|
||||
for name, result := range results {
|
||||
if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok {
|
||||
// Convert TimeSeriesData to ScalarData
|
||||
columns := []*qbtypes.ColumnDescriptor{}
|
||||
data := [][]any{}
|
||||
|
||||
// Extract group columns from labels
|
||||
if len(tsData.Aggregations) > 0 && len(tsData.Aggregations[0].Series) > 0 {
|
||||
// Get group columns from the first series
|
||||
for _, label := range tsData.Aggregations[0].Series[0].Labels {
|
||||
col := &qbtypes.ColumnDescriptor{
|
||||
TelemetryFieldKey: label.Key,
|
||||
QueryName: name,
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
}
|
||||
// Ensure Name is set
|
||||
if col.Name == "" {
|
||||
col.Name = label.Key.Name
|
||||
}
|
||||
columns = append(columns, col)
|
||||
}
|
||||
}
|
||||
|
||||
// Add aggregation columns
|
||||
for _, agg := range tsData.Aggregations {
|
||||
col := &qbtypes.ColumnDescriptor{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: agg.Alias,
|
||||
},
|
||||
QueryName: name,
|
||||
AggregationIndex: int64(agg.Index),
|
||||
Meta: agg.Meta,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
}
|
||||
if col.Name == "" {
|
||||
col.Name = fmt.Sprintf("__result_%d", agg.Index)
|
||||
}
|
||||
columns = append(columns, col)
|
||||
}
|
||||
|
||||
// Convert series to rows
|
||||
for seriesIdx, series := range tsData.Aggregations[0].Series {
|
||||
row := make([]any, len(columns))
|
||||
colIdx := 0
|
||||
|
||||
// Add group values
|
||||
for _, label := range series.Labels {
|
||||
row[colIdx] = label.Value
|
||||
colIdx++
|
||||
}
|
||||
|
||||
// Add aggregation values (last value from each aggregation)
|
||||
for _, agg := range tsData.Aggregations {
|
||||
if seriesIdx < len(agg.Series) && len(agg.Series[seriesIdx].Values) > 0 {
|
||||
value := agg.Series[seriesIdx].Values[len(agg.Series[seriesIdx].Values)-1].Value
|
||||
row[colIdx] = roundToTwoDecimal(value)
|
||||
} else {
|
||||
row[colIdx] = 0.0
|
||||
}
|
||||
colIdx++
|
||||
}
|
||||
|
||||
data = append(data, row)
|
||||
}
|
||||
|
||||
results[name] = &qbtypes.Result{
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: columns,
|
||||
Data: data,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we have a single result that already contains all columns from multiple queries
|
||||
// This happens when the SQL query already joins multiple queries
|
||||
if len(results) == 1 {
|
||||
for queryName, result := range results {
|
||||
if scalarData, ok := result.Value.(*qbtypes.ScalarData); ok {
|
||||
// Check if this result already has columns from multiple queries
|
||||
queryNamesInColumns := make(map[string]bool)
|
||||
for _, col := range scalarData.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeAggregation && col.QueryName != "" {
|
||||
queryNamesInColumns[col.QueryName] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Debug: log what we found
|
||||
if q.logger != nil {
|
||||
q.logger.Debug("Single result analysis",
|
||||
"queryNamesInColumns", queryNamesInColumns,
|
||||
"num_columns", len(scalarData.Columns),
|
||||
"num_rows", len(scalarData.Data))
|
||||
}
|
||||
|
||||
// If we have columns from multiple queries, we need to deduplicate rows
|
||||
if len(queryNamesInColumns) > 1 {
|
||||
if q.logger != nil {
|
||||
q.logger.Debug("Deduplicating scalar rows")
|
||||
}
|
||||
deduplicatedResult := q.deduplicateScalarRows(scalarData)
|
||||
// Return the deduplicated result under the original query name
|
||||
return map[string]any{queryName: deduplicatedResult["table"]}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now merge all ScalarData results
|
||||
// First, collect all unique group columns
|
||||
groupColumnMap := make(map[string]*qbtypes.ColumnDescriptor)
|
||||
groupColumnOrder := []string{}
|
||||
|
||||
for _, result := range results {
|
||||
if scalarData, ok := result.Value.(*qbtypes.ScalarData); ok {
|
||||
for _, col := range scalarData.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeGroup {
|
||||
if _, exists := groupColumnMap[col.Name]; !exists {
|
||||
groupColumnMap[col.Name] = col
|
||||
groupColumnOrder = append(groupColumnOrder, col.Name)
|
||||
if q.logger != nil {
|
||||
q.logger.Debug("Found group column", "name", col.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Debug: log the group columns we found
|
||||
if q.logger != nil {
|
||||
q.logger.Debug("Group columns collected",
|
||||
"groupColumnOrder", groupColumnOrder,
|
||||
"num_group_columns", len(groupColumnOrder))
|
||||
}
|
||||
|
||||
// Build final columns
|
||||
mergedColumns := []*qbtypes.ColumnDescriptor{}
|
||||
|
||||
// Add group columns
|
||||
for _, colName := range groupColumnOrder {
|
||||
mergedColumns = append(mergedColumns, groupColumnMap[colName])
|
||||
}
|
||||
|
||||
// Add aggregation columns from each query
|
||||
queryNames := []string{}
|
||||
for name := range results {
|
||||
queryNames = append(queryNames, name)
|
||||
}
|
||||
sort.Strings(queryNames)
|
||||
|
||||
for _, queryName := range queryNames {
|
||||
result := results[queryName]
|
||||
if scalarData, ok := result.Value.(*qbtypes.ScalarData); ok {
|
||||
for _, col := range scalarData.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeAggregation {
|
||||
newCol := &qbtypes.ColumnDescriptor{
|
||||
TelemetryFieldKey: col.TelemetryFieldKey,
|
||||
QueryName: queryName,
|
||||
AggregationIndex: col.AggregationIndex,
|
||||
Meta: col.Meta,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
}
|
||||
mergedColumns = append(mergedColumns, newCol)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Build a map of unique rows keyed by their joined group values
rowMap := make(map[string][]any)
|
||||
|
||||
// Debug: log the input data
|
||||
if q.logger != nil {
|
||||
for _, queryName := range queryNames {
|
||||
if scalarData, ok := results[queryName].Value.(*qbtypes.ScalarData); ok {
|
||||
q.logger.Debug("Processing query result",
|
||||
"query", queryName,
|
||||
"num_columns", len(scalarData.Columns),
|
||||
"num_rows", len(scalarData.Data),
|
||||
"columns", func() []string {
|
||||
names := []string{}
|
||||
for _, col := range scalarData.Columns {
|
||||
names = append(names, fmt.Sprintf("%s(%s)", col.Name, col.Type))
|
||||
}
|
||||
return names
|
||||
}())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process each query's results
|
||||
for _, queryName := range queryNames {
|
||||
result := results[queryName]
|
||||
if scalarData, ok := result.Value.(*qbtypes.ScalarData); ok {
|
||||
// Map column indices
|
||||
groupIndices := make(map[string]int)
|
||||
aggIndices := []int{}
|
||||
|
||||
for i, col := range scalarData.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeGroup {
|
||||
groupIndices[col.Name] = i
|
||||
} else if col.Type == qbtypes.ColumnTypeAggregation {
|
||||
aggIndices = append(aggIndices, i)
|
||||
}
|
||||
}
|
||||
|
||||
// Process each row
|
||||
for rowIdx, row := range scalarData.Data {
|
||||
// Build key from group values in consistent order
|
||||
keyParts := make([]string, len(groupColumnOrder))
|
||||
for i, colName := range groupColumnOrder {
|
||||
if idx, ok := groupIndices[colName]; ok && idx < len(row) {
|
||||
// Convert the value to string properly
|
||||
switch v := row[idx].(type) {
|
||||
case string:
|
||||
keyParts[i] = v
|
||||
case *string:
|
||||
if v != nil {
|
||||
keyParts[i] = *v
|
||||
} else {
|
||||
keyParts[i] = "n/a"
|
||||
}
|
||||
default:
|
||||
keyParts[i] = fmt.Sprintf("%v", v)
|
||||
}
|
||||
} else {
|
||||
keyParts[i] = "n/a"
|
||||
}
|
||||
}
|
||||
|
||||
// Debug first few rows
|
||||
if q.logger != nil && rowIdx < 3 {
|
||||
q.logger.Debug("Building key",
|
||||
"query", queryName,
|
||||
"rowIdx", rowIdx,
|
||||
"groupColumnOrder", groupColumnOrder,
|
||||
"groupIndices", groupIndices,
|
||||
"row", row,
|
||||
"keyParts", keyParts)
|
||||
}
|
||||
// Create a unique key by joining parts with a delimiter
key := strings.Join(keyParts, "|")
|
||||
|
||||
// Debug: log the key generation
|
||||
if q.logger != nil {
|
||||
q.logger.Debug("Generated row key",
|
||||
"query", queryName,
|
||||
"key", key,
|
||||
"keyParts", strings.Join(keyParts, ","),
|
||||
"numKeyParts", len(keyParts),
|
||||
"firstRowValue", func() string {
|
||||
if len(row) > 0 {
|
||||
return fmt.Sprintf("%v", row[0])
|
||||
}
|
||||
return "empty"
|
||||
}())
|
||||
}
|
||||
|
||||
// Initialize row if needed
|
||||
if _, exists := rowMap[key]; !exists {
|
||||
rowMap[key] = make([]any, len(mergedColumns))
|
||||
// Set group values
|
||||
for i, colName := range groupColumnOrder {
|
||||
if idx, ok := groupIndices[colName]; ok && idx < len(row) {
|
||||
// Store the actual value, not a pointer
|
||||
switch v := row[idx].(type) {
|
||||
case *string:
|
||||
if v != nil {
|
||||
rowMap[key][i] = *v
|
||||
} else {
|
||||
rowMap[key][i] = "n/a"
|
||||
}
|
||||
default:
|
||||
rowMap[key][i] = v
|
||||
}
|
||||
} else {
|
||||
rowMap[key][i] = "n/a"
|
||||
}
|
||||
}
|
||||
// Initialize all aggregation values to "n/a"
|
||||
for i := len(groupColumnOrder); i < len(mergedColumns); i++ {
|
||||
rowMap[key][i] = "n/a"
|
||||
}
|
||||
}
|
||||
|
||||
// Set aggregation values for this query
|
||||
aggStartIdx := len(groupColumnOrder)
|
||||
for _, queryName2 := range queryNames {
|
||||
if queryName2 == queryName {
|
||||
// Copy aggregation values
|
||||
for i, aggIdx := range aggIndices {
|
||||
if aggIdx < len(row) {
|
||||
rowMap[key][aggStartIdx+i] = row[aggIdx]
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
// Skip columns for other queries
|
||||
result2 := results[queryName2]
|
||||
if scalarData2, ok := result2.Value.(*qbtypes.ScalarData); ok {
|
||||
aggCount := 0
|
||||
for _, col := range scalarData2.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeAggregation {
|
||||
aggCount++
|
||||
}
|
||||
}
|
||||
aggStartIdx += aggCount
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert map to slice
|
||||
mergedData := [][]any{}
|
||||
for _, row := range rowMap {
|
||||
mergedData = append(mergedData, row)
|
||||
}
|
||||
|
||||
// Sort rows by first aggregation column (descending)
|
||||
if len(mergedColumns) > len(groupColumnOrder) {
|
||||
sort.SliceStable(mergedData, func(i, j int) bool {
|
||||
valI := mergedData[i][len(groupColumnOrder)]
|
||||
valJ := mergedData[j][len(groupColumnOrder)]
|
||||
|
||||
// Handle n/a values
|
||||
if valI == "n/a" {
|
||||
return false
|
||||
}
|
||||
if valJ == "n/a" {
|
||||
return true
|
||||
}
|
||||
|
||||
// Compare numeric values
|
||||
switch vI := valI.(type) {
|
||||
case float64:
|
||||
if vJ, ok := valJ.(float64); ok {
|
||||
return vI > vJ
|
||||
}
|
||||
case int64:
|
||||
if vJ, ok := valJ.(int64); ok {
|
||||
return vI > vJ
|
||||
}
|
||||
case int:
|
||||
if vJ, ok := valJ.(int); ok {
|
||||
return vI > vJ
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
return map[string]any{
|
||||
"table": &qbtypes.ScalarData{
|
||||
Columns: mergedColumns,
|
||||
Data: mergedData,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// sortTableRows sorts the table rows based on the query order
|
||||
func sortTableRows(rows [][]any, columns []*qbtypes.ColumnDescriptor, req *qbtypes.QueryRangeRequest) {
|
||||
// Get query names in order
|
||||
var queryNames []string
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
queryNames = append(queryNames, spec.Name)
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
queryNames = append(queryNames, spec.Name)
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
queryNames = append(queryNames, spec.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a map of column indices by query name
|
||||
columnIndices := make(map[string][]int)
|
||||
for i, col := range columns {
|
||||
if col.Type == qbtypes.ColumnTypeAggregation && col.QueryName != "" {
|
||||
columnIndices[col.QueryName] = append(columnIndices[col.QueryName], i)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort in reverse order of query names (stable sort)
|
||||
for i := len(queryNames) - 1; i >= 0; i-- {
|
||||
queryName := queryNames[i]
|
||||
indices, ok := columnIndices[queryName]
|
||||
if !ok || len(indices) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Use the first aggregation column for this query
|
||||
colIdx := indices[0]
|
||||
|
||||
sort.SliceStable(rows, func(i, j int) bool {
|
||||
valI := rows[i][colIdx]
|
||||
valJ := rows[j][colIdx]
|
||||
|
||||
// Handle n/a values
|
||||
if valI == "n/a" && valJ == "n/a" {
|
||||
return false
|
||||
}
|
||||
if valI == "n/a" {
|
||||
return false
|
||||
}
|
||||
if valJ == "n/a" {
|
||||
return true
|
||||
}
|
||||
|
||||
// Compare numeric values (default descending)
|
||||
if numI, ok := valI.(float64); ok {
|
||||
if numJ, ok := valJ.(float64); ok {
|
||||
return numI > numJ
|
||||
}
|
||||
}
|
||||
|
||||
// Compare int64 values
|
||||
if numI, ok := valI.(int64); ok {
|
||||
if numJ, ok := valJ.(int64); ok {
|
||||
return numI > numJ
|
||||
}
|
||||
}
|
||||
|
||||
// Compare int values
|
||||
if numI, ok := valI.(int); ok {
|
||||
if numJ, ok := valJ.(int); ok {
|
||||
return numI > numJ
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// deduplicateScalarRows deduplicates rows in a ScalarData that already contains columns from multiple queries
|
||||
func (q *querier) deduplicateScalarRows(data *qbtypes.ScalarData) map[string]any {
|
||||
// First, identify group columns
|
||||
groupColumnIndices := []int{}
|
||||
for i, col := range data.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeGroup {
|
||||
groupColumnIndices = append(groupColumnIndices, i)
|
||||
}
|
||||
}
|
||||
|
||||
// Build a map to merge rows by group key
|
||||
rowMap := make(map[string][]any)
|
||||
|
||||
for _, row := range data.Data {
|
||||
// Build key from group values
|
||||
keyParts := make([]string, len(groupColumnIndices))
|
||||
for i, colIdx := range groupColumnIndices {
|
||||
if colIdx < len(row) {
|
||||
// Convert the value to string properly
|
||||
switch v := row[colIdx].(type) {
|
||||
case string:
|
||||
keyParts[i] = v
|
||||
case *string:
|
||||
if v != nil {
|
||||
keyParts[i] = *v
|
||||
} else {
|
||||
keyParts[i] = "n/a"
|
||||
}
|
||||
default:
|
||||
keyParts[i] = fmt.Sprintf("%v", v)
|
||||
}
|
||||
} else {
|
||||
keyParts[i] = "n/a"
|
||||
}
|
||||
}
|
||||
key := strings.Join(keyParts, "|")
|
||||
|
||||
if existingRow, exists := rowMap[key]; exists {
|
||||
// Merge this row with existing row
|
||||
// Replace "n/a" values with actual values
|
||||
for i, val := range row {
|
||||
if existingRow[i] == "n/a" && val != "n/a" {
|
||||
existingRow[i] = val
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// First time seeing this key, store the row
|
||||
rowCopy := make([]any, len(row))
|
||||
copy(rowCopy, row)
|
||||
rowMap[key] = rowCopy
|
||||
}
|
||||
}
|
||||
|
||||
// Convert map back to slice
|
||||
mergedData := make([][]any, 0, len(rowMap))
|
||||
for _, row := range rowMap {
|
||||
mergedData = append(mergedData, row)
|
||||
}
|
||||
|
||||
// Sort by first aggregation column if available
|
||||
firstAggCol := -1
|
||||
for i, col := range data.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeAggregation {
|
||||
firstAggCol = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if firstAggCol >= 0 {
|
||||
sort.SliceStable(mergedData, func(i, j int) bool {
|
||||
valI := mergedData[i][firstAggCol]
|
||||
valJ := mergedData[j][firstAggCol]
|
||||
|
||||
// Handle n/a values
|
||||
if valI == "n/a" {
|
||||
return false
|
||||
}
|
||||
if valJ == "n/a" {
|
||||
return true
|
||||
}
|
||||
|
||||
// Compare numeric values
|
||||
switch vI := valI.(type) {
|
||||
case float64:
|
||||
if vJ, ok := valJ.(float64); ok {
|
||||
return vI > vJ
|
||||
}
|
||||
case int64:
|
||||
if vJ, ok := valJ.(int64); ok {
|
||||
return vI > vJ
|
||||
}
|
||||
case int:
|
||||
if vJ, ok := valJ.(int); ok {
|
||||
return vI > vJ
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
return map[string]any{
|
||||
"table": &qbtypes.ScalarData{
|
||||
Columns: data.Columns,
|
||||
Data: mergedData,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// roundToTwoDecimal rounds a number to two decimal places; very small values keep extra precision and values below 1e-6 are returned as 0
|
||||
func roundToTwoDecimal(number float64) float64 {
|
||||
// Handle very small numbers
|
||||
if math.Abs(number) < 0.000001 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Determine the number of decimal places to round to
|
||||
decimalPlaces := 2
|
||||
if math.Abs(number) < 0.01 {
|
||||
decimalPlaces = int(math.Ceil(-math.Log10(math.Abs(number)))) + 1
|
||||
}
|
||||
|
||||
// Round to the determined number of decimal places
|
||||
scale := math.Pow(10, float64(decimalPlaces))
|
||||
return math.Round(number*scale) / scale
|
||||
}
|
||||
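A minimal standalone sketch of the rounding behaviour implemented by the helper above; the function body is copied from this diff, and the example values in main are assumptions chosen to show the three branches (normal, very small, and below the 1e-6 cutoff).

package main

import (
	"fmt"
	"math"
)

// roundToTwoDecimal mirrors the helper above: two decimals normally, extra
// precision for very small magnitudes, and zero below the 1e-6 cutoff.
func roundToTwoDecimal(number float64) float64 {
	if math.Abs(number) < 0.000001 {
		return 0
	}
	decimalPlaces := 2
	if math.Abs(number) < 0.01 {
		decimalPlaces = int(math.Ceil(-math.Log10(math.Abs(number)))) + 1
	}
	scale := math.Pow(10, float64(decimalPlaces))
	return math.Round(number*scale) / scale
}

func main() {
	fmt.Println(roundToTwoDecimal(50.5678)) // 50.57
	fmt.Println(roundToTwoDecimal(0.00123)) // 0.0012
	fmt.Println(roundToTwoDecimal(5e-7))    // 0
}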
242 pkg/querier/postprocess_table_duplicate_test.go Normal file
@@ -0,0 +1,242 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestFormatScalarResultsAsTableDuplicateIssue reproduces the exact issue from the user's JSON
|
||||
func TestFormatScalarResultsAsTableDuplicateIssue(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Create results that exactly match the user's problematic case
|
||||
// Query A has data for all services
|
||||
// Query B also has data for all services
|
||||
// But they're coming as separate ScalarData results
|
||||
results := map[string]*qbtypes.Result{
|
||||
"A": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
QueryName: "B", // Note: This says "B" in the user's JSON!
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_1"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 1,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "B",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
// These rows have values for A but "n/a" for B
|
||||
{"currencyservice", 3380.0, 1.0, "n/a"},
|
||||
{"producer-svc-3", 25.0, 1.0, "n/a"},
|
||||
{"producer-svc-5", 45.0, 1.0, "n/a"},
|
||||
{"mongodb", 5713.0, 1.0, "n/a"},
|
||||
{"recommendationservice", 1724.0, 1.0, "n/a"},
|
||||
{"producer-svc-1", 180.0, 1.0, "n/a"},
|
||||
{"consumer-svc-4", 210.0, 1.0, "n/a"},
|
||||
{"frauddetectionservice", 101.0, 1.0, "n/a"},
|
||||
{"kafka", 1376.0, 1.0, "n/a"},
|
||||
{"consumer-svc-3", 122.0, 1.0, "n/a"},
|
||||
{"producer-svc-6", 60.0, 1.0, "n/a"},
|
||||
{"cartservice", 3322.0, 1.0, "n/a"},
|
||||
{"consumer-svc-2", 1080.0, 1.0, "n/a"},
|
||||
{"adservice", 133.0, 1.0, "n/a"},
|
||||
{"demo-app", 1449.0, 1.0, "n/a"},
|
||||
{"quoteservice", 101.0, 1.0, "n/a"},
|
||||
{"producer-svc-2", 360.0, 1.0, "n/a"},
|
||||
{"producer-svc-4", 36.0, 1.0, "n/a"},
|
||||
// These rows have "n/a" for A but values for B
|
||||
{"consumer-svc-4", "n/a", "n/a", 1.0},
|
||||
{"currencyservice", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-4", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-2", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-3", "n/a", "n/a", 1.0},
|
||||
{"adservice", "n/a", "n/a", 1.0},
|
||||
{"kafka", "n/a", "n/a", 1.0},
|
||||
{"frauddetectionservice", "n/a", "n/a", 1.0},
|
||||
{"recommendationservice", "n/a", "n/a", 1.0},
|
||||
{"consumer-svc-3", "n/a", "n/a", 1.0},
|
||||
{"consumer-svc-2", "n/a", "n/a", 1.0},
|
||||
{"cartservice", "n/a", "n/a", 1.0},
|
||||
{"quoteservice", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-5", "n/a", "n/a", 1.0},
|
||||
{"demo-app", "n/a", "n/a", 1.0},
|
||||
{"mongodb", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-6", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-1", "n/a", "n/a", 1.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "A",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "B",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Format as table
|
||||
result := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Get the table - it should be under "A" key now
|
||||
var table *qbtypes.ScalarData
|
||||
var ok bool
|
||||
if tableResult, exists := result["A"]; exists {
|
||||
table, ok = tableResult.(*qbtypes.ScalarData)
|
||||
} else if tableResult, exists := result["table"]; exists {
|
||||
table, ok = tableResult.(*qbtypes.ScalarData)
|
||||
}
|
||||
require.True(t, ok, "Expected table result, got: %+v", result)
|
||||
|
||||
// The problem: we should have 18 unique services, not 36 rows
|
||||
assert.Len(t, table.Data, 18, "Should have 18 unique services, not duplicate rows")
|
||||
|
||||
// Create a map to check row values by service name
|
||||
rowMap := make(map[string][]any)
|
||||
for _, row := range table.Data {
|
||||
serviceName := row[0].(string)
|
||||
assert.NotContains(t, rowMap, serviceName, "Service %s should not appear twice", serviceName)
|
||||
rowMap[serviceName] = row
|
||||
}
|
||||
|
||||
// Check some specific services that appear in both lists
|
||||
// currencyservice should have values from both A and B
|
||||
currencyRow := rowMap["currencyservice"]
|
||||
assert.Equal(t, "currencyservice", currencyRow[0])
|
||||
assert.Equal(t, 3380.0, currencyRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, currencyRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, currencyRow[3]) // B result 0
|
||||
}
|
||||
|
||||
// TestFormatScalarResultsAsTableSingleResultAlreadyMerged tests the case where
|
||||
// a single result already contains all columns from multiple queries
|
||||
func TestFormatScalarResultsAsTableSingleResultAlreadyMerged(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// This is what we're actually getting - a single result that already has columns from both queries
|
||||
results := map[string]*qbtypes.Result{
|
||||
"merged": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
QueryName: "B",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_1"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 1,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "B",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"currencyservice", 3380.0, 1.0, "n/a"},
|
||||
{"mongodb", 5713.0, 1.0, "n/a"},
|
||||
{"currencyservice", "n/a", "n/a", 1.0},
|
||||
{"mongodb", "n/a", "n/a", 1.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "A",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "B",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Format as table
|
||||
result := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Get the table - it should be under "merged" key now
|
||||
var table *qbtypes.ScalarData
|
||||
var ok bool
|
||||
if tableResult, exists := result["merged"]; exists {
|
||||
table, ok = tableResult.(*qbtypes.ScalarData)
|
||||
} else if tableResult, exists := result["table"]; exists {
|
||||
table, ok = tableResult.(*qbtypes.ScalarData)
|
||||
}
|
||||
require.True(t, ok, "Expected table result, got: %+v", result)
|
||||
|
||||
// Should have 2 unique services, not 4 rows
|
||||
assert.Len(t, table.Data, 2, "Should have 2 unique services after merging duplicates")
|
||||
|
||||
// Create a map to check row values by service name
|
||||
rowMap := make(map[string][]any)
|
||||
for _, row := range table.Data {
|
||||
serviceName := row[0].(string)
|
||||
rowMap[serviceName] = row
|
||||
}
|
||||
|
||||
// Check that values are properly merged
|
||||
currencyRow := rowMap["currencyservice"]
|
||||
assert.Equal(t, "currencyservice", currencyRow[0])
|
||||
assert.Equal(t, 3380.0, currencyRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, currencyRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, currencyRow[3]) // B result 0
|
||||
|
||||
mongoRow := rowMap["mongodb"]
|
||||
assert.Equal(t, "mongodb", mongoRow[0])
|
||||
assert.Equal(t, 5713.0, mongoRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, mongoRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, mongoRow[3]) // B result 0
|
||||
}
|
||||
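The duplicate-row tests above exercise the merging idea behind deduplicateScalarRows: rows are keyed by their group-column values and "n/a" placeholders are overwritten by real values from the other query. Below is a minimal standalone sketch of that merge; mergeByGroupKey is a hypothetical helper written only for illustration, and the sample row values are taken from the test data in this diff.

package main

import (
	"fmt"
	"strings"
)

// mergeByGroupKey merges rows that share the same group key, replacing
// "n/a" placeholders with real values where the other row has them.
func mergeByGroupKey(rows [][]any, groupCols int) map[string][]any {
	merged := map[string][]any{}
	for _, row := range rows {
		parts := make([]string, groupCols)
		for i := 0; i < groupCols; i++ {
			parts[i] = fmt.Sprintf("%v", row[i])
		}
		key := strings.Join(parts, "|")
		if existing, ok := merged[key]; ok {
			for i, v := range row {
				if existing[i] == "n/a" && v != "n/a" {
					existing[i] = v
				}
			}
			continue
		}
		cp := make([]any, len(row))
		copy(cp, row)
		merged[key] = cp
	}
	return merged
}

func main() {
	rows := [][]any{
		{"currencyservice", 3380.0, 1.0, "n/a"}, // values from query A, none from B
		{"currencyservice", "n/a", "n/a", 1.0},  // values from query B, none from A
	}
	fmt.Println(mergeByGroupKey(rows, 1)["currencyservice"]) // [currencyservice 3380 1 1]
}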
290 pkg/querier/postprocess_table_test.go Normal file
@@ -0,0 +1,290 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFormatScalarResultsAsTableMergesCorrectly(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Create results that simulate the problematic case
|
||||
results := map[string]*qbtypes.Result{
|
||||
"A": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
QueryName: "A",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_1"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 1,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"currencyservice", 3380.0, 1.0},
|
||||
{"mongodb", 5713.0, 1.0},
|
||||
{"cartservice", 3322.0, 1.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
"B": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
QueryName: "B",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "B",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"currencyservice", 1.0},
|
||||
{"mongodb", 1.0},
|
||||
{"cartservice", 1.0},
|
||||
{"kafka", 1.0}, // Service only in B
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "A",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "B",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Format as table
|
||||
result := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Get the table
|
||||
table, ok := result["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have 4 columns: 1 group + 2 from A + 1 from B
assert.Len(t, table.Columns, 4)
|
||||
|
||||
// Check column names and query associations
|
||||
assert.Equal(t, "service.name", table.Columns[0].Name)
|
||||
assert.Equal(t, qbtypes.ColumnTypeGroup, table.Columns[0].Type)
|
||||
|
||||
assert.Equal(t, "__result_0", table.Columns[1].Name)
|
||||
assert.Equal(t, "A", table.Columns[1].QueryName)
|
||||
assert.Equal(t, qbtypes.ColumnTypeAggregation, table.Columns[1].Type)
|
||||
|
||||
assert.Equal(t, "__result_1", table.Columns[2].Name)
|
||||
assert.Equal(t, "A", table.Columns[2].QueryName)
|
||||
assert.Equal(t, qbtypes.ColumnTypeAggregation, table.Columns[2].Type)
|
||||
|
||||
assert.Equal(t, "__result_0", table.Columns[3].Name)
|
||||
assert.Equal(t, "B", table.Columns[3].QueryName)
|
||||
assert.Equal(t, qbtypes.ColumnTypeAggregation, table.Columns[3].Type)
|
||||
|
||||
// Should have 4 rows (one for each unique service)
|
||||
assert.Len(t, table.Data, 4)
|
||||
|
||||
// Create a map to check row values by service name
|
||||
rowMap := make(map[string][]any)
|
||||
for _, row := range table.Data {
|
||||
serviceName := row[0].(string)
|
||||
rowMap[serviceName] = row
|
||||
}
|
||||
|
||||
// Check currencyservice row
|
||||
currencyRow := rowMap["currencyservice"]
|
||||
assert.Equal(t, "currencyservice", currencyRow[0])
|
||||
assert.Equal(t, 3380.0, currencyRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, currencyRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, currencyRow[3]) // B result 0
|
||||
|
||||
// Check mongodb row
|
||||
mongoRow := rowMap["mongodb"]
|
||||
assert.Equal(t, "mongodb", mongoRow[0])
|
||||
assert.Equal(t, 5713.0, mongoRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, mongoRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, mongoRow[3]) // B result 0
|
||||
|
||||
// Check cartservice row
|
||||
cartRow := rowMap["cartservice"]
|
||||
assert.Equal(t, "cartservice", cartRow[0])
|
||||
assert.Equal(t, 3322.0, cartRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, cartRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, cartRow[3]) // B result 0
|
||||
|
||||
// Check kafka row (only in B)
|
||||
kafkaRow := rowMap["kafka"]
|
||||
assert.Equal(t, "kafka", kafkaRow[0])
|
||||
assert.Equal(t, "n/a", kafkaRow[1]) // A result 0
|
||||
assert.Equal(t, "n/a", kafkaRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, kafkaRow[3]) // B result 0
|
||||
}
|
||||
|
||||
func TestFormatScalarResultsAsTableWithTimeSeriesData(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Create time series results that need to be converted to scalar
|
||||
results := map[string]*qbtypes.Result{
|
||||
"A": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "A",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "count",
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 100},
|
||||
{Timestamp: 2000, Value: 200},
|
||||
},
|
||||
},
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
Value: "backend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 300},
|
||||
{Timestamp: 2000, Value: 400},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"B": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "B",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "sum",
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
{Timestamp: 2000, Value: 20},
|
||||
},
|
||||
},
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
Value: "backend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 30},
|
||||
{Timestamp: 2000, Value: 40},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "A",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "B",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Format as table
|
||||
result := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Get the table
|
||||
table, ok := result["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have 3 columns: 1 group + 1 from A + 1 from B
|
||||
assert.Len(t, table.Columns, 3)
|
||||
|
||||
// Check column names
|
||||
assert.Equal(t, "service.name", table.Columns[0].Name)
|
||||
assert.Equal(t, "count", table.Columns[1].Name) // Should use alias
|
||||
assert.Equal(t, "sum", table.Columns[2].Name) // Should use alias
|
||||
|
||||
// Should have 2 rows (frontend and backend)
|
||||
assert.Len(t, table.Data, 2)
|
||||
|
||||
// Create a map to check row values by service name
|
||||
rowMap := make(map[string][]any)
|
||||
for _, row := range table.Data {
|
||||
serviceName := row[0].(string)
|
||||
rowMap[serviceName] = row
|
||||
}
|
||||
|
||||
// Check frontend row (should have last values)
|
||||
frontendRow := rowMap["frontend"]
|
||||
assert.Equal(t, "frontend", frontendRow[0])
|
||||
assert.Equal(t, 200.0, frontendRow[1]) // Last value from A
|
||||
assert.Equal(t, 20.0, frontendRow[2]) // Last value from B
|
||||
|
||||
// Check backend row
|
||||
backendRow := rowMap["backend"]
|
||||
assert.Equal(t, "backend", backendRow[0])
|
||||
assert.Equal(t, 400.0, backendRow[1]) // Last value from A
|
||||
assert.Equal(t, 40.0, backendRow[2]) // Last value from B
|
||||
}
|
||||
813 pkg/querier/postprocess_test.go Normal file
@@ -0,0 +1,813 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestApplyHavingClause(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
result *qbtypes.Result
|
||||
having *qbtypes.Having
|
||||
expected int // expected number of values after filtering
|
||||
}{
|
||||
{
|
||||
name: "having clause not implemented yet",
|
||||
result: &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 5},
|
||||
{Timestamp: 2000, Value: 15},
|
||||
{Timestamp: 3000, Value: 8},
|
||||
{Timestamp: 4000, Value: 20},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
having: &qbtypes.Having{
|
||||
Expression: "value > 10",
|
||||
},
|
||||
expected: 4, // No filtering for now
|
||||
},
|
||||
{
|
||||
name: "no having clause",
|
||||
result: &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 5},
|
||||
{Timestamp: 2000, Value: 15},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
having: nil,
|
||||
expected: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := q.applyHavingClause(tt.result, tt.having)
|
||||
tsData := result.Value.(*qbtypes.TimeSeriesData)
|
||||
|
||||
totalValues := 0
|
||||
for _, agg := range tsData.Aggregations {
|
||||
for _, series := range agg.Series {
|
||||
totalValues += len(series.Values)
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, tt.expected, totalValues)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplySeriesLimit(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
result := &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "service1"},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
{Timestamp: 2000, Value: 20},
|
||||
},
|
||||
},
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "service2"},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 30},
|
||||
{Timestamp: 2000, Value: 40},
|
||||
},
|
||||
},
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "service3"},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 5},
|
||||
{Timestamp: 2000, Value: 10},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Test limiting to 2 series with default ordering (by value desc)
|
||||
limited := q.applySeriesLimit(result, 2, nil)
|
||||
tsData := limited.Value.(*qbtypes.TimeSeriesData)
|
||||
|
||||
assert.Len(t, tsData.Aggregations[0].Series, 2)
|
||||
|
||||
// Should keep service2 (avg=35) and service1 (avg=15), drop service3 (avg=7.5)
|
||||
assert.Equal(t, "service2", tsData.Aggregations[0].Series[0].Labels[0].Value)
|
||||
assert.Equal(t, "service1", tsData.Aggregations[0].Series[1].Labels[0].Value)
|
||||
}
|
||||
|
||||
func TestApplyReduceTo(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
expression string
|
||||
values []float64
|
||||
expectedValue float64
|
||||
}{
|
||||
{
|
||||
name: "reduce to last",
|
||||
expression: "last",
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 30,
|
||||
},
|
||||
{
|
||||
name: "reduce to sum",
|
||||
expression: "sum",
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 60,
|
||||
},
|
||||
{
|
||||
name: "reduce to avg",
|
||||
expression: "avg",
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 20,
|
||||
},
|
||||
{
|
||||
name: "reduce to min",
|
||||
expression: "min",
|
||||
values: []float64{10, 20, 5, 30},
|
||||
expectedValue: 5,
|
||||
},
|
||||
{
|
||||
name: "reduce to max",
|
||||
expression: "max",
|
||||
values: []float64{10, 20, 50, 30},
|
||||
expectedValue: 50,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create time series values
|
||||
var values []*qbtypes.TimeSeriesValue
|
||||
for i, v := range tt.values {
|
||||
values = append(values, &qbtypes.TimeSeriesValue{
|
||||
Timestamp: int64(i * 1000),
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
|
||||
result := &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: values,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
secondaryAggs := []qbtypes.SecondaryAggregation{
|
||||
{Expression: tt.expression},
|
||||
}
|
||||
|
||||
reduced := q.applyReduceTo(result, secondaryAggs)
|
||||
tsData := reduced.Value.(*qbtypes.TimeSeriesData)
|
||||
|
||||
require.Len(t, tsData.Aggregations[0].Series[0].Values, 1)
|
||||
assert.Equal(t, tt.expectedValue, tsData.Aggregations[0].Series[0].Values[0].Value)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFillGaps(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
Start: 1000,
|
||||
End: 5000,
|
||||
RequestType: qbtypes.RequestTypeTimeSeries,
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "test",
|
||||
StepInterval: qbtypes.Step{Duration: time.Second},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
FormatOptions: &qbtypes.FormatOptions{
|
||||
FillGaps: true,
|
||||
},
|
||||
}
|
||||
|
||||
results := map[string]*qbtypes.Result{
|
||||
"test": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
{Timestamp: 3000, Value: 30},
|
||||
// Missing 2000, 4000, 5000
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
filled := q.fillGaps(results, req)
|
||||
tsData := filled["test"].Value.(*qbtypes.TimeSeriesData)
|
||||
values := tsData.Aggregations[0].Series[0].Values
|
||||
|
||||
// Should have 5 values: 1000, 2000, 3000, 4000, 5000
|
||||
assert.Len(t, values, 5)
|
||||
|
||||
// Check filled values
|
||||
assert.Equal(t, int64(1000), values[0].Timestamp)
|
||||
assert.Equal(t, 10.0, values[0].Value)
|
||||
|
||||
assert.Equal(t, int64(2000), values[1].Timestamp)
|
||||
assert.Equal(t, 0.0, values[1].Value) // Filled with 0
|
||||
|
||||
assert.Equal(t, int64(3000), values[2].Timestamp)
|
||||
assert.Equal(t, 30.0, values[2].Value)
|
||||
|
||||
assert.Equal(t, int64(4000), values[3].Timestamp)
|
||||
assert.Equal(t, 0.0, values[3].Value) // Filled with 0
|
||||
|
||||
assert.Equal(t, int64(5000), values[4].Timestamp)
|
||||
assert.Equal(t, 0.0, values[4].Value) // Filled with 0
|
||||
}
|
||||
|
||||
func TestApplyMetricReduceTo(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
reduceOp qbtypes.ReduceTo
|
||||
values []float64
|
||||
expectedValue float64
|
||||
}{
|
||||
{
|
||||
name: "reduce to last",
|
||||
reduceOp: qbtypes.ReduceToLast,
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 30,
|
||||
},
|
||||
{
|
||||
name: "reduce to sum",
|
||||
reduceOp: qbtypes.ReduceToSum,
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 60,
|
||||
},
|
||||
{
|
||||
name: "reduce to avg",
|
||||
reduceOp: qbtypes.ReduceToAvg,
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 20,
|
||||
},
|
||||
{
|
||||
name: "reduce to min",
|
||||
reduceOp: qbtypes.ReduceToMin,
|
||||
values: []float64{10, 20, 5, 30},
|
||||
expectedValue: 5,
|
||||
},
|
||||
{
|
||||
name: "reduce to max",
|
||||
reduceOp: qbtypes.ReduceToMax,
|
||||
values: []float64{10, 20, 50, 30},
|
||||
expectedValue: 50,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create time series values
|
||||
var values []*qbtypes.TimeSeriesValue
|
||||
for i, v := range tt.values {
|
||||
values = append(values, &qbtypes.TimeSeriesValue{
|
||||
Timestamp: int64(i * 1000),
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
|
||||
result := &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: values,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
reduced := q.applyMetricReduceTo(result, tt.reduceOp)
|
||||
tsData := reduced.Value.(*qbtypes.TimeSeriesData)
|
||||
|
||||
require.Len(t, tsData.Aggregations[0].Series[0].Values, 1)
|
||||
assert.Equal(t, tt.expectedValue, tsData.Aggregations[0].Series[0].Values[0].Value)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPostProcessResultsWithMetricReduceTo(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test complete PostProcessResults flow with metric ReduceTo
|
||||
results := map[string]any{
|
||||
"metric_query": &qbtypes.TimeSeriesData{
|
||||
QueryName: "metric_query",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 100},
|
||||
{Timestamp: 2000, Value: 200},
|
||||
{Timestamp: 3000, Value: 150},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "metric_query",
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "test_metric",
|
||||
ReduceTo: qbtypes.ReduceToAvg, // Should use average (150)
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Process results
|
||||
processed, err := q.PostProcessResults(results, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the metric was reduced to average
|
||||
tsData := processed["metric_query"].(*qbtypes.TimeSeriesData)
|
||||
require.Len(t, tsData.Aggregations[0].Series[0].Values, 1)
|
||||
assert.Equal(t, 150.0, tsData.Aggregations[0].Series[0].Values[0].Value)
|
||||
}
|
||||
|
||||
func TestPostProcessMetricQuery(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test that metric query uses ReduceTo field
|
||||
result := &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test_metric",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
{Timestamp: 2000, Value: 20},
|
||||
{Timestamp: 3000, Value: 30},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
}
|
||||
|
||||
query := qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "test_metric",
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "test_metric",
|
||||
ReduceTo: qbtypes.ReduceToMax,
|
||||
},
|
||||
},
|
||||
Functions: []qbtypes.Function{},
|
||||
SecondaryAggregations: []qbtypes.SecondaryAggregation{
|
||||
{Expression: "sum"}, // This should be ignored when ReduceTo is set
|
||||
},
|
||||
}
|
||||
|
||||
// Process the metric query
|
||||
processed := postProcessMetricQuery(q, result, query, req)
|
||||
tsData := processed.Value.(*qbtypes.TimeSeriesData)
|
||||
|
||||
// Should have reduced to max value (30)
|
||||
require.Len(t, tsData.Aggregations[0].Series[0].Values, 1)
|
||||
assert.Equal(t, 30.0, tsData.Aggregations[0].Series[0].Values[0].Value)
|
||||
}
|
||||
|
||||
func TestFormatScalarResultsAsTable(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test simple scalar queries without groupBy (TimeSeriesData to ScalarData conversion)
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
FormatOptions: &qbtypes.FormatOptions{
|
||||
FormatTableResultForUI: true,
|
||||
},
|
||||
}
|
||||
|
||||
results := map[string]*qbtypes.Result{
|
||||
"queryA": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "queryA",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "count_result",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "requests",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 100},
|
||||
{Timestamp: 2000, Value: 200},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"queryB": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "queryB",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "sum_result",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "bytes",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 50.5678},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
formatted := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Should return table under "table" key when called directly
|
||||
table, ok := formatted["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have 2 columns
|
||||
assert.Len(t, table.Columns, 2)
|
||||
|
||||
// Check column names and metadata
|
||||
assert.Equal(t, "count_result", table.Columns[0].Name)
|
||||
assert.Equal(t, "requests", table.Columns[0].Meta.Unit)
|
||||
|
||||
assert.Equal(t, "sum_result", table.Columns[1].Name)
|
||||
assert.Equal(t, "bytes", table.Columns[1].Meta.Unit)
|
||||
|
||||
// Should have 1 row with 2 values
|
||||
assert.Len(t, table.Data, 1)
|
||||
assert.Len(t, table.Data[0], 2)
|
||||
|
||||
// Check values (last value from time series, rounded)
|
||||
assert.Equal(t, 200.0, table.Data[0][0]) // Last value from queryA
|
||||
assert.Equal(t, 50.57, table.Data[0][1]) // Rounded value from queryB
|
||||
}
|
||||
|
||||
func TestFormatScalarResultsAsTableWithScalarData(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test with ScalarData (already formatted from query execution)
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
FormatOptions: &qbtypes.FormatOptions{
|
||||
FormatTableResultForUI: true,
|
||||
},
|
||||
}
|
||||
|
||||
results := map[string]*qbtypes.Result{
|
||||
"queryA": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
QueryName: "queryA",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "count",
|
||||
},
|
||||
QueryName: "queryA",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"service1", 100.0},
|
||||
{"service2", 200.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
"queryB": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
QueryName: "queryB",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "sum",
|
||||
},
|
||||
QueryName: "queryB",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"service1", 50.0},
|
||||
{"service2", 75.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
formatted := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Should return a merged table with all results
|
||||
table, ok := formatted["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have 3 columns: service.name (group), count (from queryA), sum (from queryB)
|
||||
assert.Len(t, table.Columns, 3)
|
||||
assert.Equal(t, "service.name", table.Columns[0].Name)
|
||||
assert.Equal(t, qbtypes.ColumnTypeGroup, table.Columns[0].Type)
|
||||
// Aggregation columns
|
||||
assert.Equal(t, qbtypes.ColumnTypeAggregation, table.Columns[1].Type)
|
||||
assert.Equal(t, "queryA", table.Columns[1].QueryName)
|
||||
assert.Equal(t, qbtypes.ColumnTypeAggregation, table.Columns[2].Type)
|
||||
assert.Equal(t, "queryB", table.Columns[2].QueryName)
|
||||
|
||||
// Should have 2 rows
|
||||
assert.Len(t, table.Data, 2)
|
||||
// Check row values - sorted by first aggregation column (descending)
|
||||
// service2 has value 200, service1 has value 100, so service2 comes first
|
||||
assert.Equal(t, "service2", table.Data[0][0])
|
||||
assert.Equal(t, 200.0, table.Data[0][1])
|
||||
assert.Equal(t, 75.0, table.Data[0][2])
|
||||
assert.Equal(t, "service1", table.Data[1][0])
|
||||
assert.Equal(t, 100.0, table.Data[1][1])
|
||||
assert.Equal(t, 50.0, table.Data[1][2])
|
||||
}
|
||||
|
||||
func TestFormatScalarResultsAsTableMergesDuplicateRows(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test that duplicate rows are properly merged
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
FormatOptions: &qbtypes.FormatOptions{
|
||||
FormatTableResultForUI: true,
|
||||
},
|
||||
}
|
||||
|
||||
results := map[string]*qbtypes.Result{
|
||||
"A": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
QueryName: "A",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "count",
|
||||
},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"service1", 100.0},
|
||||
{"service2", 200.0},
|
||||
{"service3", 300.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
"B": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
QueryName: "B",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "sum",
|
||||
},
|
||||
QueryName: "B",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"service1", 150.0},
|
||||
{"service2", 250.0},
|
||||
{"service3", 350.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
formatted := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Should return a merged table
|
||||
table, ok := formatted["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have 3 columns: service.name, count (from A), sum (from B)
|
||||
assert.Len(t, table.Columns, 3)
|
||||
|
||||
// Should have 3 rows (not 6) - one per service
|
||||
assert.Len(t, table.Data, 3)
|
||||
|
||||
// Check that rows are properly merged (sorted by first aggregation column desc)
|
||||
assert.Equal(t, "service3", table.Data[0][0]) // Highest count value
|
||||
assert.Equal(t, 300.0, table.Data[0][1]) // count from A
|
||||
assert.Equal(t, 350.0, table.Data[0][2]) // sum from B
|
||||
|
||||
assert.Equal(t, "service2", table.Data[1][0])
|
||||
assert.Equal(t, 200.0, table.Data[1][1])
|
||||
assert.Equal(t, 250.0, table.Data[1][2])
|
||||
|
||||
assert.Equal(t, "service1", table.Data[2][0]) // Lowest count value
|
||||
assert.Equal(t, 100.0, table.Data[2][1])
|
||||
assert.Equal(t, 150.0, table.Data[2][2])
|
||||
}
|
||||
|
||||
func TestFormatScalarResultsAsTableWithEmptyResults(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test with empty results (queries executed but returned no data)
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
FormatOptions: &qbtypes.FormatOptions{
|
||||
FormatTableResultForUI: true,
|
||||
},
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
|
||||
Name: "A",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
|
||||
Name: "B",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
results := map[string]*qbtypes.Result{
|
||||
"A": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "A",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "logs_count",
|
||||
Series: []*qbtypes.TimeSeries{}, // Empty series
|
||||
},
|
||||
{
|
||||
Index: 1,
|
||||
Alias: "unique hosts",
|
||||
Series: []*qbtypes.TimeSeries{}, // Empty series
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"B": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "B",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "hosts",
|
||||
Series: []*qbtypes.TimeSeries{}, // Empty series
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
formatted := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Should return a table structure even with empty results
|
||||
table, ok := formatted["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have columns for the aggregations even with no data
|
||||
// Columns: logs_count, unique hosts (from A), hosts (from B)
|
||||
assert.Len(t, table.Columns, 3)
|
||||
|
||||
// Should have no data rows
|
||||
assert.Len(t, table.Data, 0)
|
||||
|
||||
// But should have columns for the empty aggregations
|
||||
assert.True(t, len(table.Columns) > 0)
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"slices"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
@@ -12,7 +13,6 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/prometheus"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
@@ -54,6 +54,53 @@ func New(
|
||||
}
|
||||
}
|
||||
|
||||
// extractShiftFromBuilderQuery extracts the shift value from timeShift function if present
|
||||
func extractShiftFromBuilderQuery[T any](spec qbtypes.QueryBuilderQuery[T]) int64 {
|
||||
for _, fn := range spec.Functions {
|
||||
if fn.Name == qbtypes.FunctionNameTimeShift && len(fn.Args) > 0 {
|
||||
switch v := fn.Args[0].Value.(type) {
|
||||
case float64:
|
||||
return int64(v)
|
||||
case int64:
|
||||
return v
|
||||
case int:
|
||||
return int64(v)
|
||||
case string:
|
||||
if shiftFloat, err := strconv.ParseFloat(v, 64); err == nil {
|
||||
return int64(shiftFloat)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// adjustTimeRangeForShift adjusts the time range based on the shift value from timeShift function
|
||||
func adjustTimeRangeForShift[T any](spec qbtypes.QueryBuilderQuery[T], tr qbtypes.TimeRange, kind qbtypes.RequestType) qbtypes.TimeRange {
|
||||
// Only apply time shift for time series and scalar queries
|
||||
// Raw/list queries don't support timeshift
|
||||
if kind != qbtypes.RequestTypeTimeSeries && kind != qbtypes.RequestTypeScalar {
|
||||
return tr
|
||||
}
|
||||
|
||||
// Use the ShiftBy field if it's already populated, otherwise extract it
|
||||
shiftBy := spec.ShiftBy
|
||||
if shiftBy == 0 {
|
||||
shiftBy = extractShiftFromBuilderQuery(spec)
|
||||
}
|
||||
|
||||
if shiftBy == 0 {
|
||||
return tr
|
||||
}
|
||||
|
||||
// ShiftBy is in seconds, convert to milliseconds and shift backward in time
|
||||
shiftMS := shiftBy * 1000
|
||||
return qbtypes.TimeRange{
|
||||
From: tr.From - uint64(shiftMS),
|
||||
To: tr.To - uint64(shiftMS),
|
||||
}
|
||||
}
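A minimal sketch of the shift arithmetic above, assuming From/To are epoch milliseconds and the shift value is in seconds (illustrative only, not part of the change set):

// Sketch: a timeShift of 60s moves the queried window back by 60,000 ms.
package main

import "fmt"

func main() {
	from, to := uint64(1000000), uint64(2000000)
	shiftBy := int64(60) // seconds, as extracted from the timeShift function
	shiftMS := shiftBy * 1000
	fmt.Println(from-uint64(shiftMS), to-uint64(shiftMS)) // 940000 1940000
}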
|
||||
|
||||
func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtypes.QueryRangeRequest) (*qbtypes.QueryRangeResponse, error) {
|
||||
|
||||
queries := make(map[string]qbtypes.Query)
|
||||
@@ -79,15 +126,24 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
|
||||
case qbtypes.QueryTypeBuilder:
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
bq := newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
|
||||
// Populate ShiftBy field for caching
|
||||
spec.ShiftBy = extractShiftFromBuilderQuery(spec)
|
||||
timeRange := adjustTimeRangeForShift(spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
|
||||
bq := newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, spec, timeRange, req.RequestType)
|
||||
queries[spec.Name] = bq
|
||||
steps[spec.Name] = spec.StepInterval
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
bq := newBuilderQuery(q.telemetryStore, q.logStmtBuilder, spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
|
||||
// Populate ShiftBy field for caching
|
||||
spec.ShiftBy = extractShiftFromBuilderQuery(spec)
|
||||
timeRange := adjustTimeRangeForShift(spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
|
||||
bq := newBuilderQuery(q.telemetryStore, q.logStmtBuilder, spec, timeRange, req.RequestType)
|
||||
queries[spec.Name] = bq
|
||||
steps[spec.Name] = spec.StepInterval
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
bq := newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
|
||||
// Populate ShiftBy field for caching
|
||||
spec.ShiftBy = extractShiftFromBuilderQuery(spec)
|
||||
timeRange := adjustTimeRangeForShift(spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
|
||||
bq := newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, spec, timeRange, req.RequestType)
|
||||
queries[spec.Name] = bq
|
||||
steps[spec.Name] = spec.StepInterval
|
||||
default:
|
||||
@@ -133,13 +189,43 @@ func (q *querier) run(ctx context.Context, orgID valuer.UUID, qs map[string]qbty
|
||||
}
|
||||
}
|
||||
|
||||
// Apply postprocessing
|
||||
processedResults, err := q.PostProcessResults(results, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert results to slice for response
|
||||
resultSlice := make([]any, 0, len(processedResults))
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
var queryName string
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
queryName = spec.Name
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
queryName = spec.Name
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
queryName = spec.Name
|
||||
case qbtypes.QueryBuilderFormula:
|
||||
queryName = spec.Name
|
||||
case qbtypes.PromQuery:
|
||||
queryName = spec.Name
|
||||
case qbtypes.ClickHouseQuery:
|
||||
queryName = spec.Name
|
||||
}
|
||||
|
||||
if result, ok := processedResults[queryName]; ok {
|
||||
resultSlice = append(resultSlice, result)
|
||||
}
|
||||
}
|
||||
|
||||
return &qbtypes.QueryRangeResponse{
|
||||
Type: req.RequestType,
|
||||
Data: struct {
|
||||
Results []any `json:"results"`
|
||||
Warnings []string `json:"warnings"`
|
||||
}{
|
||||
Results: maps.Values(results),
|
||||
Results: resultSlice,
|
||||
Warnings: warnings,
|
||||
},
|
||||
Meta: struct {
|
||||
@@ -159,6 +245,22 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
|
||||
// Get cached data and missing ranges
|
||||
cachedResult, missingRanges := q.bucketCache.GetMissRanges(ctx, orgID, query, step)
|
||||
|
||||
// Debug: Log cached result
|
||||
if cachedResult != nil {
|
||||
if tsData, ok := cachedResult.Value.(*qbtypes.TimeSeriesData); ok {
|
||||
totalSeries := 0
|
||||
seriesPerBucket := make(map[int]int)
|
||||
for _, agg := range tsData.Aggregations {
|
||||
totalSeries += len(agg.Series)
|
||||
seriesPerBucket[agg.Index] = len(agg.Series)
|
||||
}
|
||||
q.logger.DebugContext(ctx, "received cached result",
|
||||
"total_series", totalSeries,
|
||||
"series_per_bucket", seriesPerBucket,
|
||||
"missing_ranges", len(missingRanges))
|
||||
}
|
||||
}
|
||||
|
||||
// If no missing ranges, return cached result
|
||||
if len(missingRanges) == 0 && cachedResult != nil {
|
||||
return cachedResult, nil
|
||||
@@ -173,7 +275,7 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
|
||||
return nil, err
|
||||
}
|
||||
// Store in cache for future use
|
||||
q.bucketCache.Put(ctx, orgID, query, result)
|
||||
q.bucketCache.Put(ctx, orgID, query, step, result)
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
@@ -183,6 +285,10 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
|
||||
errors := make([]error, len(missingRanges))
|
||||
totalStats := qbtypes.ExecStats{}
|
||||
|
||||
q.logger.DebugContext(ctx, "executing queries for missing ranges",
|
||||
"missing_ranges_count", len(missingRanges),
|
||||
"ranges", missingRanges)
|
||||
|
||||
sem := make(chan struct{}, 4)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
@@ -224,7 +330,7 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
q.bucketCache.Put(ctx, orgID, query, result)
|
||||
q.bucketCache.Put(ctx, orgID, query, step, result)
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
@@ -247,8 +353,21 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
|
||||
mergedResult.Stats.BytesScanned += totalStats.BytesScanned
|
||||
mergedResult.Stats.DurationMS += totalStats.DurationMS
|
||||
|
||||
// Debug: Log before storing in cache
|
||||
if tsData, ok := mergedResult.Value.(*qbtypes.TimeSeriesData); ok {
|
||||
totalSeries := 0
|
||||
seriesPerBucket := make(map[int]int)
|
||||
for _, agg := range tsData.Aggregations {
|
||||
totalSeries += len(agg.Series)
|
||||
seriesPerBucket[agg.Index] = len(agg.Series)
|
||||
}
|
||||
q.logger.DebugContext(ctx, "storing merged result in cache",
|
||||
"total_series", totalSeries,
|
||||
"series_per_bucket", seriesPerBucket)
|
||||
}
|
||||
|
||||
// Store merged result in cache
|
||||
q.bucketCache.Put(ctx, orgID, query, mergedResult)
|
||||
q.bucketCache.Put(ctx, orgID, query, step, mergedResult)
|
||||
|
||||
return mergedResult, nil
|
||||
}
|
||||
@@ -261,11 +380,20 @@ func (q *querier) createRangedQuery(originalQuery qbtypes.Query, timeRange qbtyp
|
||||
case *chSQLQuery:
|
||||
return newchSQLQuery(q.telemetryStore, qt.query, qt.args, timeRange, qt.kind)
|
||||
case *builderQuery[qbtypes.TraceAggregation]:
|
||||
return newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, qt.spec, timeRange, qt.kind)
|
||||
// Populate ShiftBy for the new query
|
||||
qt.spec.ShiftBy = extractShiftFromBuilderQuery(qt.spec)
|
||||
adjustedTimeRange := adjustTimeRangeForShift(qt.spec, timeRange, qt.kind)
|
||||
return newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, qt.spec, adjustedTimeRange, qt.kind)
|
||||
case *builderQuery[qbtypes.LogAggregation]:
|
||||
return newBuilderQuery(q.telemetryStore, q.logStmtBuilder, qt.spec, timeRange, qt.kind)
|
||||
// Populate ShiftBy for the new query
|
||||
qt.spec.ShiftBy = extractShiftFromBuilderQuery(qt.spec)
|
||||
adjustedTimeRange := adjustTimeRangeForShift(qt.spec, timeRange, qt.kind)
|
||||
return newBuilderQuery(q.telemetryStore, q.logStmtBuilder, qt.spec, adjustedTimeRange, qt.kind)
|
||||
case *builderQuery[qbtypes.MetricAggregation]:
|
||||
return newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, qt.spec, timeRange, qt.kind)
|
||||
// Populate ShiftBy for the new query
|
||||
qt.spec.ShiftBy = extractShiftFromBuilderQuery(qt.spec)
|
||||
adjustedTimeRange := adjustTimeRangeForShift(qt.spec, timeRange, qt.kind)
|
||||
return newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, qt.spec, adjustedTimeRange, qt.kind)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -273,8 +401,29 @@ func (q *querier) createRangedQuery(originalQuery qbtypes.Query, timeRange qbtyp
|
||||
|
||||
// mergeResults merges cached result with fresh results
|
||||
func (q *querier) mergeResults(cached *qbtypes.Result, fresh []*qbtypes.Result) *qbtypes.Result {
|
||||
if cached == nil && len(fresh) == 1 {
|
||||
return fresh[0]
|
||||
if cached == nil {
|
||||
if len(fresh) == 1 {
|
||||
return fresh[0]
|
||||
}
|
||||
if len(fresh) == 0 {
|
||||
return nil
|
||||
}
|
||||
// If cached is nil but we have multiple fresh results, we need to merge them
|
||||
// We need to merge all fresh results properly to avoid duplicates
|
||||
merged := &qbtypes.Result{
|
||||
Type: fresh[0].Type,
|
||||
Stats: fresh[0].Stats,
|
||||
Warnings: fresh[0].Warnings,
|
||||
}
|
||||
|
||||
// Merge all fresh results including the first one
|
||||
switch merged.Type {
|
||||
case qbtypes.RequestTypeTimeSeries:
|
||||
// Pass nil as cached value to ensure proper merging of all fresh results
|
||||
merged.Value = q.mergeTimeSeriesResults(nil, fresh)
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
||||
|
||||
// Start with cached result
|
||||
@@ -315,23 +464,71 @@ func (q *querier) mergeResults(cached *qbtypes.Result, fresh []*qbtypes.Result)
|
||||
// mergeTimeSeriesResults merges time series data
|
||||
func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, freshResults []*qbtypes.Result) *qbtypes.TimeSeriesData {
|
||||
|
||||
// Map to store merged series by query name and series key
|
||||
// Map to store merged series by aggregation index and series key
|
||||
seriesMap := make(map[int]map[string]*qbtypes.TimeSeries)
|
||||
// Map to store aggregation bucket metadata
|
||||
bucketMetadata := make(map[int]*qbtypes.AggregationBucket)
|
||||
|
||||
for _, aggBucket := range cachedValue.Aggregations {
|
||||
if seriesMap[aggBucket.Index] == nil {
|
||||
seriesMap[aggBucket.Index] = make(map[string]*qbtypes.TimeSeries)
|
||||
// Debug: Log input data
|
||||
if q.logger != nil {
|
||||
cachedCount := 0
|
||||
cachedSeriesDetails := make(map[int][]string)
|
||||
if cachedValue != nil && cachedValue.Aggregations != nil {
|
||||
for _, agg := range cachedValue.Aggregations {
|
||||
cachedCount += len(agg.Series)
|
||||
for _, s := range agg.Series {
|
||||
key := qbtypes.GetUniqueSeriesKey(s.Labels)
|
||||
cachedSeriesDetails[agg.Index] = append(cachedSeriesDetails[agg.Index], key)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, series := range aggBucket.Series {
|
||||
key := qbtypes.GetUniqueSeriesKey(series.Labels)
|
||||
seriesMap[aggBucket.Index][key] = series
|
||||
q.logger.Debug("mergeTimeSeriesResults called",
|
||||
"cached_series_count", cachedCount,
|
||||
"cached_series_details", cachedSeriesDetails,
|
||||
"fresh_results_count", len(freshResults))
|
||||
}
|
||||
|
||||
// Process cached data if available
|
||||
if cachedValue != nil && cachedValue.Aggregations != nil {
|
||||
for _, aggBucket := range cachedValue.Aggregations {
|
||||
if seriesMap[aggBucket.Index] == nil {
|
||||
seriesMap[aggBucket.Index] = make(map[string]*qbtypes.TimeSeries)
|
||||
}
|
||||
if bucketMetadata[aggBucket.Index] == nil {
|
||||
bucketMetadata[aggBucket.Index] = aggBucket
|
||||
}
|
||||
for _, series := range aggBucket.Series {
|
||||
key := qbtypes.GetUniqueSeriesKey(series.Labels)
|
||||
if existingSeries, ok := seriesMap[aggBucket.Index][key]; ok {
|
||||
// Merge values from duplicate series in cached data, avoiding duplicate timestamps
|
||||
timestampMap := make(map[int64]bool)
|
||||
for _, v := range existingSeries.Values {
|
||||
timestampMap[v.Timestamp] = true
|
||||
}
|
||||
|
||||
// Only add values with new timestamps
|
||||
for _, v := range series.Values {
|
||||
if !timestampMap[v.Timestamp] {
|
||||
existingSeries.Values = append(existingSeries.Values, v)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Create a copy to avoid modifying the cached data
|
||||
seriesCopy := &qbtypes.TimeSeries{
|
||||
Labels: series.Labels,
|
||||
Values: make([]*qbtypes.TimeSeriesValue, len(series.Values)),
|
||||
}
|
||||
copy(seriesCopy.Values, series.Values)
|
||||
seriesMap[aggBucket.Index][key] = seriesCopy
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add fresh series
|
||||
for _, result := range freshResults {
|
||||
freshTS, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
if !ok || freshTS == nil || freshTS.Aggregations == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -339,6 +536,12 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
|
||||
if seriesMap[aggBucket.Index] == nil {
|
||||
seriesMap[aggBucket.Index] = make(map[string]*qbtypes.TimeSeries)
|
||||
}
|
||||
// Prefer fresh metadata over cached metadata
|
||||
if aggBucket.Alias != "" || aggBucket.Meta.Unit != "" {
|
||||
bucketMetadata[aggBucket.Index] = aggBucket
|
||||
} else if bucketMetadata[aggBucket.Index] == nil {
|
||||
bucketMetadata[aggBucket.Index] = aggBucket
|
||||
}
|
||||
}
|
||||
|
||||
for _, aggBucket := range freshTS.Aggregations {
|
||||
@@ -346,8 +549,19 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
|
||||
key := qbtypes.GetUniqueSeriesKey(series.Labels)
|
||||
|
||||
if existingSeries, ok := seriesMap[aggBucket.Index][key]; ok {
|
||||
// Merge values
|
||||
existingSeries.Values = append(existingSeries.Values, series.Values...)
|
||||
// Merge values, avoiding duplicate timestamps
|
||||
// Create a map to track existing timestamps
|
||||
timestampMap := make(map[int64]bool)
|
||||
for _, v := range existingSeries.Values {
|
||||
timestampMap[v.Timestamp] = true
|
||||
}
|
||||
|
||||
// Only add values with new timestamps
|
||||
for _, v := range series.Values {
|
||||
if !timestampMap[v.Timestamp] {
|
||||
existingSeries.Values = append(existingSeries.Values, v)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// New series
|
||||
seriesMap[aggBucket.Index][key] = series
|
||||
@@ -357,10 +571,18 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
|
||||
}
|
||||
|
||||
result := &qbtypes.TimeSeriesData{
|
||||
QueryName: cachedValue.QueryName,
|
||||
Aggregations: []*qbtypes.AggregationBucket{},
|
||||
}
|
||||
|
||||
// Set QueryName from cached or first fresh result
|
||||
if cachedValue != nil {
|
||||
result.QueryName = cachedValue.QueryName
|
||||
} else if len(freshResults) > 0 {
|
||||
if freshTS, ok := freshResults[0].Value.(*qbtypes.TimeSeriesData); ok && freshTS != nil {
|
||||
result.QueryName = freshTS.QueryName
|
||||
}
|
||||
}
|
||||
|
||||
for index, series := range seriesMap {
|
||||
var aggSeries []*qbtypes.TimeSeries
|
||||
for _, s := range series {
|
||||
@@ -377,10 +599,38 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
|
||||
aggSeries = append(aggSeries, s)
|
||||
}
|
||||
|
||||
result.Aggregations = append(result.Aggregations, &qbtypes.AggregationBucket{
|
||||
// Preserve bucket metadata from either cached or fresh results
|
||||
bucket := &qbtypes.AggregationBucket{
|
||||
Index: index,
|
||||
Series: aggSeries,
|
||||
})
|
||||
}
|
||||
if metadata, ok := bucketMetadata[index]; ok {
|
||||
bucket.Alias = metadata.Alias
|
||||
bucket.Meta = metadata.Meta
|
||||
}
|
||||
|
||||
result.Aggregations = append(result.Aggregations, bucket)
|
||||
}
|
||||
|
||||
// Debug: Log output data
|
||||
if q.logger != nil {
|
||||
finalCount := 0
|
||||
finalSeriesDetails := make(map[int][]string)
|
||||
for _, agg := range result.Aggregations {
|
||||
finalCount += len(agg.Series)
|
||||
for _, s := range agg.Series {
|
||||
key := qbtypes.GetUniqueSeriesKey(s.Labels)
|
||||
// Also log the actual label values for debugging
|
||||
labelDetails := make([]string, 0, len(s.Labels))
|
||||
for _, l := range s.Labels {
|
||||
labelDetails = append(labelDetails, fmt.Sprintf("%s=%v", l.Key.Name, l.Value))
|
||||
}
|
||||
finalSeriesDetails[agg.Index] = append(finalSeriesDetails[agg.Index], fmt.Sprintf("key=%s,labels=%v", key, labelDetails))
|
||||
}
|
||||
}
|
||||
q.logger.Debug("mergeTimeSeriesResults returning",
|
||||
"final_series_count", finalCount,
|
||||
"final_series_details", finalSeriesDetails)
|
||||
}
|
||||
|
||||
return result
|
||||
|
||||
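The duplicate-timestamp guard used when merging cached and fresh series, reduced to a standalone sketch over plain timestamp slices (illustrative only, not part of the change set):

package main

import "fmt"

// mergeTimestamps mirrors the merge loops above: timestamps already present in
// the existing series are kept, and only fresh values with new timestamps are appended.
func mergeTimestamps(existing, fresh []int64) []int64 {
	seen := make(map[int64]bool, len(existing))
	for _, ts := range existing {
		seen[ts] = true
	}
	for _, ts := range fresh {
		if !seen[ts] {
			existing = append(existing, ts)
		}
	}
	return existing
}

func main() {
	fmt.Println(mergeTimestamps([]int64{1000, 2000, 3000}, []int64{3000, 4000})) // [1000 2000 3000 4000]
}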
229
pkg/querier/querier_test.go
Normal file
@@ -0,0 +1,229 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestAdjustTimeRangeForShift tests the time range adjustment logic
|
||||
func TestAdjustTimeRangeForShift(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
spec qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
|
||||
timeRange qbtypes.TimeRange
|
||||
requestType qbtypes.RequestType
|
||||
expectedFromMS uint64
|
||||
expectedToMS uint64
|
||||
}{
|
||||
{
|
||||
name: "no shift",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{},
|
||||
},
|
||||
timeRange: qbtypes.TimeRange{
|
||||
From: 1000000,
|
||||
To: 2000000,
|
||||
},
|
||||
requestType: qbtypes.RequestTypeTimeSeries,
|
||||
expectedFromMS: 1000000,
|
||||
expectedToMS: 2000000,
|
||||
},
|
||||
{
|
||||
name: "shift by 60 seconds using timeShift function",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: "60"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
timeRange: qbtypes.TimeRange{
|
||||
From: 1000000,
|
||||
To: 2000000,
|
||||
},
|
||||
requestType: qbtypes.RequestTypeTimeSeries,
|
||||
expectedFromMS: 940000, // 1000000 - 60000
|
||||
expectedToMS: 1940000, // 2000000 - 60000
|
||||
},
|
||||
{
|
||||
name: "shift by negative 30 seconds (future shift)",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: "-30"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
timeRange: qbtypes.TimeRange{
|
||||
From: 1000000,
|
||||
To: 2000000,
|
||||
},
|
||||
requestType: qbtypes.RequestTypeTimeSeries,
|
||||
expectedFromMS: 1030000, // 1000000 - (-30000)
|
||||
expectedToMS: 2030000, // 2000000 - (-30000)
|
||||
},
|
||||
{
|
||||
name: "no shift for raw request type even with timeShift function",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: "3600"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
timeRange: qbtypes.TimeRange{
|
||||
From: 1000000,
|
||||
To: 2000000,
|
||||
},
|
||||
requestType: qbtypes.RequestTypeRaw,
|
||||
expectedFromMS: 1000000, // No shift for raw queries
|
||||
expectedToMS: 2000000,
|
||||
},
|
||||
{
|
||||
name: "shift applied for scalar request type with timeShift function",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: "3600"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
timeRange: qbtypes.TimeRange{
|
||||
From: 10000000,
|
||||
To: 20000000,
|
||||
},
|
||||
requestType: qbtypes.RequestTypeScalar,
|
||||
expectedFromMS: 6400000, // 10000000 - 3600000
|
||||
expectedToMS: 16400000, // 20000000 - 3600000
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := adjustTimeRangeForShift(tt.spec, tt.timeRange, tt.requestType)
|
||||
assert.Equal(t, tt.expectedFromMS, result.From, "fromMS mismatch")
|
||||
assert.Equal(t, tt.expectedToMS, result.To, "toMS mismatch")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestExtractShiftFromBuilderQuery tests the shift extraction logic
|
||||
func TestExtractShiftFromBuilderQuery(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
spec qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
|
||||
expectedShiftBy int64
|
||||
}{
|
||||
{
|
||||
name: "extract from timeShift function with float64",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: float64(3600)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedShiftBy: 3600,
|
||||
},
|
||||
{
|
||||
name: "extract from timeShift function with int64",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: int64(3600)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedShiftBy: 3600,
|
||||
},
|
||||
{
|
||||
name: "extract from timeShift function with string",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: "3600"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedShiftBy: 3600,
|
||||
},
|
||||
{
|
||||
name: "no timeShift function",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameAbsolute,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedShiftBy: 0,
|
||||
},
|
||||
{
|
||||
name: "invalid timeShift value",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: "invalid"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedShiftBy: 0,
|
||||
},
|
||||
{
|
||||
name: "multiple functions with timeShift",
|
||||
spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Functions: []qbtypes.Function{
|
||||
{
|
||||
Name: qbtypes.FunctionNameAbsolute,
|
||||
},
|
||||
{
|
||||
Name: qbtypes.FunctionNameTimeShift,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: "1800"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: qbtypes.FunctionNameClampMax,
|
||||
Args: []qbtypes.FunctionArg{
|
||||
{Value: "100"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedShiftBy: 1800,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
shiftBy := extractShiftFromBuilderQuery(tt.spec)
|
||||
assert.Equal(t, tt.expectedShiftBy, shiftBy)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -68,7 +68,7 @@ func CollisionHandledFinalExpr(
|
||||
return "", nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
|
||||
} else {
|
||||
// not even a close match, return an error
|
||||
return "", nil, err
|
||||
return "", nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
|
||||
}
|
||||
} else {
|
||||
for _, key := range keysForField {
|
||||
|
||||
@@ -46,7 +46,7 @@ func (b *defaultConditionBuilder) ConditionFor(
|
||||
) (string, error) {
|
||||
|
||||
if key.FieldContext != telemetrytypes.FieldContextResource {
|
||||
return "", nil
|
||||
return "true", nil
|
||||
}
|
||||
|
||||
column, err := b.fm.ColumnFor(ctx, key)
|
||||
|
||||
@@ -22,7 +22,7 @@ type filterExpressionVisitor struct {
|
||||
conditionBuilder qbtypes.ConditionBuilder
|
||||
warnings []string
|
||||
fieldKeys map[string][]*telemetrytypes.TelemetryFieldKey
|
||||
errors []error
|
||||
errors []string
|
||||
builder *sqlbuilder.SelectBuilder
|
||||
fullTextColumn *telemetrytypes.TelemetryFieldKey
|
||||
jsonBodyPrefix string
|
||||
@@ -90,10 +90,12 @@ func PrepareWhereClause(query string, opts FilterExprVisitorOpts) (*sqlbuilder.W
|
||||
combinedErrors := errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"found %d syntax errors while parsing the filter expression: %v",
|
||||
"found %d syntax errors while parsing the filter expression",
|
||||
len(parserErrorListener.SyntaxErrors),
|
||||
parserErrorListener.SyntaxErrors,
|
||||
)
|
||||
for _, err := range parserErrorListener.SyntaxErrors {
|
||||
combinedErrors = combinedErrors.WithAdditional(err.Error())
|
||||
}
|
||||
return nil, nil, combinedErrors
|
||||
}
|
||||
|
||||
@@ -105,10 +107,12 @@ func PrepareWhereClause(query string, opts FilterExprVisitorOpts) (*sqlbuilder.W
|
||||
combinedErrors := errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"found %d errors while parsing the search expression: %v",
|
||||
"found %d errors while parsing the search expression",
|
||||
len(visitor.errors),
|
||||
visitor.errors,
|
||||
)
|
||||
for _, err := range visitor.errors {
|
||||
combinedErrors = combinedErrors.WithAdditional(err)
|
||||
}
|
||||
return nil, nil, combinedErrors
|
||||
}
|
||||
|
||||
@@ -238,11 +242,7 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
|
||||
}
|
||||
|
||||
if v.fullTextColumn == nil {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"full text search is not supported",
|
||||
))
|
||||
v.errors = append(v.errors, "full text search is not supported")
|
||||
return ""
|
||||
}
|
||||
child := ctx.GetChild(0)
|
||||
@@ -251,7 +251,7 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
|
||||
keyText := keyCtx.GetText()
|
||||
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, keyText, v.builder)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build full text search condition"))
|
||||
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
|
||||
return ""
|
||||
}
|
||||
return cond
|
||||
@@ -266,12 +266,12 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
|
||||
} else if valCtx.KEY() != nil {
|
||||
text = valCtx.KEY().GetText()
|
||||
} else {
|
||||
v.errors = append(v.errors, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "unsupported value type: %s", valCtx.GetText()))
|
||||
v.errors = append(v.errors, fmt.Sprintf("unsupported value type: %s", valCtx.GetText()))
|
||||
return ""
|
||||
}
|
||||
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, text, v.builder)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build full text search condition"))
|
||||
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
|
||||
return ""
|
||||
}
|
||||
return cond
|
||||
@@ -419,7 +419,7 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
|
||||
for _, key := range keys {
|
||||
condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, value, v.builder)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build condition"))
|
||||
v.errors = append(v.errors, fmt.Sprintf("failed to build condition: %s", err.Error()))
|
||||
return ""
|
||||
}
|
||||
conds = append(conds, condition)
|
||||
@@ -471,16 +471,12 @@ func (v *filterExpressionVisitor) VisitFullText(ctx *grammar.FullTextContext) an
|
||||
}
|
||||
|
||||
if v.fullTextColumn == nil {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"full text search is not supported",
|
||||
))
|
||||
v.errors = append(v.errors, "full text search is not supported")
|
||||
return ""
|
||||
}
|
||||
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, text, v.builder)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build full text search condition"))
|
||||
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
|
||||
return ""
|
||||
}
|
||||
return cond
|
||||
@@ -498,34 +494,19 @@ func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallCon
|
||||
functionName = "hasAll"
|
||||
} else {
|
||||
// Default fallback
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"unknown function `%s`",
|
||||
ctx.GetText(),
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("unknown function `%s`", ctx.GetText()))
|
||||
return ""
|
||||
}
|
||||
params := v.Visit(ctx.FunctionParamList()).([]any)
|
||||
|
||||
if len(params) < 2 {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"function `%s` expects key and value parameters",
|
||||
functionName,
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("function `%s` expects key and value parameters", functionName))
|
||||
return ""
|
||||
}
|
||||
|
||||
keys, ok := params[0].([]*telemetrytypes.TelemetryFieldKey)
|
||||
if !ok {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"function `%s` expects key parameter to be a field key",
|
||||
functionName,
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("function `%s` expects key parameter to be a field key", functionName))
|
||||
return ""
|
||||
}
|
||||
value := params[1:]
|
||||
@@ -536,12 +517,7 @@ func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallCon
|
||||
if strings.HasPrefix(key.Name, v.jsonBodyPrefix) {
|
||||
fieldName, _ = v.jsonKeyToKey(context.Background(), key, qbtypes.FilterOperatorUnknown, value)
|
||||
} else {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"function `%s` supports only body JSON search",
|
||||
functionName,
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("function `%s` supports only body JSON search", functionName))
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -603,12 +579,7 @@ func (v *filterExpressionVisitor) VisitValue(ctx *grammar.ValueContext) any {
|
||||
} else if ctx.NUMBER() != nil {
|
||||
number, err := strconv.ParseFloat(ctx.NUMBER().GetText(), 64)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"failed to parse number %s",
|
||||
ctx.NUMBER().GetText(),
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("failed to parse number %s", ctx.NUMBER().GetText()))
|
||||
return ""
|
||||
}
|
||||
return number
|
||||
@@ -648,19 +619,11 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
|
||||
|
||||
if len(fieldKeysForName) == 0 {
|
||||
if strings.HasPrefix(fieldKey.Name, v.jsonBodyPrefix) && v.jsonBodyPrefix != "" && keyName == "" {
|
||||
v.errors = append(v.errors, errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"missing key for body json search - expected key of the form `body.key` (ex: `body.status`)",
|
||||
))
|
||||
v.errors = append(v.errors, "missing key for body json search - expected key of the form `body.key` (ex: `body.status`)")
|
||||
} else {
|
||||
// TODO(srikanthccv): do we want to return an error here?
|
||||
// should we infer the type and auto-magically build a key for expression?
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"key `%s` not found",
|
||||
fieldKey.Name,
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("key `%s` not found", fieldKey.Name))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -173,7 +173,7 @@ func (m *fieldMapper) ColumnExpressionFor(
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
|
||||
} else {
|
||||
// not even a close match, return an error
|
||||
return "", err
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
|
||||
}
|
||||
}
|
||||
} else if len(keysForField) == 1 {
|
||||
@@ -186,7 +186,7 @@ func (m *fieldMapper) ColumnExpressionFor(
|
||||
colName, _ = m.FieldFor(ctx, key)
|
||||
args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
|
||||
}
|
||||
colName = fmt.Sprintf("multiIf(%s)", strings.Join(args, ", "))
|
||||
colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"log/slog"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/querybuilder"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
@@ -14,10 +13,6 @@ import (
|
||||
"github.com/huandu/go-sqlbuilder"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrUnsupportedAggregation = errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported aggregation")
|
||||
)
|
||||
|
||||
type logQueryStatementBuilder struct {
|
||||
logger *slog.Logger
|
||||
metadataStore telemetrytypes.MetadataStore
|
||||
@@ -165,12 +160,19 @@ func (b *logQueryStatementBuilder) buildListQuery(
|
||||
|
||||
// Add order by
|
||||
for _, orderBy := range query.Order {
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction))
|
||||
colExpr, err := b.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
|
||||
}
|
||||
|
||||
// Add limit and offset
|
||||
if query.Limit > 0 {
|
||||
sb.Limit(query.Limit)
|
||||
} else {
|
||||
// default to 1k rows
|
||||
sb.Limit(100)
|
||||
}
|
||||
|
||||
if query.Offset > 0 {
|
||||
@@ -381,9 +383,9 @@ func (b *logQueryStatementBuilder) buildScalarQuery(
|
||||
for _, orderBy := range query.Order {
|
||||
idx, ok := aggOrderBy(orderBy, query)
|
||||
if ok {
|
||||
sb.OrderBy(fmt.Sprintf("__result_%d %s", idx, orderBy.Direction))
|
||||
sb.OrderBy(fmt.Sprintf("__result_%d %s", idx, orderBy.Direction.StringValue()))
|
||||
} else {
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction))
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -420,19 +422,25 @@ func (b *logQueryStatementBuilder) addFilterCondition(
|
||||
keys map[string][]*telemetrytypes.TelemetryFieldKey,
|
||||
) ([]string, error) {
|
||||
|
||||
// add filter expression
|
||||
filterWhereClause, warnings, err := querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
|
||||
FieldMapper: b.fm,
|
||||
ConditionBuilder: b.cb,
|
||||
FieldKeys: keys,
|
||||
SkipResourceFilter: true,
|
||||
FullTextColumn: b.fullTextColumn,
|
||||
JsonBodyPrefix: b.jsonBodyPrefix,
|
||||
JsonKeyToKey: b.jsonKeyToKey,
|
||||
})
|
||||
var filterWhereClause *sqlbuilder.WhereClause
|
||||
var warnings []string
|
||||
var err error
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if query.Filter != nil && query.Filter.Expression != "" {
|
||||
// add filter expression
|
||||
filterWhereClause, warnings, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
|
||||
FieldMapper: b.fm,
|
||||
ConditionBuilder: b.cb,
|
||||
FieldKeys: keys,
|
||||
SkipResourceFilter: true,
|
||||
FullTextColumn: b.fullTextColumn,
|
||||
JsonBodyPrefix: b.jsonBodyPrefix,
|
||||
JsonKeyToKey: b.jsonKeyToKey,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if filterWhereClause != nil {
|
||||
|
||||
@@ -95,7 +95,7 @@ func (m *fieldMapper) ColumnExpressionFor(
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
|
||||
} else {
|
||||
// not even a close match, return an error
|
||||
return "", err
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
|
||||
}
|
||||
}
|
||||
} else if len(keysForField) == 1 {
|
||||
@@ -108,7 +108,7 @@ func (m *fieldMapper) ColumnExpressionFor(
|
||||
colName, _ = m.FieldFor(ctx, key)
|
||||
args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
|
||||
}
|
||||
colName = fmt.Sprintf("multiIf(%s)", strings.Join(args, ", "))
|
||||
colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -250,7 +250,7 @@ func (m *defaultFieldMapper) ColumnExpressionFor(
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
|
||||
} else {
|
||||
// not even a close match, return an error
|
||||
return "", err
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
|
||||
}
|
||||
}
|
||||
} else if len(keysForField) == 1 {
|
||||
@@ -263,7 +263,7 @@ func (m *defaultFieldMapper) ColumnExpressionFor(
|
||||
colName, _ = m.FieldFor(ctx, key)
|
||||
args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
|
||||
}
|
||||
colName = fmt.Sprintf("multiIf(%s)", strings.Join(args, ", "))
|
||||
colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -179,12 +179,19 @@ func (b *traceQueryStatementBuilder) buildListQuery(
|
||||
|
||||
// Add order by
|
||||
for _, orderBy := range query.Order {
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
|
||||
colExpr, err := b.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
|
||||
}
|
||||
|
||||
// Add limit and offset
|
||||
if query.Limit > 0 {
|
||||
sb.Limit(query.Limit)
|
||||
} else {
|
||||
// default to 1k rows
|
||||
sb.Limit(100)
|
||||
}
|
||||
|
||||
if query.Offset > 0 {
|
||||
|
||||
@@ -328,6 +328,8 @@ type MetricAggregation struct {
|
||||
TableHints *metrictypes.MetricTableHints `json:"-"`
|
||||
// value filter to apply to the query
|
||||
ValueFilter *metrictypes.MetricValueFilter `json:"-"`
|
||||
// reduce to operator for scalar requests
|
||||
ReduceTo ReduceTo `json:"reduceTo,omitempty"`
|
||||
}
|
||||
|
||||
type Filter struct {
|
||||
@@ -379,7 +381,7 @@ type FunctionArg struct {
|
||||
// name of the argument
|
||||
Name string `json:"name,omitempty"`
|
||||
// value of the argument
|
||||
Value string `json:"value"`
|
||||
Value any `json:"value"`
|
||||
}
|
||||
|
||||
type Function struct {
|
||||
|
||||
@@ -55,4 +55,25 @@ type QueryBuilderQuery[T any] struct {
|
||||
|
||||
// functions to apply to the query
|
||||
Functions []Function `json:"functions,omitempty"`
|
||||
|
||||
// ShiftBy is extracted from timeShift function for internal use (caching)
|
||||
// This field is not serialized to JSON
|
||||
ShiftBy int64 `json:"-"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
|
||||
func (q *QueryBuilderQuery[T]) UnmarshalJSON(data []byte) error {
|
||||
// Define a type alias to avoid infinite recursion
|
||||
type Alias QueryBuilderQuery[T]
|
||||
|
||||
var temp Alias
|
||||
// Use UnmarshalJSONWithContext for better error messages
|
||||
if err := UnmarshalJSONWithContext(data, &temp, "query spec"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy the decoded values back to the original struct
|
||||
*q = QueryBuilderQuery[T](temp)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,10 +20,30 @@ type QueryBuilderFormula struct {
|
||||
// expression to apply to the query
|
||||
Expression string `json:"expression"`
|
||||
|
||||
// order by keys and directions
|
||||
Order []OrderBy `json:"order,omitempty"`
|
||||
|
||||
// limit the maximum number of rows to return
|
||||
Limit int `json:"limit,omitempty"`
|
||||
|
||||
// having clause to apply to the query
|
||||
Having *Having `json:"having,omitempty"`
|
||||
|
||||
// functions to apply to the formula result
|
||||
Functions []Function `json:"functions,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
|
||||
func (f *QueryBuilderFormula) UnmarshalJSON(data []byte) error {
|
||||
type Alias QueryBuilderFormula
|
||||
var temp Alias
|
||||
if err := UnmarshalJSONWithContext(data, &temp, "formula spec"); err != nil {
|
||||
return err
|
||||
}
|
||||
*f = QueryBuilderFormula(temp)
|
||||
return nil
|
||||
}
|
||||
|
||||
// small container to store the query name and index or alias reference
|
||||
// for a variable in the formula expression
|
||||
// read below for more details on aggregation references
|
||||
|
||||
@@ -93,9 +93,20 @@ func ApplyFunction(fn Function, result *TimeSeries) *TimeSeries {
|
||||
return result
|
||||
}
|
||||
|
||||
// parseFloat64Arg parses a string argument to float64
|
||||
func parseFloat64Arg(value string) (float64, error) {
|
||||
return strconv.ParseFloat(value, 64)
|
||||
// parseFloat64Arg parses an argument to float64
|
||||
func parseFloat64Arg(value any) (float64, error) {
|
||||
switch v := value.(type) {
|
||||
case float64:
|
||||
return v, nil
|
||||
case int64:
|
||||
return float64(v), nil
|
||||
case int:
|
||||
return float64(v), nil
|
||||
case string:
|
||||
return strconv.ParseFloat(v, 64)
|
||||
default:
|
||||
return 0, strconv.ErrSyntax
|
||||
}
|
||||
}
|
||||
|
||||
// getEWMAAlpha calculates the alpha value for EWMA functions
|
||||
|
||||
@@ -677,3 +677,62 @@ func TestApplyFunctions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFloat64Arg(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
value any
|
||||
expected float64
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "float64 value",
|
||||
value: float64(3600),
|
||||
expected: 3600,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "int64 value",
|
||||
value: int64(3600),
|
||||
expected: 3600,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "int value",
|
||||
value: int(3600),
|
||||
expected: 3600,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "string value",
|
||||
value: "3600",
|
||||
expected: 3600,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid string value",
|
||||
value: "invalid",
|
||||
expected: 0,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "unsupported type",
|
||||
value: []int{1, 2, 3},
|
||||
expected: 0,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := parseFloat64Arg(tt.value)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("parseFloat64Arg() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if got != tt.expected {
|
||||
t.Errorf("parseFloat64Arg() = %v, want %v", got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
109
pkg/types/querybuildertypes/querybuildertypesv5/json_decoder.go
Normal file
@@ -0,0 +1,109 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
)
|
||||
|
||||
// UnmarshalJSONWithSuggestions unmarshals JSON data into the target struct
|
||||
// and provides field name suggestions for unknown fields
|
||||
func UnmarshalJSONWithSuggestions(data []byte, target any) error {
|
||||
return UnmarshalJSONWithContext(data, target, "")
|
||||
}
|
||||
|
||||
// UnmarshalJSONWithContext unmarshals JSON with context information for better error messages
|
||||
func UnmarshalJSONWithContext(data []byte, target any, context string) error {
|
||||
// First, try to unmarshal with DisallowUnknownFields to catch unknown fields
|
||||
dec := json.NewDecoder(bytes.NewReader(data))
|
||||
dec.DisallowUnknownFields()
|
||||
|
||||
err := dec.Decode(target)
|
||||
if err == nil {
|
||||
// No error, successful unmarshal
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if it's an unknown field error
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
// Extract the unknown field name
|
||||
unknownField := extractUnknownField(err.Error())
|
||||
if unknownField != "" {
|
||||
// Get valid field names from the target struct
|
||||
validFields := getJSONFieldNames(target)
|
||||
|
||||
// Build error message with context
|
||||
errorMsg := "unknown field %q"
|
||||
if context != "" {
|
||||
errorMsg = "unknown field %q in " + context
|
||||
}
|
||||
|
||||
// Find closest match with max distance of 3 (reasonable for typos)
|
||||
if suggestion, found := findClosestMatch(unknownField, validFields, 3); found {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
errorMsg,
|
||||
unknownField,
|
||||
).WithAdditional(
|
||||
"Did you mean '" + suggestion + "'?",
|
||||
)
|
||||
}
|
||||
|
||||
// No good suggestion found
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
errorMsg,
|
||||
unknownField,
|
||||
).WithAdditional(
|
||||
"Valid fields are: " + strings.Join(validFields, ", "),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Return the original error if it's not an unknown field error
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid JSON: %v", err)
|
||||
}
|
||||
|
||||
// extractUnknownField extracts the field name from an unknown field error message
|
||||
func extractUnknownField(errMsg string) string {
|
||||
// The error message format is: json: unknown field "fieldname"
|
||||
parts := strings.Split(errMsg, `"`)
|
||||
if len(parts) >= 2 {
|
||||
return parts[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// getJSONFieldNames extracts all JSON field names from a struct
|
||||
func getJSONFieldNames(v any) []string {
|
||||
var fields []string
|
||||
|
||||
t := reflect.TypeOf(v)
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Struct {
|
||||
return fields
|
||||
}
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
jsonTag := field.Tag.Get("json")
|
||||
|
||||
if jsonTag == "" || jsonTag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract the field name from the JSON tag
|
||||
fieldName := strings.Split(jsonTag, ",")[0]
|
||||
if fieldName != "" {
|
||||
fields = append(fields, fieldName)
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
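For comparison, a minimal standard-library sketch (illustrative only, with a hypothetical target struct) of the raw error that UnmarshalJSONWithContext builds its suggestions on top of:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// request is a hypothetical target struct used only for this sketch.
type request struct {
	Start int64 `json:"start"`
	End   int64 `json:"end"`
}

func main() {
	dec := json.NewDecoder(bytes.NewReader([]byte(`{"strt": 1000, "end": 2000}`)))
	dec.DisallowUnknownFields()
	var r request
	fmt.Println(dec.Decode(&r)) // json: unknown field "strt"
}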
|
||||
@@ -0,0 +1,87 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func levenshteinDistance(s1, s2 string) int {
|
||||
if len(s1) == 0 {
|
||||
return len(s2)
|
||||
}
|
||||
if len(s2) == 0 {
|
||||
return len(s1)
|
||||
}
|
||||
|
||||
// Create a matrix to store distances
|
||||
matrix := make([][]int, len(s1)+1)
|
||||
for i := range matrix {
|
||||
matrix[i] = make([]int, len(s2)+1)
|
||||
}
|
||||
|
||||
// Initialize first column and row
|
||||
for i := 0; i <= len(s1); i++ {
|
||||
matrix[i][0] = i
|
||||
}
|
||||
for j := 0; j <= len(s2); j++ {
|
||||
matrix[0][j] = j
|
||||
}
|
||||
|
||||
// Calculate distances
|
||||
for i := 1; i <= len(s1); i++ {
|
||||
for j := 1; j <= len(s2); j++ {
|
||||
cost := 0
|
||||
if s1[i-1] != s2[j-1] {
|
||||
cost = 1
|
||||
}
|
||||
matrix[i][j] = min(
|
||||
matrix[i-1][j]+1, // deletion
|
||||
matrix[i][j-1]+1, // insertion
|
||||
matrix[i-1][j-1]+cost, // substitution
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return matrix[len(s1)][len(s2)]
|
||||
}
|
||||
|
||||
func findClosestMatch(target string, validOptions []string, maxDistance int) (string, bool) {
|
||||
if len(validOptions) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
bestMatch := ""
|
||||
bestDistance := maxDistance + 1
|
||||
|
||||
// Convert target to lowercase for case-insensitive comparison
|
||||
targetLower := strings.ToLower(target)
|
||||
|
||||
for _, option := range validOptions {
|
||||
// Case-insensitive comparison
|
||||
distance := levenshteinDistance(targetLower, strings.ToLower(option))
|
||||
if distance < bestDistance {
|
||||
bestDistance = distance
|
||||
bestMatch = option
|
||||
}
|
||||
}
|
||||
|
||||
// Only return a match if it's within the threshold
|
||||
if bestDistance <= maxDistance {
|
||||
return bestMatch, true
|
||||
}
|
||||
|
||||
return "", false
|
||||
}
|
||||
|
||||
// min returns the minimum of three integers
|
||||
func min(a, b, c int) int {
|
||||
if a < b {
|
||||
if a < c {
|
||||
return a
|
||||
}
|
||||
return c
|
||||
}
|
||||
if b < c {
|
||||
return b
|
||||
}
|
||||
return c
|
||||
}
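A hypothetical call site for the helper above (in-package snippet, illustrative only; the behaviour matches the tests in the next file):

// Within the same package:
match, ok := findClosestMatch("strt", []string{"start", "end", "limit"}, 3)
// match == "start", ok == true (edit distance 1, within the threshold of 3)

_, ok = findClosestMatch("completely_different", []string{"start", "end", "limit"}, 3)
// ok == false: nothing within the maximum edit distance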
|
||||
@@ -0,0 +1,323 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLevenshteinDistance(t *testing.T) {
|
||||
tests := []struct {
|
||||
s1 string
|
||||
s2 string
|
||||
expected int
|
||||
}{
|
||||
{"", "", 0},
|
||||
{"a", "", 1},
|
||||
{"", "a", 1},
|
||||
{"a", "a", 0},
|
||||
{"abc", "abc", 0},
|
||||
{"kitten", "sitting", 3},
|
||||
{"saturday", "sunday", 3},
|
||||
{"expires", "expires_in", 3},
|
||||
{"start", "end", 5}, // s->e, t->n, a->d, r->"", t->""
|
||||
{"schemaVersion", "schema_version", 2}, // V->_ and ""->_
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.s1+"_"+tt.s2, func(t *testing.T) {
|
||||
result := levenshteinDistance(tt.s1, tt.s2)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindClosestMatch(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
target string
|
||||
validOptions []string
|
||||
maxDistance int
|
||||
expectedMatch string
|
||||
expectedFound bool
|
||||
}{
|
||||
{
|
||||
name: "exact match",
|
||||
target: "start",
|
||||
validOptions: []string{"start", "end", "limit"},
|
||||
maxDistance: 3,
|
||||
expectedMatch: "start",
|
||||
expectedFound: true,
|
||||
},
|
||||
{
|
||||
name: "close match",
|
||||
target: "strt",
|
||||
validOptions: []string{"start", "end", "limit"},
|
||||
maxDistance: 3,
|
||||
expectedMatch: "start",
|
||||
expectedFound: true,
|
||||
},
|
||||
{
|
||||
name: "case insensitive match",
|
||||
target: "START",
|
||||
validOptions: []string{"start", "end", "limit"},
|
||||
maxDistance: 3,
|
||||
expectedMatch: "start",
|
||||
expectedFound: true,
|
||||
},
|
||||
{
|
||||
name: "no match within distance",
|
||||
target: "completely_different",
|
||||
validOptions: []string{"start", "end", "limit"},
|
||||
maxDistance: 3,
|
||||
expectedMatch: "",
|
||||
expectedFound: false,
|
||||
},
|
||||
{
|
||||
name: "expires to expires_in",
|
||||
target: "expires",
|
||||
validOptions: []string{"expires_in", "start", "end"},
|
||||
maxDistance: 3,
|
||||
expectedMatch: "expires_in",
|
||||
expectedFound: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
match, found := findClosestMatch(tt.target, tt.validOptions, tt.maxDistance)
|
||||
assert.Equal(t, tt.expectedFound, found)
|
||||
if tt.expectedFound {
|
||||
assert.Equal(t, tt.expectedMatch, match)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryRangeRequestUnmarshalWithSuggestions(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
jsonData string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "valid request",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
name: "typo in start field",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"strt": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: `unknown field "strt"`,
|
||||
},
|
||||
{
|
||||
name: "typo in schemaVersion",
|
||||
jsonData: `{
|
||||
"schemaVerson": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: `unknown field "schemaVerson"`,
|
||||
},
|
||||
{
|
||||
name: "requestype instead of requestType",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestype": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: `unknown field "requestype"`,
|
||||
},
|
||||
{
|
||||
name: "composite_query instead of compositeQuery",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"composite_query": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: `unknown field "composite_query"`,
|
||||
},
|
||||
{
|
||||
name: "no_cache instead of noCache",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
},
|
||||
"no_cache": true
|
||||
}`,
|
||||
expectedErr: `unknown field "no_cache"`,
|
||||
},
|
||||
{
|
||||
name: "format_options instead of formatOptions",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
},
|
||||
"format_options": {}
|
||||
}`,
|
||||
expectedErr: `unknown field "format_options"`,
|
||||
},
|
||||
{
|
||||
name: "completely unknown field with no good suggestion",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"completely_unknown_field_xyz": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: `unknown field "completely_unknown_field_xyz"`,
|
||||
},
|
||||
{
|
||||
name: "common mistake: limit instead of variables",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
},
|
||||
"limit": 100
|
||||
}`,
|
||||
expectedErr: `unknown field "limit"`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var req QueryRangeRequest
|
||||
err := json.Unmarshal([]byte(tt.jsonData), &req)
|
||||
|
||||
if tt.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), tt.expectedErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetJSONFieldNames(t *testing.T) {
	type TestStruct struct {
		Field1 string `json:"field1"`
		Field2 int    `json:"field2,omitempty"`
		Field3 bool   `json:"-"`
		Field4 string `json:""`
		Field5 string // no json tag
	}

	fields := getJSONFieldNames(&TestStruct{})
	expected := []string{"field1", "field2"}

	assert.ElementsMatch(t, expected, fields)
}
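The test above pins down the contract: only fields with a non-empty json tag other than "-" are reported. One plausible shape for such a helper, shown purely as a sketch (the actual implementation in this change may differ, and the name is suffixed to avoid clashing with it), assuming "reflect" and "strings" are imported:

// getJSONFieldNamesSketch walks the struct fields via reflection and collects
// the names from their `json` tags, skipping "-", empty, and untagged fields.
func getJSONFieldNamesSketch(v interface{}) []string {
	t := reflect.TypeOf(v)
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	var names []string
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("json")
		if tag == "" || tag == "-" {
			continue
		}
		names = append(names, strings.Split(tag, ",")[0]) // drop options like ",omitempty"
	}
	return names
}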
|
||||
func TestUnmarshalJSONWithSuggestions(t *testing.T) {
|
||||
type TestRequest struct {
|
||||
SchemaVersion string `json:"schemaVersion"`
|
||||
Start int64 `json:"start"`
|
||||
End int64 `json:"end"`
|
||||
Limit int `json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
jsonData string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "valid JSON",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v1",
|
||||
"start": 1000,
|
||||
"end": 2000
|
||||
}`,
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
name: "typo in field name",
|
||||
jsonData: `{
|
||||
"schemaVerson": "v1",
|
||||
"start": 1000,
|
||||
"end": 2000
|
||||
}`,
|
||||
expectedErr: `unknown field "schemaVerson"`,
|
||||
},
|
||||
{
|
||||
name: "multiple typos - only first is reported",
|
||||
jsonData: `{
|
||||
"strt": 1000,
|
||||
"ed": 2000
|
||||
}`,
|
||||
expectedErr: `unknown field "strt"`,
|
||||
},
|
||||
{
|
||||
name: "case sensitivity",
|
||||
jsonData: `{
|
||||
"schema_version": "v1",
|
||||
"start": 1000,
|
||||
"end": 2000
|
||||
}`,
|
||||
expectedErr: `unknown field "schema_version"`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var req TestRequest
|
||||
err := UnmarshalJSONWithSuggestions([]byte(tt.jsonData), &req)
|
||||
|
||||
if tt.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
// Clean up the error message for comparison
|
||||
errMsg := strings.ReplaceAll(err.Error(), "\n", " ")
|
||||
assert.Contains(t, errMsg, tt.expectedErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
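Because the helper accepts any destination struct, the same suggestion behaviour can be reused for other payloads. A minimal sketch in the same package (the payload struct and function name are illustrative, not part of this change):

func sketchPaginationTypo() {
	type pagination struct {
		Limit  int `json:"limit"`
		Offset int `json:"offset"`
	}
	var p pagination
	err := UnmarshalJSONWithSuggestions([]byte(`{"limt": 10}`), &p)
	// err is non-nil; its message contains `unknown field "limt"`, and given the
	// close match a "Did you mean 'limit'?" hint is expected alongside it.
	_ = err
}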
@@ -2,6 +2,7 @@ package querybuildertypesv5

import (
	"encoding/json"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
@@ -17,12 +18,11 @@ type QueryEnvelope struct {
// implement custom json unmarshaler for the QueryEnvelope
func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
	var shadow struct {
		Name string          `json:"name"`
		Type QueryType       `json:"type"`
		Spec json.RawMessage `json:"spec"`
	}
	if err := json.Unmarshal(data, &shadow); err != nil {
		return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid query envelope")
	if err := UnmarshalJSONWithSuggestions(data, &shadow); err != nil {
		return err
	}

	q.Type = shadow.Type
@@ -34,62 +34,169 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
|
||||
Signal telemetrytypes.Signal `json:"signal"`
|
||||
}
|
||||
if err := json.Unmarshal(shadow.Spec, &header); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "cannot detect builder signal")
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"cannot detect builder signal: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
switch header.Signal {
|
||||
case telemetrytypes.SignalTraces:
|
||||
var spec QueryBuilderQuery[TraceAggregation]
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid trace builder query spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid trace builder query spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
case telemetrytypes.SignalLogs:
|
||||
var spec QueryBuilderQuery[LogAggregation]
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid log builder query spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid log builder query spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
case telemetrytypes.SignalMetrics:
|
||||
var spec QueryBuilderQuery[MetricAggregation]
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid metric builder query spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid metric builder query spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
default:
|
||||
return errors.WrapInvalidInputf(nil, errors.CodeInvalidInput, "unknown builder signal %q", header.Signal)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown builder signal %q",
|
||||
header.Signal,
|
||||
).WithAdditional(
|
||||
"Valid signals are: traces, logs, metrics",
|
||||
)
|
||||
}
|
||||
|
||||
case QueryTypeFormula:
|
||||
var spec QueryBuilderFormula
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid formula spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "formula spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid formula spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
|
||||
case QueryTypeJoin:
|
||||
var spec QueryBuilderJoin
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid join spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "join spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid join spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
|
||||
case QueryTypePromQL:
|
||||
var spec PromQuery
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid PromQL spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "PromQL spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid PromQL spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
|
||||
case QueryTypeClickHouseSQL:
|
||||
var spec ClickHouseQuery
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid ClickHouse SQL spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "ClickHouse SQL spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid ClickHouse SQL spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
|
||||
default:
|
||||
return errors.WrapInvalidInputf(nil, errors.CodeInvalidInput, "unknown query type %q", shadow.Type)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown query type %q",
|
||||
shadow.Type,
|
||||
).WithAdditional(
|
||||
"Valid query types are: builder_query, builder_sub_query, builder_formula, builder_join, promql, clickhouse_sql",
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -100,6 +207,59 @@ type CompositeQuery struct {
|
||||
Queries []QueryEnvelope `json:"queries"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements custom JSON unmarshaling to provide better error messages
|
||||
func (c *CompositeQuery) UnmarshalJSON(data []byte) error {
|
||||
type Alias CompositeQuery
|
||||
|
||||
// First do a normal unmarshal without DisallowUnknownFields
|
||||
var temp Alias
|
||||
if err := json.Unmarshal(data, &temp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Then check for unknown fields at this level only
|
||||
var check map[string]json.RawMessage
|
||||
if err := json.Unmarshal(data, &check); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for unknown fields at this level
|
||||
validFields := map[string]bool{
|
||||
"queries": true,
|
||||
}
|
||||
|
||||
for field := range check {
|
||||
if !validFields[field] {
|
||||
// Find closest match
|
||||
var fieldNames []string
|
||||
for f := range validFields {
|
||||
fieldNames = append(fieldNames, f)
|
||||
}
|
||||
|
||||
if suggestion, found := findClosestMatch(field, fieldNames, 3); found {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown field %q in composite query",
|
||||
field,
|
||||
).WithAdditional(
|
||||
"Did you mean '" + suggestion + "'?",
|
||||
)
|
||||
}
|
||||
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown field %q in composite query",
|
||||
field,
|
||||
).WithAdditional(
|
||||
"Valid fields are: " + strings.Join(fieldNames, ", "),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
*c = CompositeQuery(temp)
|
||||
return nil
|
||||
}
|
||||
|
||||
type QueryRangeRequest struct {
|
||||
// SchemaVersion is the version of the schema to use for the request payload.
|
||||
SchemaVersion string `json:"schemaVersion"`
|
||||
@@ -120,6 +280,69 @@ type QueryRangeRequest struct {
|
||||
FormatOptions *FormatOptions `json:"formatOptions,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
|
||||
func (r *QueryRangeRequest) UnmarshalJSON(data []byte) error {
|
||||
// Define a type alias to avoid infinite recursion
|
||||
type Alias QueryRangeRequest
|
||||
|
||||
// First do a normal unmarshal without DisallowUnknownFields to let nested structures handle their own validation
|
||||
var temp Alias
|
||||
if err := json.Unmarshal(data, &temp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Then check for unknown fields at this level only
|
||||
var check map[string]json.RawMessage
|
||||
if err := json.Unmarshal(data, &check); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for unknown fields at the top level
|
||||
validFields := map[string]bool{
|
||||
"schemaVersion": true,
|
||||
"start": true,
|
||||
"end": true,
|
||||
"requestType": true,
|
||||
"compositeQuery": true,
|
||||
"variables": true,
|
||||
"noCache": true,
|
||||
"formatOptions": true,
|
||||
}
|
||||
|
||||
for field := range check {
|
||||
if !validFields[field] {
|
||||
// Find closest match
|
||||
var fieldNames []string
|
||||
for f := range validFields {
|
||||
fieldNames = append(fieldNames, f)
|
||||
}
|
||||
|
||||
if suggestion, found := findClosestMatch(field, fieldNames, 3); found {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown field %q",
|
||||
field,
|
||||
).WithAdditional(
|
||||
"Did you mean '" + suggestion + "'?",
|
||||
)
|
||||
}
|
||||
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown field %q",
|
||||
field,
|
||||
).WithAdditional(
|
||||
"Valid fields are: " + strings.Join(fieldNames, ", "),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the decoded values back to the original struct
|
||||
*r = QueryRangeRequest(temp)
|
||||
|
||||
return nil
|
||||
}
|
||||
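Taken together with the envelope-level unmarshaler, a request with a misspelled top-level key now fails fast instead of silently dropping the field. A sketch of the observable behaviour within this package (using errors.Unwrapb as the existing tests do; the function name is illustrative):

func sketchTopLevelTypo() {
	var req QueryRangeRequest
	err := json.Unmarshal([]byte(`{
		"schemaVerson": "v5",
		"start": 1000,
		"end": 2000,
		"requestType": "timeseries",
		"compositeQuery": {"queries": []}
	}`), &req)
	// err is non-nil and its message contains `unknown field "schemaVerson"`.
	_, _, _, _, _, additionals := errors.Unwrapb(err)
	// additionals carries the hint "Did you mean 'schemaVersion'?".
	_ = additionals
}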
|
||||
type FormatOptions struct {
|
||||
FillGaps bool `json:"fillGaps,omitempty"`
|
||||
FormatTableResultForUI bool `json:"formatTableResultForUI,omitempty"`
|
||||
|
||||
@@ -0,0 +1,150 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestQueryRangeRequest_UnmarshalJSON_ErrorMessages(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
jsonData string
|
||||
wantErrMsg string
|
||||
wantAdditionalHints []string
|
||||
}{
|
||||
{
|
||||
name: "unknown field 'function' in query spec",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v1",
|
||||
"start": 1749290340000,
|
||||
"end": 1749293940000,
|
||||
"requestType": "scalar",
|
||||
"compositeQuery": {
|
||||
"queries": [{
|
||||
"type": "builder_query",
|
||||
"spec": {
|
||||
"name": "A",
|
||||
"signal": "logs",
|
||||
"aggregations": [{
|
||||
"expression": "count()",
|
||||
"alias": "spans_count"
|
||||
}],
|
||||
"function": [{
|
||||
"name": "absolute",
|
||||
"args": []
|
||||
}]
|
||||
}
|
||||
}]
|
||||
}
|
||||
}`,
|
||||
wantErrMsg: `unknown field "function" in query spec`,
|
||||
wantAdditionalHints: []string{
|
||||
"Did you mean 'functions'?",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown field 'filters' in query spec",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v1",
|
||||
"start": 1749290340000,
|
||||
"end": 1749293940000,
|
||||
"requestType": "scalar",
|
||||
"compositeQuery": {
|
||||
"queries": [{
|
||||
"type": "builder_query",
|
||||
"spec": {
|
||||
"name": "A",
|
||||
"signal": "metrics",
|
||||
"aggregations": [{
|
||||
"metricName": "test"
|
||||
}],
|
||||
"filters": {
|
||||
"expression": "test = 1"
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}`,
|
||||
wantErrMsg: `unknown field "filters" in query spec`,
|
||||
wantAdditionalHints: []string{
|
||||
"Did you mean 'filter'?",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown field at top level",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v1",
|
||||
"start": 1749290340000,
|
||||
"end": 1749293940000,
|
||||
"requestType": "scalar",
|
||||
"compositeQueries": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
wantErrMsg: `unknown field "compositeQueries"`,
|
||||
wantAdditionalHints: []string{
|
||||
"Did you mean 'compositeQuery'?",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown field with no good suggestion",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v1",
|
||||
"start": 1749290340000,
|
||||
"end": 1749293940000,
|
||||
"requestType": "scalar",
|
||||
"compositeQuery": {
|
||||
"queries": [{
|
||||
"type": "builder_query",
|
||||
"spec": {
|
||||
"name": "A",
|
||||
"signal": "metrics",
|
||||
"aggregations": [{
|
||||
"metricName": "test"
|
||||
}],
|
||||
"randomField": "value"
|
||||
}
|
||||
}]
|
||||
}
|
||||
}`,
|
||||
wantErrMsg: `unknown field "randomField" in query spec`,
|
||||
wantAdditionalHints: []string{
|
||||
"Valid fields are:",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var req QueryRangeRequest
|
||||
err := json.Unmarshal([]byte(tt.jsonData), &req)
|
||||
|
||||
require.Error(t, err)
|
||||
|
||||
// Check main error message
|
||||
assert.Contains(t, err.Error(), tt.wantErrMsg)
|
||||
|
||||
// Check if it's an error from our package using Unwrapb
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
|
||||
// Check additional hints if we have any
|
||||
if len(additionals) > 0 {
|
||||
for _, hint := range tt.wantAdditionalHints {
|
||||
found := false
|
||||
for _, additional := range additionals {
|
||||
if strings.Contains(additional, hint) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Expected to find hint '%s' in additionals: %v", hint, additionals)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -120,8 +120,8 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
|
||||
"expression": "severity_text = 'ERROR'"
|
||||
},
|
||||
"selectFields": [{
|
||||
"key": "body",
|
||||
"type": "log"
|
||||
"name": "body",
|
||||
"fieldContext": "log"
|
||||
}],
|
||||
"limit": 50,
|
||||
"offset": 10
|
||||
@@ -177,8 +177,8 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
|
||||
}],
|
||||
"stepInterval": 120,
|
||||
"groupBy": [{
|
||||
"key": "method",
|
||||
"type": "tag"
|
||||
"name": "method",
|
||||
"fieldContext": "tag"
|
||||
}]
|
||||
}
|
||||
}]
|
||||
@@ -436,10 +436,9 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "B",
|
||||
"type": "builder_formula",
|
||||
"spec": {
|
||||
"name": "rate",
|
||||
"name": "B",
|
||||
"expression": "A * 100"
|
||||
}
|
||||
}
|
||||
@@ -465,7 +464,7 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
|
||||
{
|
||||
Type: QueryTypeFormula,
|
||||
Spec: QueryBuilderFormula{
|
||||
Name: "rate",
|
||||
Name: "B",
|
||||
Expression: "A * 100",
|
||||
},
|
||||
},
|
||||
|
||||
@@ -99,8 +99,8 @@ type TimeSeriesValue struct {
	Partial bool `json:"partial,omitempty"`

	// for the heatmap type chart
	Values []float64 `json:"values,omitempty"`
	Bucket *Bucket   `json:"bucket,omitempty"`
	// Values []float64 `json:"values,omitempty"`
	Bucket *Bucket `json:"bucket,omitempty"`
}

type Bucket struct {
783	pkg/types/querybuildertypes/querybuildertypesv5/validation.go	Normal file
@@ -0,0 +1,783 @@
package querybuildertypesv5

import (
	"fmt"
	"slices"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// getQueryIdentifier returns a friendly identifier for a query based on its type and name/content
|
||||
func getQueryIdentifier(envelope QueryEnvelope, index int) string {
|
||||
switch envelope.Type {
|
||||
case QueryTypeBuilder, QueryTypeSubQuery:
|
||||
switch spec := envelope.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
if spec.Name != "" {
|
||||
return fmt.Sprintf("query '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("trace query at position %d", index+1)
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
if spec.Name != "" {
|
||||
return fmt.Sprintf("query '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("log query at position %d", index+1)
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
if spec.Name != "" {
|
||||
return fmt.Sprintf("query '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("metric query at position %d", index+1)
|
||||
}
|
||||
case QueryTypeFormula:
|
||||
if spec, ok := envelope.Spec.(QueryBuilderFormula); ok && spec.Name != "" {
|
||||
return fmt.Sprintf("formula '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("formula at position %d", index+1)
|
||||
case QueryTypeJoin:
|
||||
if spec, ok := envelope.Spec.(QueryBuilderJoin); ok && spec.Name != "" {
|
||||
return fmt.Sprintf("join '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("join at position %d", index+1)
|
||||
case QueryTypePromQL:
|
||||
if spec, ok := envelope.Spec.(PromQuery); ok && spec.Name != "" {
|
||||
return fmt.Sprintf("PromQL query '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("PromQL query at position %d", index+1)
|
||||
case QueryTypeClickHouseSQL:
|
||||
if spec, ok := envelope.Spec.(ClickHouseQuery); ok && spec.Name != "" {
|
||||
return fmt.Sprintf("ClickHouse query '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("ClickHouse query at position %d", index+1)
|
||||
}
|
||||
return fmt.Sprintf("query at position %d", index+1)
|
||||
}
|
||||
|
||||
const (
|
||||
// Maximum limit for query results
|
||||
MaxQueryLimit = 10000
|
||||
)
|
||||
|
||||
// ValidateFunctionName checks if the function name is valid
|
||||
func ValidateFunctionName(name FunctionName) error {
|
||||
validFunctions := []FunctionName{
|
||||
FunctionNameCutOffMin,
|
||||
FunctionNameCutOffMax,
|
||||
FunctionNameClampMin,
|
||||
FunctionNameClampMax,
|
||||
FunctionNameAbsolute,
|
||||
FunctionNameRunningDiff,
|
||||
FunctionNameLog2,
|
||||
FunctionNameLog10,
|
||||
FunctionNameCumulativeSum,
|
||||
FunctionNameEWMA3,
|
||||
FunctionNameEWMA5,
|
||||
FunctionNameEWMA7,
|
||||
FunctionNameMedian3,
|
||||
FunctionNameMedian5,
|
||||
FunctionNameMedian7,
|
||||
FunctionNameTimeShift,
|
||||
FunctionNameAnomaly,
|
||||
}
|
||||
|
||||
if slices.Contains(validFunctions, name) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid function name: %s",
|
||||
name.StringValue(),
|
||||
).WithAdditional(fmt.Sprintf("valid functions are: %v", validFunctions))
|
||||
}
|
||||
|
||||
// Validate performs preliminary validation on QueryBuilderQuery
|
||||
func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
|
||||
// Validate signal
|
||||
if err := q.validateSignal(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate aggregations only for non-raw request types
|
||||
if requestType != RequestTypeRaw {
|
||||
if err := q.validateAggregations(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Validate limit and pagination
|
||||
if err := q.validateLimitAndPagination(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate functions
|
||||
if err := q.validateFunctions(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate secondary aggregations
|
||||
if err := q.validateSecondaryAggregations(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate order by
|
||||
if err := q.validateOrderBy(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *QueryBuilderQuery[T]) validateSignal() error {
|
||||
// Signal validation is handled during unmarshaling in req.go
|
||||
// Valid signals are: metrics, traces, logs
|
||||
switch q.Signal {
|
||||
case telemetrytypes.SignalMetrics,
|
||||
telemetrytypes.SignalTraces,
|
||||
telemetrytypes.SignalLogs,
|
||||
telemetrytypes.SignalUnspecified: // Empty is allowed for backward compatibility
|
||||
return nil
|
||||
default:
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid signal type: %s",
|
||||
q.Signal,
|
||||
).WithAdditional(
|
||||
"Valid signals are: metrics, traces, logs",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func (q *QueryBuilderQuery[T]) validateAggregations() error {
|
||||
// At least one aggregation required for non-disabled queries
|
||||
if len(q.Aggregations) == 0 && !q.Disabled {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"at least one aggregation is required",
|
||||
)
|
||||
}
|
||||
|
||||
// Check for duplicate aliases
|
||||
aliases := make(map[string]bool)
|
||||
for i, agg := range q.Aggregations {
|
||||
// Type-specific validation based on T
|
||||
switch v := any(agg).(type) {
|
||||
case MetricAggregation:
|
||||
if v.MetricName == "" {
|
||||
aggId := fmt.Sprintf("aggregation #%d", i+1)
|
||||
if q.Name != "" {
|
||||
aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"metric name is required for %s",
|
||||
aggId,
|
||||
)
|
||||
}
|
||||
// Validate metric-specific aggregations
|
||||
if err := validateMetricAggregation(v); err != nil {
|
||||
// Extract the underlying error details
|
||||
_, _, innerMsg, _, _, additionals := errors.Unwrapb(err)
|
||||
|
||||
// Create a new error with friendly identifier
|
||||
aggId := fmt.Sprintf("aggregation #%d", i+1)
|
||||
if q.Name != "" {
|
||||
aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
|
||||
}
|
||||
newErr := errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid metric %s: %s",
|
||||
aggId,
|
||||
innerMsg,
|
||||
)
|
||||
|
||||
// Add any additional context from the inner error
|
||||
if len(additionals) > 0 {
|
||||
newErr = newErr.WithAdditional(additionals...)
|
||||
}
|
||||
|
||||
return newErr
|
||||
}
|
||||
case TraceAggregation:
|
||||
if v.Expression == "" {
|
||||
aggId := fmt.Sprintf("aggregation #%d", i+1)
|
||||
if q.Name != "" {
|
||||
aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"expression is required for trace %s",
|
||||
aggId,
|
||||
)
|
||||
}
|
||||
if v.Alias != "" {
|
||||
if aliases[v.Alias] {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"duplicate aggregation alias: %s",
|
||||
v.Alias,
|
||||
)
|
||||
}
|
||||
aliases[v.Alias] = true
|
||||
}
|
||||
case LogAggregation:
|
||||
if v.Expression == "" {
|
||||
aggId := fmt.Sprintf("aggregation #%d", i+1)
|
||||
if q.Name != "" {
|
||||
aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"expression is required for log %s",
|
||||
aggId,
|
||||
)
|
||||
}
|
||||
if v.Alias != "" {
|
||||
if aliases[v.Alias] {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"duplicate aggregation alias: %s",
|
||||
v.Alias,
|
||||
)
|
||||
}
|
||||
aliases[v.Alias] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *QueryBuilderQuery[T]) validateLimitAndPagination() error {
|
||||
// Validate limit
|
||||
if q.Limit < 0 {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"limit must be non-negative, got %d",
|
||||
q.Limit,
|
||||
)
|
||||
}
|
||||
|
||||
if q.Limit > MaxQueryLimit {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"limit exceeds maximum allowed value of %d",
|
||||
MaxQueryLimit,
|
||||
).WithAdditional(
|
||||
fmt.Sprintf("Provided limit: %d", q.Limit),
|
||||
)
|
||||
}
|
||||
|
||||
// Validate offset
|
||||
if q.Offset < 0 {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"offset must be non-negative, got %d",
|
||||
q.Offset,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *QueryBuilderQuery[T]) validateFunctions() error {
|
||||
for i, fn := range q.Functions {
|
||||
if err := ValidateFunctionName(fn.Name); err != nil {
|
||||
// Extract the underlying error details
|
||||
_, _, innerMsg, _, _, additionals := errors.Unwrapb(err)
|
||||
|
||||
// Create a new error with friendly identifier
|
||||
fnId := fmt.Sprintf("function #%d", i+1)
|
||||
if q.Name != "" {
|
||||
fnId = fmt.Sprintf("function #%d in query '%s'", i+1, q.Name)
|
||||
}
|
||||
newErr := errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid %s: %s",
|
||||
fnId,
|
||||
innerMsg,
|
||||
)
|
||||
|
||||
// Add any additional context from the inner error
|
||||
if len(additionals) > 0 {
|
||||
newErr = newErr.WithAdditional(additionals...)
|
||||
}
|
||||
|
||||
return newErr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *QueryBuilderQuery[T]) validateSecondaryAggregations() error {
|
||||
for i, secAgg := range q.SecondaryAggregations {
|
||||
// Secondary aggregation expression can be empty - we allow it per requirements
|
||||
// Just validate structure
|
||||
if secAgg.Limit < 0 {
|
||||
secAggId := fmt.Sprintf("secondary aggregation #%d", i+1)
|
||||
if q.Name != "" {
|
||||
secAggId = fmt.Sprintf("secondary aggregation #%d in query '%s'", i+1, q.Name)
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"%s: limit must be non-negative",
|
||||
secAggId,
|
||||
)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *QueryBuilderQuery[T]) validateOrderBy() error {
|
||||
for i, order := range q.Order {
|
||||
// Direction validation is handled by the OrderDirection type
|
||||
if order.Direction != OrderDirectionAsc && order.Direction != OrderDirectionDesc {
|
||||
orderId := fmt.Sprintf("order by clause #%d", i+1)
|
||||
if q.Name != "" {
|
||||
orderId = fmt.Sprintf("order by clause #%d in query '%s'", i+1, q.Name)
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid direction for %s: %s",
|
||||
orderId,
|
||||
order.Direction.StringValue(),
|
||||
).WithAdditional(
|
||||
"Valid directions are: asc, desc",
|
||||
)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateQueryRangeRequest validates the entire query range request
|
||||
func (r *QueryRangeRequest) Validate() error {
|
||||
// Validate time range
|
||||
if r.Start >= r.End {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"start time must be before end time",
|
||||
)
|
||||
}
|
||||
|
||||
// Validate request type
|
||||
switch r.RequestType {
|
||||
case RequestTypeRaw, RequestTypeTimeSeries, RequestTypeScalar:
|
||||
// Valid request types
|
||||
default:
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid request type: %s",
|
||||
r.RequestType,
|
||||
).WithAdditional(
|
||||
"Valid request types are: raw, timeseries, scalar",
|
||||
)
|
||||
}
|
||||
|
||||
// Validate composite query
|
||||
if err := r.validateCompositeQuery(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *QueryRangeRequest) validateCompositeQuery() error {
|
||||
// Validate queries in composite query
|
||||
if len(r.CompositeQuery.Queries) == 0 {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"at least one query is required",
|
||||
)
|
||||
}
|
||||
|
||||
// Track query names for uniqueness (only for non-formula queries)
|
||||
queryNames := make(map[string]bool)
|
||||
|
||||
// Validate each query based on its type
|
||||
for i, envelope := range r.CompositeQuery.Queries {
|
||||
switch envelope.Type {
|
||||
case QueryTypeBuilder, QueryTypeSubQuery:
|
||||
// Validate based on the concrete type
|
||||
switch spec := envelope.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
if err := spec.Validate(r.RequestType); err != nil {
|
||||
// Extract the underlying error details
|
||||
_, _, innerMsg, _, _, additionals := errors.Unwrapb(err)
|
||||
|
||||
// Create a new error with friendly query identifier
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
newErr := errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid %s: %s",
|
||||
queryId,
|
||||
innerMsg,
|
||||
)
|
||||
|
||||
// Add any additional context from the inner error
|
||||
if len(additionals) > 0 {
|
||||
newErr = newErr.WithAdditional(additionals...)
|
||||
}
|
||||
|
||||
return newErr
|
||||
}
|
||||
// Check name uniqueness for non-formula context
|
||||
if spec.Name != "" {
|
||||
if queryNames[spec.Name] {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"duplicate query name '%s'",
|
||||
spec.Name,
|
||||
)
|
||||
}
|
||||
queryNames[spec.Name] = true
|
||||
}
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
if err := spec.Validate(r.RequestType); err != nil {
|
||||
// Extract the underlying error details
|
||||
_, _, innerMsg, _, _, additionals := errors.Unwrapb(err)
|
||||
|
||||
// Create a new error with friendly query identifier
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
newErr := errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid %s: %s",
|
||||
queryId,
|
||||
innerMsg,
|
||||
)
|
||||
|
||||
// Add any additional context from the inner error
|
||||
if len(additionals) > 0 {
|
||||
newErr = newErr.WithAdditional(additionals...)
|
||||
}
|
||||
|
||||
return newErr
|
||||
}
|
||||
// Check name uniqueness for non-formula context
|
||||
if spec.Name != "" {
|
||||
if queryNames[spec.Name] {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"duplicate query name '%s'",
|
||||
spec.Name,
|
||||
)
|
||||
}
|
||||
queryNames[spec.Name] = true
|
||||
}
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
if err := spec.Validate(r.RequestType); err != nil {
|
||||
// Extract the underlying error details
|
||||
_, _, innerMsg, _, _, additionals := errors.Unwrapb(err)
|
||||
|
||||
// Create a new error with friendly query identifier
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
newErr := errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid %s: %s",
|
||||
queryId,
|
||||
innerMsg,
|
||||
)
|
||||
|
||||
// Add any additional context from the inner error
|
||||
if len(additionals) > 0 {
|
||||
newErr = newErr.WithAdditional(additionals...)
|
||||
}
|
||||
|
||||
return newErr
|
||||
}
|
||||
// Check name uniqueness for non-formula context
|
||||
if spec.Name != "" {
|
||||
if queryNames[spec.Name] {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"duplicate query name '%s'",
|
||||
spec.Name,
|
||||
)
|
||||
}
|
||||
queryNames[spec.Name] = true
|
||||
}
|
||||
default:
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown spec type for %s",
|
||||
queryId,
|
||||
)
|
||||
}
|
||||
case QueryTypeFormula:
|
||||
// Formula validation is handled separately
|
||||
spec, ok := envelope.Spec.(QueryBuilderFormula)
|
||||
if !ok {
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid spec for %s",
|
||||
queryId,
|
||||
)
|
||||
}
|
||||
if spec.Expression == "" {
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"expression is required for %s",
|
||||
queryId,
|
||||
)
|
||||
}
|
||||
case QueryTypeJoin:
|
||||
// Join validation is handled separately
|
||||
_, ok := envelope.Spec.(QueryBuilderJoin)
|
||||
if !ok {
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid spec for %s",
|
||||
queryId,
|
||||
)
|
||||
}
|
||||
case QueryTypePromQL:
|
||||
// PromQL validation is handled separately
|
||||
spec, ok := envelope.Spec.(PromQuery)
|
||||
if !ok {
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid spec for %s",
|
||||
queryId,
|
||||
)
|
||||
}
|
||||
if spec.Query == "" {
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"query expression is required for %s",
|
||||
queryId,
|
||||
)
|
||||
}
|
||||
case QueryTypeClickHouseSQL:
|
||||
// ClickHouse SQL validation is handled separately
|
||||
spec, ok := envelope.Spec.(ClickHouseQuery)
|
||||
if !ok {
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid spec for %s",
|
||||
queryId,
|
||||
)
|
||||
}
|
||||
if spec.Query == "" {
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"query expression is required for %s",
|
||||
queryId,
|
||||
)
|
||||
}
|
||||
default:
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown query type '%s' for %s",
|
||||
envelope.Type,
|
||||
queryId,
|
||||
).WithAdditional(
|
||||
"Valid query types are: builder_query, builder_formula, builder_join, promql, clickhouse_sql",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate performs validation on CompositeQuery
|
||||
func (c *CompositeQuery) Validate(requestType RequestType) error {
|
||||
if len(c.Queries) == 0 {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"at least one query is required",
|
||||
)
|
||||
}
|
||||
|
||||
// Validate each query
|
||||
for i, envelope := range c.Queries {
|
||||
if err := validateQueryEnvelope(envelope, requestType); err != nil {
|
||||
// Extract the underlying error details
|
||||
_, _, innerMsg, _, _, additionals := errors.Unwrapb(err)
|
||||
|
||||
// Create a new error with friendly query identifier
|
||||
queryId := getQueryIdentifier(envelope, i)
|
||||
newErr := errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid %s: %s",
|
||||
queryId,
|
||||
innerMsg,
|
||||
)
|
||||
|
||||
// Add any additional context from the inner error
|
||||
if len(additionals) > 0 {
|
||||
newErr = newErr.WithAdditional(additionals...)
|
||||
}
|
||||
|
||||
return newErr
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateQueryEnvelope(envelope QueryEnvelope, requestType RequestType) error {
|
||||
switch envelope.Type {
|
||||
case QueryTypeBuilder, QueryTypeSubQuery:
|
||||
switch spec := envelope.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Validate(requestType)
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Validate(requestType)
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Validate(requestType)
|
||||
default:
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown query spec type",
|
||||
)
|
||||
}
|
||||
case QueryTypeFormula:
|
||||
spec, ok := envelope.Spec.(QueryBuilderFormula)
|
||||
if !ok {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid formula spec",
|
||||
)
|
||||
}
|
||||
if spec.Expression == "" {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"formula expression is required",
|
||||
)
|
||||
}
|
||||
return nil
|
||||
case QueryTypeJoin:
|
||||
_, ok := envelope.Spec.(QueryBuilderJoin)
|
||||
if !ok {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid join spec",
|
||||
)
|
||||
}
|
||||
return nil
|
||||
case QueryTypePromQL:
|
||||
spec, ok := envelope.Spec.(PromQuery)
|
||||
if !ok {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid PromQL spec",
|
||||
)
|
||||
}
|
||||
if spec.Query == "" {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"PromQL query is required",
|
||||
)
|
||||
}
|
||||
return nil
|
||||
case QueryTypeClickHouseSQL:
|
||||
spec, ok := envelope.Spec.(ClickHouseQuery)
|
||||
if !ok {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid ClickHouse SQL spec",
|
||||
)
|
||||
}
|
||||
if spec.Query == "" {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"ClickHouse SQL query is required",
|
||||
)
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown query type: %s",
|
||||
envelope.Type,
|
||||
).WithAdditional(
|
||||
"Valid query types are: builder_query, builder_sub_query, builder_formula, builder_join, promql, clickhouse_sql",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// validateMetricAggregation validates metric-specific aggregation parameters
|
||||
func validateMetricAggregation(agg MetricAggregation) error {
|
||||
// Validate that rate/increase are only used with appropriate temporalities
|
||||
if agg.TimeAggregation == metrictypes.TimeAggregationRate || agg.TimeAggregation == metrictypes.TimeAggregationIncrease {
|
||||
// For gauge metrics (Unspecified temporality), rate/increase doesn't make sense
|
||||
if agg.Temporality == metrictypes.Unspecified {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"rate/increase aggregation cannot be used with gauge metrics (unspecified temporality)",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate percentile aggregations are only used with histogram types
|
||||
if agg.SpaceAggregation.IsPercentile() {
|
||||
if agg.Type != metrictypes.HistogramType && agg.Type != metrictypes.ExpHistogramType && agg.Type != metrictypes.SummaryType {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"percentile aggregation can only be used with histogram or summary metric types",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate time aggregation values
|
||||
validTimeAggregations := []metrictypes.TimeAggregation{
|
||||
metrictypes.TimeAggregationUnspecified,
|
||||
metrictypes.TimeAggregationLatest,
|
||||
metrictypes.TimeAggregationSum,
|
||||
metrictypes.TimeAggregationAvg,
|
||||
metrictypes.TimeAggregationMin,
|
||||
metrictypes.TimeAggregationMax,
|
||||
metrictypes.TimeAggregationCount,
|
||||
metrictypes.TimeAggregationCountDistinct,
|
||||
metrictypes.TimeAggregationRate,
|
||||
metrictypes.TimeAggregationIncrease,
|
||||
}
|
||||
|
||||
validTimeAgg := slices.Contains(validTimeAggregations, agg.TimeAggregation)
|
||||
if !validTimeAgg {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid time aggregation: %s",
|
||||
agg.TimeAggregation.StringValue(),
|
||||
).WithAdditional(
|
||||
"Valid time aggregations: latest, sum, avg, min, max, count, count_distinct, rate, increase",
|
||||
)
|
||||
}
|
||||
|
||||
// Validate space aggregation values
|
||||
validSpaceAggregations := []metrictypes.SpaceAggregation{
|
||||
metrictypes.SpaceAggregationUnspecified,
|
||||
metrictypes.SpaceAggregationSum,
|
||||
metrictypes.SpaceAggregationAvg,
|
||||
metrictypes.SpaceAggregationMin,
|
||||
metrictypes.SpaceAggregationMax,
|
||||
metrictypes.SpaceAggregationCount,
|
||||
metrictypes.SpaceAggregationPercentile50,
|
||||
metrictypes.SpaceAggregationPercentile75,
|
||||
metrictypes.SpaceAggregationPercentile90,
|
||||
metrictypes.SpaceAggregationPercentile95,
|
||||
metrictypes.SpaceAggregationPercentile99,
|
||||
}
|
||||
|
||||
validSpaceAgg := slices.Contains(validSpaceAggregations, agg.SpaceAggregation)
|
||||
if !validSpaceAgg {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid space aggregation: %s",
|
||||
agg.SpaceAggregation.StringValue(),
|
||||
).WithAdditional(
|
||||
"Valid space aggregations: sum, avg, min, max, count, p50, p75, p90, p95, p99",
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
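A quick sketch of the rule this function enforces, mirroring the gauge case from the tests below (the metric name and wrapper function are illustrative):

func sketchGaugeRateRejected() {
	agg := MetricAggregation{
		MetricName:       "cpu_usage",
		Type:             metrictypes.GaugeType,
		Temporality:      metrictypes.Unspecified,
		TimeAggregation:  metrictypes.TimeAggregationRate,
		SpaceAggregation: metrictypes.SpaceAggregationSum,
	}
	err := validateMetricAggregation(agg)
	// err is non-nil: "rate/increase aggregation cannot be used with gauge metrics
	// (unspecified temporality)"; a sum or avg time aggregation would pass.
	_ = err
}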
@@ -0,0 +1,213 @@
package querybuildertypesv5

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/stretchr/testify/assert"
)

func TestValidateMetricAggregation(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
agg MetricAggregation
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "valid sum aggregation",
|
||||
agg: MetricAggregation{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.SumType,
|
||||
Temporality: metrictypes.Cumulative,
|
||||
TimeAggregation: metrictypes.TimeAggregationSum,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid rate on gauge",
|
||||
agg: MetricAggregation{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.GaugeType,
|
||||
Temporality: metrictypes.Unspecified,
|
||||
TimeAggregation: metrictypes.TimeAggregationRate,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "rate/increase aggregation cannot be used with gauge metrics",
|
||||
},
|
||||
{
|
||||
name: "invalid increase on gauge",
|
||||
agg: MetricAggregation{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.GaugeType,
|
||||
Temporality: metrictypes.Unspecified,
|
||||
TimeAggregation: metrictypes.TimeAggregationIncrease,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "rate/increase aggregation cannot be used with gauge metrics",
|
||||
},
|
||||
{
|
||||
name: "valid rate on cumulative",
|
||||
agg: MetricAggregation{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.SumType,
|
||||
Temporality: metrictypes.Cumulative,
|
||||
TimeAggregation: metrictypes.TimeAggregationRate,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid rate on delta",
|
||||
agg: MetricAggregation{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.SumType,
|
||||
Temporality: metrictypes.Delta,
|
||||
TimeAggregation: metrictypes.TimeAggregationRate,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid percentile on non-histogram",
|
||||
agg: MetricAggregation{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.SumType,
|
||||
Temporality: metrictypes.Cumulative,
|
||||
TimeAggregation: metrictypes.TimeAggregationSum,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "percentile aggregation can only be used with histogram",
|
||||
},
|
||||
{
|
||||
name: "valid percentile on histogram",
|
||||
agg: MetricAggregation{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.HistogramType,
|
||||
Temporality: metrictypes.Delta,
|
||||
TimeAggregation: metrictypes.TimeAggregationSum,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid percentile on exp histogram",
|
||||
agg: MetricAggregation{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.ExpHistogramType,
|
||||
Temporality: metrictypes.Delta,
|
||||
TimeAggregation: metrictypes.TimeAggregationSum,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationPercentile99,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid percentile on summary",
|
||||
agg: MetricAggregation{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.SummaryType,
|
||||
Temporality: metrictypes.Delta,
|
||||
TimeAggregation: metrictypes.TimeAggregationSum,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationPercentile50,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := validateMetricAggregation(tt.agg)
|
||||
if tt.wantErr {
|
||||
assert.Error(t, err)
|
||||
if tt.errMsg != "" {
|
||||
assert.Contains(t, err.Error(), tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryBuilderQuery_ValidateMetrics(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
query QueryBuilderQuery[MetricAggregation]
|
||||
reqType RequestType
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "valid metric query",
|
||||
query: QueryBuilderQuery[MetricAggregation]{
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
Aggregations: []MetricAggregation{
|
||||
{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.SumType,
|
||||
Temporality: metrictypes.Cumulative,
|
||||
TimeAggregation: metrictypes.TimeAggregationRate,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
},
|
||||
},
|
||||
},
|
||||
reqType: RequestTypeTimeSeries,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid metric query - rate on gauge",
|
||||
query: QueryBuilderQuery[MetricAggregation]{
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
Aggregations: []MetricAggregation{
|
||||
{
|
||||
MetricName: "test_metric",
|
||||
Type: metrictypes.GaugeType,
|
||||
Temporality: metrictypes.Unspecified,
|
||||
TimeAggregation: metrictypes.TimeAggregationRate,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
},
|
||||
},
|
||||
},
|
||||
reqType: RequestTypeTimeSeries,
|
||||
wantErr: true,
|
||||
errMsg: "rate/increase aggregation cannot be used with gauge metrics",
|
||||
},
|
||||
{
|
||||
name: "empty metric name",
|
||||
query: QueryBuilderQuery[MetricAggregation]{
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
Aggregations: []MetricAggregation{
|
||||
{
|
||||
MetricName: "",
|
||||
Type: metrictypes.SumType,
|
||||
Temporality: metrictypes.Cumulative,
|
||||
TimeAggregation: metrictypes.TimeAggregationSum,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationSum,
|
||||
},
|
||||
},
|
||||
},
|
||||
reqType: RequestTypeTimeSeries,
|
||||
wantErr: true,
|
||||
errMsg: "metric name is required",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := tt.query.Validate(tt.reqType)
|
||||
if tt.wantErr {
|
||||
assert.Error(t, err)
|
||||
if tt.errMsg != "" {
|
||||
assert.Contains(t, err.Error(), tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|