Compare commits
15 Commits
v0.65.0-cl ... dashboard-
| Author | SHA1 | Date |
|---|---|---|
| | d6fd19b363 | |
| | e50a773fa9 | |
| | cb91fee7c3 | |
| | 351178ef34 | |
| | 4b6e934510 | |
| | 99fb8c2a64 | |
| | ae98aaad2d | |
| | 23d808af08 | |
| | c991ee6239 | |
| | f098518faa | |
| | 421d355e29 | |
| | eb75e636e8 | |
| | f121240c82 | |
| | a60dbf7f89 | |
| | fa4aeae508 | |
```diff
@@ -150,8 +150,7 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true",
-        "--use-trace-new-schema=true"
+        "--use-logs-new-schema=true"
       ]
     # ports:
     #   - "6060:6060"     # pprof port
```

```diff
@@ -110,7 +110,6 @@ exporters:
   clickhousetraces:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
-    use_new_schema: true
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
```

```diff
@@ -25,8 +25,7 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true",
-        "--use-trace-new-schema=true"
+        "--use-logs-new-schema=true"
      ]
     ports:
       - "6060:6060"
```

```diff
@@ -167,8 +167,7 @@ services:
     command:
       [
         "-config=/root/config/prometheus.yml",
-        "--use-logs-new-schema=true",
-        "--use-trace-new-schema=true"
+        "--use-logs-new-schema=true"
      ]
     # ports:
     #   - "6060:6060"     # pprof port
```

```diff
@@ -173,8 +173,7 @@ services:
       [
         "-config=/root/config/prometheus.yml",
         "-gateway-url=https://api.staging.signoz.cloud",
-        "--use-logs-new-schema=true",
-        "--use-trace-new-schema=true"
+        "--use-logs-new-schema=true"
      ]
     # ports:
     #   - "6060:6060"     # pprof port
```

```diff
@@ -119,7 +119,6 @@ exporters:
   clickhousetraces:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
-    use_new_schema: true
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
```
```diff
@@ -1,9 +1,15 @@
 import { Row } from 'antd';
-import { isNull } from 'lodash-es';
 import { useDashboard } from 'providers/Dashboard/Dashboard';
-import { memo, useEffect, useState } from 'react';
+import { memo, useEffect, useRef, useState } from 'react';
 import { IDashboardVariable } from 'types/api/dashboard/getAll';
 
+import {
+  buildDependencies,
+  buildDependencyGraph,
+  buildParentDependencyGraph,
+  onUpdateVariableNode,
+  VariableGraph,
+} from './util';
 import VariableItem from './VariableItem';
 
 function DashboardVariableSelection(): JSX.Element | null {
@@ -21,6 +27,12 @@ function DashboardVariableSelection(): JSX.Element | null {
 
   const [variablesTableData, setVariablesTableData] = useState<any>([]);
 
+  const [dependencyData, setDependencyData] = useState<{
+    order: string[];
+    graph: VariableGraph;
+    parentDependencyGraph: VariableGraph;
+  } | null>(null);
+
   useEffect(() => {
     if (variables) {
       const tableRowData = [];
@@ -43,35 +55,28 @@ function DashboardVariableSelection(): JSX.Element | null {
     }
   }, [variables]);
 
-  const onVarChanged = (name: string): void => {
-    /**
-     * this function takes care of adding the dependent variables to current update queue and removing
-     * the updated variable name from the queue
-     */
-    const dependentVariables = variablesTableData
-      ?.map((variable: any) => {
-        if (variable.type === 'QUERY') {
-          const re = new RegExp(`\\{\\{\\s*?\\.${name}\\s*?\\}\\}`); // regex for `{{.var}}`
-          const queryValue = variable.queryValue || '';
-          const dependVarReMatch = queryValue.match(re);
-          if (dependVarReMatch !== null && dependVarReMatch.length > 0) {
-            return variable.name;
-          }
-        }
-        return null;
-      })
-      .filter((val: string | null) => !isNull(val));
-    setVariablesToGetUpdated((prev) => [
-      ...prev.filter((v) => v !== name),
-      ...dependentVariables,
-    ]);
-  };
+  const initializationRef = useRef(false);
+
+  useEffect(() => {
+    if (variablesTableData.length > 0 && !initializationRef.current) {
+      const depGrp = buildDependencies(variablesTableData);
+      const { order, graph } = buildDependencyGraph(depGrp);
+      const parentDependencyGraph = buildParentDependencyGraph(graph);
+      setDependencyData({
+        order,
+        graph,
+        parentDependencyGraph,
+      });
+      initializationRef.current = true;
+    }
+  }, [variablesTableData]);
 
   const onValueUpdate = (
     name: string,
     id: string,
     value: IDashboardVariable['selectedValue'],
     allSelected: boolean,
+    isMountedCall?: boolean,
     // eslint-disable-next-line sonarjs/cognitive-complexity
   ): void => {
     if (id) {
@@ -111,7 +116,18 @@ function DashboardVariableSelection(): JSX.Element | null {
         });
       }
 
-      onVarChanged(name);
+      if (dependencyData && !isMountedCall) {
+        const updatedVariables: string[] = [];
+        onUpdateVariableNode(
+          name,
+          dependencyData.graph,
+          dependencyData.order,
+          (node) => updatedVariables.push(node),
+        );
+        setVariablesToGetUpdated(updatedVariables.filter((v) => v !== name));
+      } else if (isMountedCall) {
+        setVariablesToGetUpdated((prev) => prev.filter((v) => v !== name));
+      }
     }
   };
 
@@ -139,6 +155,7 @@ function DashboardVariableSelection(): JSX.Element | null {
               onValueUpdate={onValueUpdate}
               variablesToGetUpdated={variablesToGetUpdated}
               setVariablesToGetUpdated={setVariablesToGetUpdated}
+              dependencyData={dependencyData}
             />
           ))}
       </Row>
```
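The hunks above replace the regex-based `onVarChanged` queue with a dependency graph that is built once per dashboard and then walked topologically whenever a variable changes. A minimal sketch of how those pieces are meant to compose, using simplified variable objects (the `as any` cast stands in for the full `IDashboardVariable` shape) and the `./util` helpers added later in this diff:

```ts
import {
  buildDependencies,
  buildDependencyGraph,
  buildParentDependencyGraph,
  onUpdateVariableNode,
} from './util';

// Simplified variables: service_name depends on deployment_environment,
// endpoint depends on both (fields trimmed to what the utilities read).
const variables = [
  { name: 'deployment_environment', type: 'QUERY', queryValue: 'SELECT ...' },
  { name: 'service_name', type: 'QUERY', queryValue: '... {{.deployment_environment}} ...' },
  { name: 'endpoint', type: 'QUERY', queryValue: '... {{.service_name}} ... {{.deployment_environment}} ...' },
] as any[];

// Built once per dashboard (the component guards this with initializationRef).
const dependencies = buildDependencies(variables);
const { order, graph } = buildDependencyGraph(dependencies);
const parentDependencyGraph = buildParentDependencyGraph(graph);

// When `service_name` changes, queue it and everything downstream of it, in
// topological order, so each dependent re-fetches exactly once.
const toUpdate: string[] = [];
onUpdateVariableNode('service_name', graph, order, (node) => toUpdate.push(node));

console.log(toUpdate); // ['service_name', 'endpoint']
console.log(parentDependencyGraph.endpoint); // ['deployment_environment', 'service_name']
```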
```diff
@@ -24,7 +24,7 @@ import { commaValuesParser } from 'lib/dashbaordVariables/customCommaValuesParse
 import sortValues from 'lib/dashbaordVariables/sortVariableValues';
 import { debounce, isArray, isString } from 'lodash-es';
 import map from 'lodash-es/map';
-import { ChangeEvent, memo, useEffect, useMemo, useState } from 'react';
+import { ChangeEvent, memo, useEffect, useMemo, useRef, useState } from 'react';
 import { useQuery } from 'react-query';
 import { useSelector } from 'react-redux';
 import { AppState } from 'store/reducers';
@@ -35,12 +35,15 @@ import { popupContainer } from 'utils/selectPopupContainer';
 
 import { variablePropsToPayloadVariables } from '../utils';
 import { SelectItemStyle } from './styles';
-import { areArraysEqual } from './util';
+import {
+  areArraysEqual,
+  checkAPIInvocation,
+  onUpdateVariableNode,
+  VariableGraph,
+} from './util';
 
 const ALL_SELECT_VALUE = '__ALL__';
 
-const variableRegexPattern = /\{\{\s*?\.([^\s}]+)\s*?\}\}/g;
-
 enum ToggleTagValue {
   Only = 'Only',
   All = 'All',
@@ -54,9 +57,15 @@ interface VariableItemProps {
     id: string,
     arg1: IDashboardVariable['selectedValue'],
     allSelected: boolean,
+    isMountedCall?: boolean,
   ) => void;
   variablesToGetUpdated: string[];
   setVariablesToGetUpdated: React.Dispatch<React.SetStateAction<string[]>>;
+  dependencyData: {
+    order: string[];
+    graph: VariableGraph;
+    parentDependencyGraph: VariableGraph;
+  } | null;
 }
 
 const getSelectValue = (
@@ -79,6 +88,7 @@ function VariableItem({
   onValueUpdate,
   variablesToGetUpdated,
   setVariablesToGetUpdated,
+  dependencyData,
 }: VariableItemProps): JSX.Element {
   const [optionsData, setOptionsData] = useState<(string | number | boolean)[]>(
     [],
@@ -88,60 +98,29 @@ function VariableItem({
     (state) => state.globalTime,
   );
 
-  useEffect(() => {
-    if (variableData.allSelected && variableData.type === 'QUERY') {
-      setVariablesToGetUpdated((prev) => {
-        const variablesQueue = [...prev.filter((v) => v !== variableData.name)];
-        if (variableData.name) {
-          variablesQueue.push(variableData.name);
-        }
-        return variablesQueue;
-      });
-    }
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [minTime, maxTime]);
-
-  const [errorMessage, setErrorMessage] = useState<null | string>(null);
-
-  const getDependentVariables = (queryValue: string): string[] => {
-    const matches = queryValue.match(variableRegexPattern);
-
-    // Extract variable names from the matches array without {{ . }}
-    return matches
-      ? matches.map((match) => match.replace(variableRegexPattern, '$1'))
-      : [];
-  };
-
-  const getQueryKey = (variableData: IDashboardVariable): string[] => {
-    let dependentVariablesStr = '';
-
-    const dependentVariables = getDependentVariables(
-      variableData.queryValue || '',
-    );
-
-    const variableName = variableData.name || '';
-
-    dependentVariables?.forEach((element) => {
-      const [, variable] =
-        Object.entries(existingVariables).find(
-          ([, value]) => value.name === element,
-        ) || [];
-
-      dependentVariablesStr += `${element}${variable?.selectedValue}`;
-    });
-
-    const variableKey = dependentVariablesStr.replace(/\s/g, '');
-
-    // added this time dependency for variables query as API respects the passed time range now
-    return [
-      REACT_QUERY_KEY.DASHBOARD_BY_ID,
-      variableName,
-      variableKey,
-      `${minTime}`,
-      `${maxTime}`,
-    ];
-  };
+  // logic to detect if its a rerender or a new render/mount
+  const isMounted = useRef(false);
+
+  useEffect(() => {
+    isMounted.current = true;
+  }, []);
+
+  const validVariableUpdate = (): boolean => {
+    if (!variableData.name) {
+      return false;
+    }
+    if (!isMounted.current) {
+      // variableData.name is present as the top element or next in the queue - variablesToGetUpdated
+      return Boolean(
+        variablesToGetUpdated.length &&
+          variablesToGetUpdated[0] === variableData.name,
+      );
+    }
+    return variablesToGetUpdated.includes(variableData.name);
+  };
+
+  const [errorMessage, setErrorMessage] = useState<null | string>(null);
 
   // eslint-disable-next-line sonarjs/cognitive-complexity
   const getOptions = (variablesRes: VariableResponseProps | null): void => {
     if (variablesRes && variableData.type === 'QUERY') {
@@ -184,9 +163,7 @@ function VariableItem({
     if (
       variableData.type === 'QUERY' &&
       variableData.name &&
-      (variablesToGetUpdated.includes(variableData.name) ||
-        valueNotInList ||
-        variableData.allSelected)
+      (validVariableUpdate() || valueNotInList || variableData.allSelected)
     ) {
       let value = variableData.selectedValue;
       let allSelected = false;
@@ -200,7 +177,16 @@ function VariableItem({
       }
 
       if (variableData && variableData?.name && variableData?.id) {
-        onValueUpdate(variableData.name, variableData.id, value, allSelected);
+        onValueUpdate(
+          variableData.name,
+          variableData.id,
+          value,
+          allSelected,
+          isMounted.current,
+        );
+        setVariablesToGetUpdated((prev) =>
+          prev.filter((name) => name !== variableData.name),
+        );
       }
     }
 
@@ -224,36 +210,75 @@ function VariableItem({
     }
   };
 
-  const { isLoading } = useQuery(getQueryKey(variableData), {
-    enabled: variableData && variableData.type === 'QUERY',
-    queryFn: () =>
-      dashboardVariablesQuery({
-        query: variableData.queryValue || '',
-        variables: variablePropsToPayloadVariables(existingVariables),
-      }),
-    refetchOnWindowFocus: false,
-    onSuccess: (response) => {
-      getOptions(response.payload);
-    },
-    onError: (error: {
-      details: {
-        error: string;
-      };
-    }) => {
-      const { details } = error;
-
-      if (details.error) {
-        let message = details.error;
-        if (details.error.includes('Syntax error:')) {
-          message =
-            'Please make sure query is valid and dependent variables are selected';
-        }
-        setErrorMessage(message);
-      }
-    },
-  });
+  const { isLoading } = useQuery(
+    [
+      REACT_QUERY_KEY.DASHBOARD_BY_ID,
+      variableData.name || '',
+      `${minTime}`,
+      `${maxTime}`,
+    ],
+    {
+      enabled:
+        variableData &&
+        variableData.type === 'QUERY' &&
+        checkAPIInvocation(
+          variablesToGetUpdated,
+          variableData,
+          dependencyData?.parentDependencyGraph,
+        ),
+      queryFn: () =>
+        dashboardVariablesQuery({
+          query: variableData.queryValue || '',
+          variables: variablePropsToPayloadVariables(existingVariables),
+        }),
+      refetchOnWindowFocus: false,
+      onSuccess: (response) => {
+        getOptions(response.payload);
+        if (
+          dependencyData?.parentDependencyGraph[variableData.name || ''].length === 0
+        ) {
+          const updatedVariables: string[] = [];
+          onUpdateVariableNode(
+            variableData.name || '',
+            dependencyData.graph,
+            dependencyData.order,
+            (node) => updatedVariables.push(node),
+          );
+          setVariablesToGetUpdated((prev) => [
+            ...prev,
+            ...updatedVariables.filter((v) => v !== variableData.name),
+          ]);
+        }
+      },
+      onError: (error: {
+        details: {
+          error: string;
+        };
+      }) => {
+        const { details } = error;
+
+        if (details.error) {
+          let message = details.error;
+          if (details.error.includes('Syntax error:')) {
+            message =
+              'Please make sure query is valid and dependent variables are selected';
+          }
+          setErrorMessage(message);
+        }
+      },
+    },
+  );
 
   const handleChange = (value: string | string[]): void => {
+    // if value is equal to selected value then return
+    if (
+      value === variableData.selectedValue ||
+      (Array.isArray(value) &&
+        Array.isArray(variableData.selectedValue) &&
+        areArraysEqual(value, variableData.selectedValue))
+    ) {
+      return;
+    }
     if (variableData.name) {
       if (
         value === ALL_SELECT_VALUE ||
```
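`checkAPIInvocation` is what keeps a dependent variable's query disabled until its turn comes up in the update queue. A small usage sketch against the parent graph used by the mocks below (trimmed variable objects, illustrative only):

```ts
import { checkAPIInvocation } from './util';

// Parent graph (child -> parents) matching the mock data further down.
const parentDependencyGraph = {
  k8s_cluster_name: [],
  k8s_node_name: ['k8s_cluster_name'],
  k8s_namespace_name: ['k8s_cluster_name', 'k8s_node_name'],
};

const clusterName = { name: 'k8s_cluster_name' } as any; // trimmed IDashboardVariable
const nodeName = { name: 'k8s_node_name' } as any;

// A root variable (no parents) is always allowed to fetch.
console.log(checkAPIInvocation([], clusterName, parentDependencyGraph)); // true

// A dependent variable fetches only when it sits at the head of the queue.
console.log(
  checkAPIInvocation(['k8s_node_name', 'k8s_namespace_name'], nodeName, parentDependencyGraph), // true
  checkAPIInvocation(['k8s_namespace_name', 'k8s_node_name'], nodeName, parentDependencyGraph), // false
);
```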
@@ -0,0 +1,241 @@

```ts
import {
  buildDependencies,
  buildDependencyGraph,
  buildParentDependencyGraph,
  checkAPIInvocation,
  onUpdateVariableNode,
  VariableGraph,
} from '../util';
import {
  buildDependenciesMock,
  buildGraphMock,
  checkAPIInvocationMock,
  onUpdateVariableNodeMock,
} from './mock';

describe('dashboardVariables - utilities and processors', () => {
  describe('onUpdateVariableNode', () => {
    const { graph, topologicalOrder } = onUpdateVariableNodeMock;
    const testCases = [
      {
        scenario: 'root element',
        nodeToUpdate: 'deployment_environment',
        expected: [
          'deployment_environment',
          'service_name',
          'endpoint',
          'http_status_code',
        ],
      },
      {
        scenario: 'middle child',
        nodeToUpdate: 'k8s_node_name',
        expected: ['k8s_node_name', 'k8s_namespace_name'],
      },
      {
        scenario: 'leaf element',
        nodeToUpdate: 'http_status_code',
        expected: ['http_status_code'],
      },
      {
        scenario: 'node not in graph',
        nodeToUpdate: 'unknown',
        expected: [],
      },
      {
        scenario: 'node not in topological order',
        nodeToUpdate: 'unknown',
        expected: [],
      },
    ];

    test.each(testCases)(
      'should update variable node when $scenario',
      ({ nodeToUpdate, expected }) => {
        const updatedVariables: string[] = [];
        const callback = (node: string): void => {
          updatedVariables.push(node);
        };

        onUpdateVariableNode(nodeToUpdate, graph, topologicalOrder, callback);

        expect(updatedVariables).toEqual(expected);
      },
    );

    it('should return empty array when topological order is empty', () => {
      const updatedVariables: string[] = [];
      onUpdateVariableNode('http_status_code', graph, [], (node) =>
        updatedVariables.push(node),
      );
      expect(updatedVariables).toEqual([]);
    });
  });

  describe('checkAPIInvocation', () => {
    const {
      variablesToGetUpdated,
      variableData,
      parentDependencyGraph,
    } = checkAPIInvocationMock;

    const mockRootElement = {
      name: 'deployment_environment',
      key: '036a47cd-9ffc-47de-9f27-0329198964a8',
      id: '036a47cd-9ffc-47de-9f27-0329198964a8',
      modificationUUID: '5f71b591-f583-497c-839d-6a1590c3f60f',
      selectedValue: 'production',
      type: 'QUERY',
      // ... other properties omitted for brevity
    } as any;

    describe('edge cases', () => {
      it('should return false when variableData is empty', () => {
        expect(
          checkAPIInvocation(
            variablesToGetUpdated,
            variableData,
            parentDependencyGraph,
          ),
        ).toBeFalsy();
      });

      it('should return true when parentDependencyGraph is empty', () => {
        expect(
          checkAPIInvocation(variablesToGetUpdated, variableData, {}),
        ).toBeTruthy();
      });
    });

    describe('variable sequences', () => {
      it('should return true for valid sequence', () => {
        expect(
          checkAPIInvocation(
            ['k8s_node_name', 'k8s_namespace_name'],
            variableData,
            parentDependencyGraph,
          ),
        ).toBeTruthy();
      });

      it('should return false for invalid sequence', () => {
        expect(
          checkAPIInvocation(
            ['k8s_cluster_name', 'k8s_node_name', 'k8s_namespace_name'],
            variableData,
            parentDependencyGraph,
          ),
        ).toBeFalsy();
      });

      it('should return false when variableData is not in sequence', () => {
        expect(
          checkAPIInvocation(
            ['deployment_environment', 'service_name', 'endpoint'],
            variableData,
            parentDependencyGraph,
          ),
        ).toBeFalsy();
      });
    });

    describe('root element behavior', () => {
      it('should return true for valid root element sequence', () => {
        expect(
          checkAPIInvocation(
            [
              'deployment_environment',
              'service_name',
              'endpoint',
              'http_status_code',
            ],
            mockRootElement,
            parentDependencyGraph,
          ),
        ).toBeTruthy();
      });

      it('should return true for empty variablesToGetUpdated array', () => {
        expect(
          checkAPIInvocation([], mockRootElement, parentDependencyGraph),
        ).toBeTruthy();
      });
    });
  });

  describe('Graph Building Utilities', () => {
    const { graph } = buildGraphMock;
    const { variables } = buildDependenciesMock;

    describe('buildParentDependencyGraph', () => {
      it('should build parent dependency graph with correct relationships', () => {
        const expected = {
          deployment_environment: [],
          service_name: ['deployment_environment'],
          endpoint: ['deployment_environment', 'service_name'],
          http_status_code: ['endpoint'],
          k8s_cluster_name: [],
          k8s_node_name: ['k8s_cluster_name'],
          k8s_namespace_name: ['k8s_cluster_name', 'k8s_node_name'],
          environment: [],
        };

        expect(buildParentDependencyGraph(graph)).toEqual(expected);
      });

      it('should handle empty graph', () => {
        expect(buildParentDependencyGraph({})).toEqual({});
      });
    });

    describe('buildDependencyGraph', () => {
      it('should build complete dependency graph with correct structure and order', () => {
        const expected = {
          graph: {
            deployment_environment: ['service_name', 'endpoint'],
            service_name: ['endpoint'],
            endpoint: ['http_status_code'],
            http_status_code: [],
            k8s_cluster_name: ['k8s_node_name', 'k8s_namespace_name'],
            k8s_node_name: ['k8s_namespace_name'],
            k8s_namespace_name: [],
            environment: [],
          },
          order: [
            'deployment_environment',
            'k8s_cluster_name',
            'environment',
            'service_name',
            'k8s_node_name',
            'endpoint',
            'k8s_namespace_name',
            'http_status_code',
          ],
        };

        expect(buildDependencyGraph(graph)).toEqual(expected);
      });
    });

    describe('buildDependencies', () => {
      it('should build dependency map from variables array', () => {
        const expected: VariableGraph = {
          deployment_environment: ['service_name', 'endpoint'],
          service_name: ['endpoint'],
          endpoint: ['http_status_code'],
          http_status_code: [],
          k8s_cluster_name: ['k8s_node_name', 'k8s_namespace_name'],
          k8s_node_name: ['k8s_namespace_name'],
          k8s_namespace_name: [],
          environment: [],
        };

        expect(buildDependencies(variables)).toEqual(expected);
      });

      it('should handle empty variables array', () => {
        expect(buildDependencies([])).toEqual({});
      });
    });
  });
});
```
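One case the suite above leaves out is the cycle guard in `buildDependencyGraph`; an additional test along these lines would exercise it (illustrative only, not part of this diff):

```ts
import { buildDependencyGraph } from '../util';

it('should throw when the dependency graph has a cycle', () => {
  // Two variables that reference each other never reach in-degree zero,
  // so Kahn's algorithm cannot emit them and the helper throws.
  expect(() => buildDependencyGraph({ a: ['b'], b: ['a'] })).toThrow(
    'Cycle detected in the dependency graph!',
  );
});
```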
@@ -0,0 +1,251 @@

```ts
/* eslint-disable sonarjs/no-duplicate-string */
export const checkAPIInvocationMock = {
  variablesToGetUpdated: [],
  variableData: {
    name: 'k8s_node_name',
    key: '4d71d385-beaf-4434-8dbf-c62be68049fc',
    allSelected: false,
    customValue: '',
    description: '',
    id: '4d71d385-beaf-4434-8dbf-c62be68049fc',
    modificationUUID: '77233d3c-96d7-4ccb-aa9d-11b04d563068',
    multiSelect: false,
    order: 6,
    queryValue:
      "SELECT JSONExtractString(labels, 'k8s_node_name') AS k8s_node_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'k8s_node_cpu_time' AND JSONExtractString(labels, 'k8s_cluster_name') = {{.k8s_cluster_name}}\nGROUP BY k8s_node_name",
    selectedValue: 'gke-signoz-saas-si-consumer-bsc-e2sd4-a6d430fa-gvm2',
    showALLOption: false,
    sort: 'DISABLED',
    textboxValue: '',
    type: 'QUERY',
  },
  parentDependencyGraph: {
    deployment_environment: [],
    service_name: ['deployment_environment'],
    endpoint: ['deployment_environment', 'service_name'],
    http_status_code: ['endpoint'],
    k8s_cluster_name: [],
    environment: [],
    k8s_node_name: ['k8s_cluster_name'],
    k8s_namespace_name: ['k8s_cluster_name', 'k8s_node_name'],
  },
} as any;

export const onUpdateVariableNodeMock = {
  nodeToUpdate: 'deployment_environment',
  graph: {
    deployment_environment: ['service_name', 'endpoint'],
    service_name: ['endpoint'],
    endpoint: ['http_status_code'],
    http_status_code: [],
    k8s_cluster_name: ['k8s_node_name', 'k8s_namespace_name'],
    environment: [],
    k8s_node_name: ['k8s_namespace_name'],
    k8s_namespace_name: [],
  },
  topologicalOrder: [
    'deployment_environment',
    'k8s_cluster_name',
    'environment',
    'service_name',
    'k8s_node_name',
    'endpoint',
    'k8s_namespace_name',
    'http_status_code',
  ],
  callback: jest.fn(),
};

export const buildGraphMock = {
  graph: {
    deployment_environment: ['service_name', 'endpoint'],
    service_name: ['endpoint'],
    endpoint: ['http_status_code'],
    http_status_code: [],
    k8s_cluster_name: ['k8s_node_name', 'k8s_namespace_name'],
    environment: [],
    k8s_node_name: ['k8s_namespace_name'],
    k8s_namespace_name: [],
  },
};

export const buildDependenciesMock = {
  variables: [
    {
      key: '036a47cd-9ffc-47de-9f27-0329198964a8',
      name: 'deployment_environment',
      allSelected: false,
      customValue: '',
      description: '',
      id: '036a47cd-9ffc-47de-9f27-0329198964a8',
      modificationUUID: '5f71b591-f583-497c-839d-6a1590c3f60f',
      multiSelect: false,
      order: 0,
      queryValue:
        "SELECT DISTINCT JSONExtractString(labels, 'deployment_environment') AS deployment_environment\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'signoz_calls_total'",
      selectedValue: 'production',
      showALLOption: false,
      sort: 'DISABLED',
      textboxValue: '',
      type: 'QUERY',
    },
    {
      key: 'eed5c917-1860-4c7e-bf6d-a05b97bafbc9',
      name: 'service_name',
      allSelected: true,
      customValue: '',
      description: '',
      id: 'eed5c917-1860-4c7e-bf6d-a05b97bafbc9',
      modificationUUID: '85db928b-ac9b-4e9f-b274-791112102fdf',
      multiSelect: true,
      order: 1,
      queryValue:
        "SELECT DISTINCT JSONExtractString(labels, 'service_name') FROM signoz_metrics.distributed_time_series_v4_1day\n WHERE metric_name = 'signoz_calls_total' and JSONExtractString(labels, 'deployment_environment') = {{.deployment_environment}}",
      selectedValue: ['otelgateway'],
      showALLOption: true,
      sort: 'ASC',
      textboxValue: '',
      type: 'QUERY',
    },
    {
      key: '4022d3c1-e845-4952-8984-78f25f575c7a',
      name: 'endpoint',
      allSelected: true,
      customValue: '',
      description: '',
      id: '4022d3c1-e845-4952-8984-78f25f575c7a',
      modificationUUID: 'c0107fa1-ebb7-4dd3-aa9d-6ba08ecc594d',
      multiSelect: true,
      order: 2,
      queryValue:
        "SELECT DISTINCT JSONExtractString(labels, 'operation') FROM signoz_metrics.distributed_time_series_v4_1day\n WHERE metric_name = 'signoz_calls_total' AND JSONExtractString(labels, 'service_name') IN {{.service_name}} and JSONExtractString(labels, 'deployment_environment') = {{.deployment_environment}}",
      selectedValue: [
        '//v1/traces',
        '/logs/heroku',
        '/logs/json',
        '/logs/vector',
        '/v1/logs',
        '/v1/metrics',
        '/v1/traces',
        'SELECT',
        'exporter/signozkafka/logs',
        'exporter/signozkafka/metrics',
        'exporter/signozkafka/traces',
        'extension/signozkeyauth/Authenticate',
        'get',
        'hmget',
        'opentelemetry.proto.collector.logs.v1.LogsService/Export',
        'opentelemetry.proto.collector.metrics.v1.MetricsService/Export',
        'opentelemetry.proto.collector.trace.v1.TraceService/Export',
        'processor/signozlimiter/LogsProcessed',
        'processor/signozlimiter/MetricsProcessed',
        'processor/signozlimiter/TracesProcessed',
        'receiver/otlp/LogsReceived',
        'receiver/otlp/MetricsReceived',
        'receiver/otlp/TraceDataReceived',
        'receiver/signozhttplog/heroku/LogsReceived',
        'receiver/signozhttplog/json/LogsReceived',
        'receiver/signozhttplog/vector/LogsReceived',
        'redis.dial',
        'redis.pipeline eval',
        'sadd',
        'set',
        'sismember',
      ],
      showALLOption: true,
      sort: 'ASC',
      textboxValue: '',
      type: 'QUERY',
    },
    {
      key: '5e8a3cd9-3cd9-42df-a76c-79471a0f75bd',
      name: 'http_status_code',
      customValue: '',
      description: '',
      id: '5e8a3cd9-3cd9-42df-a76c-79471a0f75bd',
      modificationUUID: '9a4021cc-a80a-4f15-8899-78892b763ca7',
      multiSelect: true,
      order: 3,
      queryValue:
        "SELECT DISTINCT JSONExtractString(labels, 'http_status_code') FROM signoz_metrics.distributed_time_series_v4_1day\n WHERE metric_name = 'signoz_calls_total' AND JSONExtractString(labels, 'operation') IN {{.endpoint}}",
      showALLOption: true,
      sort: 'ASC',
      textboxValue: '',
      type: 'QUERY',
      selectedValue: ['', '200', '301', '400', '401', '405', '415', '429'],
      allSelected: true,
    },
    {
      key: '48e9aa64-05ca-41c2-a1bd-6c8aeca659f1',
      name: 'k8s_cluster_name',
      allSelected: false,
      customValue: 'test-1,\ntest-2,\ntest-3',
      description: '',
      id: '48e9aa64-05ca-41c2-a1bd-6c8aeca659f1',
      modificationUUID: '44722322-368c-4613-bb7f-d0b12867d57a',
      multiSelect: false,
      order: 4,
      queryValue:
        "SELECT JSONExtractString(labels, 'k8s_cluster_name') AS k8s_cluster_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'k8s_node_cpu_time'\nGROUP BY k8s_cluster_name",
      selectedValue: 'saasmonitor-cluster',
      showALLOption: false,
      sort: 'DISABLED',
      textboxValue: '',
      type: 'QUERY',
    },
    {
      key: '3ea18ba2-30cf-4220-b03b-720b5eaf35f8',
      name: 'environment',
      allSelected: false,
      customValue: '',
      description: '',
      id: '3ea18ba2-30cf-4220-b03b-720b5eaf35f8',
      modificationUUID: '9f76cb06-1b9f-460f-a174-0b210bb3cf93',
      multiSelect: false,
      order: 5,
      queryValue:
        "SELECT DISTINCT JSONExtractString(labels, 'deployment_environment') AS environment\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'signoz_calls_total'",
      selectedValue: 'production',
      showALLOption: false,
      sort: 'DISABLED',
      textboxValue: '',
      type: 'QUERY',
    },
    {
      key: '4d71d385-beaf-4434-8dbf-c62be68049fc',
      name: 'k8s_node_name',
      allSelected: false,
      customValue: '',
      description: '',
      id: '4d71d385-beaf-4434-8dbf-c62be68049fc',
      modificationUUID: '77233d3c-96d7-4ccb-aa9d-11b04d563068',
      multiSelect: false,
      order: 6,
      queryValue:
        "SELECT JSONExtractString(labels, 'k8s_node_name') AS k8s_node_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'k8s_node_cpu_time' AND JSONExtractString(labels, 'k8s_cluster_name') = {{.k8s_cluster_name}}\nGROUP BY k8s_node_name",
      selectedValue: 'gke-signoz-saas-si-consumer-bsc-e2sd4-a6d430fa-gvm2',
      showALLOption: false,
      sort: 'DISABLED',
      textboxValue: '',
      type: 'QUERY',
    },
    {
      key: '937ecbae-b24b-4d6d-8cc4-5d5b8d53569b',
      name: 'k8s_namespace_name',
      customValue: '',
      description: '',
      id: '937ecbae-b24b-4d6d-8cc4-5d5b8d53569b',
      modificationUUID: '8ad2442d-8b4d-4c64-848e-af847d1d0eec',
      multiSelect: false,
      order: 7,
      queryValue:
        "SELECT JSONExtractString(labels, 'k8s_namespace_name') AS k8s_namespace_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'k8s_pod_cpu_time' AND JSONExtractString(labels, 'k8s_cluster_name') = {{.k8s_cluster_name}} AND JSONExtractString(labels, 'k8s_node_name') IN {{.k8s_node_name}}\nGROUP BY k8s_namespace_name",
      showALLOption: false,
      sort: 'DISABLED',
      textboxValue: '',
      type: 'QUERY',
      selectedValue: 'saasmonitor',
      allSelected: false,
    },
  ] as any,
};
```
```diff
@@ -1,3 +1,4 @@
+import { isEmpty } from 'lodash-es';
 import { Dashboard, IDashboardVariable } from 'types/api/dashboard/getAll';
 
 export function areArraysEqual(
@@ -29,3 +30,159 @@ export const convertVariablesToDbFormat = (
     result[id] = obj;
     return result;
   }, {});
+
+const getDependentVariables = (queryValue: string): string[] => {
+  const variableRegexPattern = /\{\{\s*?\.([^\s}]+)\s*?\}\}/g;
+
+  const matches = queryValue.match(variableRegexPattern);
+
+  // Extract variable names from the matches array without {{ . }}
+  return matches
+    ? matches.map((match) => match.replace(variableRegexPattern, '$1'))
+    : [];
+};
+export type VariableGraph = Record<string, string[]>;
+
+export const buildDependencies = (
+  variables: IDashboardVariable[],
+): VariableGraph => {
+  console.log('buildDependencies', variables);
+  const graph: VariableGraph = {};
+
+  // Initialize empty arrays for all variables first
+  variables.forEach((variable) => {
+    if (variable.name) {
+      graph[variable.name] = [];
+    }
+  });
+
+  // For each QUERY variable, add it as a dependent to its referenced variables
+  variables.forEach((variable) => {
+    if (variable.type === 'QUERY' && variable.name) {
+      const dependentVariables = getDependentVariables(variable.queryValue || '');
+
+      // For each referenced variable, add the current query as a dependent
+      dependentVariables.forEach((referencedVar) => {
+        if (graph[referencedVar]) {
+          graph[referencedVar].push(variable.name as string);
+        } else {
+          graph[referencedVar] = [variable.name as string];
+        }
+      });
+    }
+  });
+
+  return graph;
+};
+
+// Function to build the dependency graph
+export const buildDependencyGraph = (
+  dependencies: VariableGraph,
+): { order: string[]; graph: VariableGraph } => {
+  const inDegree: Record<string, number> = {};
+  const adjList: VariableGraph = {};
+
+  // Initialize in-degree and adjacency list
+  Object.keys(dependencies).forEach((node) => {
+    if (!inDegree[node]) inDegree[node] = 0;
+    if (!adjList[node]) adjList[node] = [];
+    dependencies[node].forEach((child) => {
+      if (!inDegree[child]) inDegree[child] = 0;
+      inDegree[child]++;
+      adjList[node].push(child);
+    });
+  });
+
+  // Topological sort using Kahn's Algorithm
+  const queue: string[] = Object.keys(inDegree).filter(
+    (node) => inDegree[node] === 0,
+  );
+  const topologicalOrder: string[] = [];
+
+  while (queue.length > 0) {
+    const current = queue.shift();
+    if (current === undefined) {
+      break;
+    }
+    topologicalOrder.push(current);
+
+    adjList[current].forEach((neighbor) => {
+      inDegree[neighbor]--;
+      if (inDegree[neighbor] === 0) queue.push(neighbor);
+    });
+  }
+
+  if (topologicalOrder.length !== Object.keys(dependencies).length) {
+    throw new Error('Cycle detected in the dependency graph!');
+  }
+
+  return { order: topologicalOrder, graph: adjList };
+};
+
+export const onUpdateVariableNode = (
+  nodeToUpdate: string,
+  graph: VariableGraph,
+  topologicalOrder: string[],
+  callback: (node: string) => void,
+): void => {
+  const visited = new Set<string>();
+
+  // Start processing from the node to update
+  topologicalOrder.forEach((node) => {
+    if (node === nodeToUpdate || visited.has(node)) {
+      visited.add(node);
+      callback(node);
+      (graph[node] || []).forEach((child) => {
+        visited.add(child);
+      });
+    }
+  });
+};
+
+export const buildParentDependencyGraph = (
+  graph: VariableGraph,
+): VariableGraph => {
+  const parentGraph: VariableGraph = {};
+
+  // Initialize empty arrays for all nodes
+  Object.keys(graph).forEach((node) => {
+    parentGraph[node] = [];
+  });
+
+  // For each node and its children in the original graph
+  Object.entries(graph).forEach(([node, children]) => {
+    // For each child, add the current node as its parent
+    children.forEach((child) => {
+      parentGraph[child].push(node);
+    });
+  });
+
+  return parentGraph;
+};
+
+export const checkAPIInvocation = (
+  variablesToGetUpdated: string[],
+  variableData: IDashboardVariable,
+  parentDependencyGraph?: VariableGraph,
+): boolean => {
+  if (isEmpty(variableData.name)) {
+    return false;
+  }
+
+  if (isEmpty(parentDependencyGraph)) {
+    return true;
+  }
+
+  // if no dependency then true
+  const haveDependency =
+    parentDependencyGraph?.[variableData.name || '']?.length > 0;
+  if (!haveDependency) {
+    return true;
+  }
+
+  // if variable is in the list and has dependency then check if its the top element in the queue then true else false
+  return (
+    variablesToGetUpdated.length > 0 &&
+    variablesToGetUpdated[0] === variableData.name
+  );
+};
```
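`buildDependencies` extracts references with the same `{{.var}}` regex that previously lived in `VariableItem`. A standalone sketch of that extraction step (the `query` string here is a made-up example):

```ts
// The {{.var}} extraction that buildDependencies relies on, shown on its own.
const variableRegexPattern = /\{\{\s*?\.([^\s}]+)\s*?\}\}/g;

const query =
  "SELECT ... WHERE JSONExtractString(labels, 'service_name') IN {{.service_name}} AND JSONExtractString(labels, 'deployment_environment') = {{ .deployment_environment }}";

const matches = query.match(variableRegexPattern);
const referenced = matches
  ? matches.map((m) => m.replace(variableRegexPattern, '$1'))
  : [];

console.log(referenced); // ['service_name', 'deployment_environment']
```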
```diff
@@ -4075,9 +4075,10 @@ func (aH *APIHandler) CreateLogsPipeline(w http.ResponseWriter, r *http.Request)
 		zap.L().Warn("found no pipelines in the http request, this will delete all the pipelines")
 	}
 
-	validationErr := aH.LogsParsingPipelineController.ValidatePipelines(ctx, postable)
-	if validationErr != nil {
-		return nil, validationErr
+	for _, p := range postable {
+		if err := p.IsValid(); err != nil {
+			return nil, model.BadRequestStr(err.Error())
+		}
 	}
 
 	return aH.LogsParsingPipelineController.ApplyPipelines(ctx, postable)
```
```diff
@@ -6,32 +6,28 @@ import (
 
 func generateConsumerSQL(start, end int64, topic, partition, consumerGroup, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_query AS (
     SELECT
-        resource_string_service$$name,
+        serviceName,
         quantile(0.99)(durationNano) / 1000000 AS p99,
         COUNT(*) AS total_requests,
-        sumIf(1, status_code = 2) AS error_count,
-        avg(CASE WHEN has(attributes_number, 'messaging.message.body.size') THEN attributes_number['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size
-    FROM signoz_traces.distributed_signoz_index_v3
+        sumIf(1, statusCode = 2) AS error_count,
+        avg(CASE WHEN has(numberTagMap, 'messaging.message.body.size') THEN numberTagMap['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size
+    FROM signoz_traces.distributed_signoz_index_v2
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 5
-        AND attribute_string_messaging$$system = '%s'
-        AND attributes_string['messaging.destination.name'] = '%s'
-        AND attributes_string['messaging.destination.partition.id'] = '%s'
-        AND attributes_string['messaging.kafka.consumer.group'] = '%s'
-    GROUP BY resource_string_service$$name
+        AND msgSystem = '%s'
+        AND stringTagMap['messaging.destination.name'] = '%s'
+        AND stringTagMap['messaging.destination.partition.id'] = '%s'
+        AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
+    GROUP BY serviceName
 )
 
 SELECT
-    resource_string_service$$name AS service_name,
+    serviceName AS service_name,
     p99,
     COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
     COALESCE(total_requests / %d, 0) AS throughput,
```
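The removed lines drop the `ts_bucket_start` window that the new-schema (`signoz_index_v3`) queries add on top of the raw timestamp filter: bucket bounds are epoch seconds, with the lower bound widened by 1800 s. A sketch of that arithmetic, written in TypeScript to stay consistent with the other examples here (`queueQueryTimeParams` is an illustrative name, not part of the diff):

```ts
// start/end are epoch nanoseconds, as in the Go builders; ClickHouse bucket
// columns are epoch seconds and the lower bound is widened by 30 minutes.
function queueQueryTimeParams(
  start: number,
  end: number,
): { timeRangeSeconds: number; tsBucketStart: number; tsBucketEnd: number } {
  return {
    timeRangeSeconds: (end - start) / 1_000_000_000,
    tsBucketStart: start / 1_000_000_000 - 1800,
    tsBucketEnd: end / 1_000_000_000,
  };
}

// e.g. a 1-hour window
console.log(queueQueryTimeParams(1_700_000_000_000_000_000, 1_700_003_600_000_000_000));
// { timeRangeSeconds: 3600, tsBucketStart: 1699998200, tsBucketEnd: 1700003600 }
```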
```diff
@@ -39,31 +35,27 @@ SELECT
 FROM
     consumer_query
 ORDER BY
-    resource_string_service$$name;
-`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, consumerGroup, timeRange)
+    serviceName;
+`, start, end, queueType, topic, partition, consumerGroup, timeRange)
 	return query
 }
 
 // S1 landing
 func generatePartitionLatencySQL(start, end int64, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH partition_query AS (
     SELECT
         quantile(0.99)(durationNano) / 1000000 AS p99,
         count(*) AS total_requests,
-        attributes_string['messaging.destination.name'] AS topic,
-        attributes_string['messaging.destination.partition.id'] AS partition
-    FROM signoz_traces.distributed_signoz_index_v3
+        stringTagMap['messaging.destination.name'] AS topic,
+        stringTagMap['messaging.destination.partition.id'] AS partition
+    FROM signoz_traces.distributed_signoz_index_v2
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 4
-        AND attribute_string_messaging$$system = '%s'
+        AND msgSystem = '%s'
     GROUP BY topic, partition
 )
 
```
```diff
@@ -76,39 +68,35 @@ FROM
     partition_query
 ORDER BY
     topic;
-`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange)
+`, start, end, queueType, timeRange)
 	return query
 }
 
 // S1 consumer
 func generateConsumerPartitionLatencySQL(start, end int64, topic, partition, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_pl AS (
     SELECT
-        attributes_string['messaging.kafka.consumer.group'] AS consumer_group,
-        resource_string_service$$name,
+        stringTagMap['messaging.kafka.consumer.group'] AS consumer_group,
+        serviceName,
         quantile(0.99)(durationNano) / 1000000 AS p99,
         COUNT(*) AS total_requests,
-        sumIf(1, status_code = 2) AS error_count
-    FROM signoz_traces.distributed_signoz_index_v3
+        sumIf(1, statusCode = 2) AS error_count
+    FROM signoz_traces.distributed_signoz_index_v2
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 5
-        AND attribute_string_messaging$$system = '%s'
-        AND attributes_string['messaging.destination.name'] = '%s'
-        AND attributes_string['messaging.destination.partition.id'] = '%s'
-    GROUP BY consumer_group, resource_string_service$$name
+        AND msgSystem = '%s'
+        AND stringTagMap['messaging.destination.name'] = '%s'
+        AND stringTagMap['messaging.destination.partition.id'] = '%s'
+    GROUP BY consumer_group, serviceName
 )
 
 SELECT
     consumer_group,
-    resource_string_service$$name AS service_name,
+    serviceName AS service_name,
     p99,
     COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
     COALESCE(total_requests / %d, 0) AS throughput
```
```diff
@@ -116,68 +104,61 @@ FROM
     consumer_pl
 ORDER BY
     consumer_group;
-`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, timeRange)
+`, start, end, queueType, topic, partition, timeRange)
 	return query
 }
 
 // S3, producer overview
 func generateProducerPartitionThroughputSQL(start, end int64, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000 // t, svc, rps, byte*, p99, err
+	// t, svc, rps, byte*, p99, err
 	query := fmt.Sprintf(`
 WITH producer_latency AS (
     SELECT
-        resource_string_service$$name,
+        serviceName,
         quantile(0.99)(durationNano) / 1000000 AS p99,
-        attributes_string['messaging.destination.name'] AS topic,
+        stringTagMap['messaging.destination.name'] AS topic,
         COUNT(*) AS total_requests,
-        sumIf(1, status_code = 2) AS error_count
-    FROM signoz_traces.distributed_signoz_index_v3
+        sumIf(1, statusCode = 2) AS error_count
+    FROM signoz_traces.distributed_signoz_index_v2
     WHERE
         timestamp >= '%d'
        AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 4
-        AND attribute_string_messaging$$system = '%s'
-    GROUP BY topic, resource_string_service$$name
+        AND msgSystem = '%s'
+    GROUP BY topic, serviceName
 )
 
 SELECT
     topic,
-    resource_string_service$$name AS service_name,
+    serviceName AS service_name,
     p99,
     COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
     COALESCE(total_requests / %d, 0) AS throughput
 FROM
     producer_latency
-`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange)
+`, start, end, queueType, timeRange)
 	return query
 }
 
 // S3, producer topic/service overview
 func generateProducerTopicLatencySQL(start, end int64, topic, service, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_latency AS (
     SELECT
         quantile(0.99)(durationNano) / 1000000 AS p99,
-        attributes_string['messaging.destination.partition.id'] AS partition,
+        stringTagMap['messaging.destination.partition.id'] AS partition,
         COUNT(*) AS total_requests,
-        sumIf(1, status_code = 2) AS error_count
-    FROM signoz_traces.distributed_signoz_index_v3
+        sumIf(1, statusCode = 2) AS error_count
+    FROM signoz_traces.distributed_signoz_index_v2
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 4
-        AND resource_string_service$$name = '%s'
-        AND attribute_string_messaging$$system = '%s'
-        AND attributes_string['messaging.destination.name'] = '%s'
+        AND serviceName = '%s'
+        AND msgSystem = '%s'
+        AND stringTagMap['messaging.destination.name'] = '%s'
     GROUP BY partition
 )
 
```
```diff
@@ -188,38 +169,34 @@ SELECT
     COALESCE(total_requests / %d, 0) AS throughput
 FROM
     consumer_latency
-`, start, end, tsBucketStart, tsBucketEnd, service, queueType, topic, timeRange)
+`, start, end, service, queueType, topic, timeRange)
 	return query
 }
 
 // S3 consumer overview
 func generateConsumerLatencySQL(start, end int64, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_latency AS (
     SELECT
-        resource_string_service$$name,
-        attributes_string['messaging.destination.name'] AS topic,
+        serviceName,
+        stringTagMap['messaging.destination.name'] AS topic,
         quantile(0.99)(durationNano) / 1000000 AS p99,
         COUNT(*) AS total_requests,
-        sumIf(1, status_code = 2) AS error_count,
-        SUM(attributes_number['messaging.message.body.size']) AS total_bytes
-    FROM signoz_traces.distributed_signoz_index_v3
+        sumIf(1, statusCode = 2) AS error_count,
+        SUM(numberTagMap['messaging.message.body.size']) AS total_bytes
+    FROM signoz_traces.distributed_signoz_index_v2
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 5
-        AND attribute_string_messaging$$system = '%s'
-    GROUP BY topic, resource_string_service$$name
+        AND msgSystem = '%s'
+    GROUP BY topic, serviceName
 )
 
 SELECT
     topic,
-    resource_string_service$$name AS service_name,
+    serviceName AS service_name,
     p99,
     COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
     COALESCE(total_requests / %d, 0) AS ingestion_rate,
```
@@ -228,32 +205,28 @@ FROM
|
|||||||
consumer_latency
|
consumer_latency
|
||||||
ORDER BY
|
ORDER BY
|
||||||
topic;
|
topic;
|
||||||
`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange, timeRange)
|
`, start, end, queueType, timeRange, timeRange)
|
||||||
return query
|
return query
|
||||||
}
|
}
|
||||||

 // S3 consumer topic/service
 func generateConsumerServiceLatencySQL(start, end int64, topic, service, queueType string) string {
 timeRange := (end - start) / 1000000000
-tsBucketStart := (start / 1000000000) - 1800
-tsBucketEnd := end / 1000000000
 query := fmt.Sprintf(`
 WITH consumer_latency AS (
 SELECT
 quantile(0.99)(durationNano) / 1000000 AS p99,
-attributes_string['messaging.destination.partition.id'] AS partition,
+stringTagMap['messaging.destination.partition.id'] AS partition,
 COUNT(*) AS total_requests,
-sumIf(1, status_code = 2) AS error_count
+sumIf(1, statusCode = 2) AS error_count
-FROM signoz_traces.distributed_signoz_index_v3
+FROM signoz_traces.distributed_signoz_index_v2
 WHERE
 timestamp >= '%d'
 AND timestamp <= '%d'
-AND ts_bucket_start >= '%d'
-AND ts_bucket_start <= '%d'
 AND kind = 5
-AND resource_string_service$$name = '%s'
+AND serviceName = '%s'
-AND attribute_string_messaging$$system = '%s'
+AND msgSystem = '%s'
-AND attributes_string['messaging.destination.name'] = '%s'
+AND stringTagMap['messaging.destination.name'] = '%s'
 GROUP BY partition
 )

@@ -264,7 +237,7 @@ SELECT
 COALESCE(total_requests / %d, 0) AS throughput
 FROM
 consumer_latency
-`, start, end, tsBucketStart, tsBucketEnd, service, queueType, topic, timeRange)
+`, start, end, service, queueType, topic, timeRange)
 return query
 }

@@ -273,26 +246,26 @@ func generateProducerConsumerEvalSQL(start, end int64, queueType string, evalTim
 query := fmt.Sprintf(`
 WITH trace_data AS (
 SELECT
-p.resource_string_service$$name AS producer_service,
+p.serviceName AS producer_service,
-c.resource_string_service$$name AS consumer_service,
+c.serviceName AS consumer_service,
-p.trace_id,
+p.traceID,
 p.timestamp AS producer_timestamp,
 c.timestamp AS consumer_timestamp,
 p.durationNano AS durationNano,
 (toUnixTimestamp64Nano(c.timestamp) - toUnixTimestamp64Nano(p.timestamp)) + p.durationNano AS time_difference
 FROM
-signoz_traces.distributed_signoz_index_v3 p
+signoz_traces.distributed_signoz_index_v2 p
 INNER JOIN
-signoz_traces.distributed_signoz_index_v3 c
+signoz_traces.distributed_signoz_index_v2 c
-ON p.trace_id = c.trace_id
+ON p.traceID = c.traceID
-AND c.parent_span_id = p.span_id
+AND c.parentSpanID = p.spanID
 WHERE
 p.kind = 4
 AND c.kind = 5
 AND toUnixTimestamp64Nano(p.timestamp) BETWEEN '%d' AND '%d'
 AND toUnixTimestamp64Nano(c.timestamp) BETWEEN '%d' AND '%d'
-AND c.attribute_string_messaging$$system = '%s'
+AND c.msgSystem = '%s'
-AND p.attribute_string_messaging$$system = '%s'
+AND p.msgSystem = '%s'
 )

 SELECT
@@ -305,7 +278,7 @@ SELECT
 arrayMap(x -> x.1,
 arraySort(
 x -> -x.2,
-groupArrayIf((trace_id, time_difference), time_difference > '%d')
+groupArrayIf((traceID, time_difference), time_difference > '%d')
 )
 ),
 1, 10
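Both sides of this evaluation query correlate each producer span (kind = 4 in the OTLP span-kind enum) with the consumer span it triggered (kind = 5) by self-joining the index table on the trace id and the parent/child span ids; time_difference is the consumer's start delay relative to the producer plus the producer's own duration, and the groupArrayIf/arraySort step above keeps the worst traces over the '%d' threshold. A worked example with made-up numbers:

package main

import "fmt"

// Made-up numbers illustrating the time_difference expression: a producer span
// starting at t=100 ms that runs for 5 ms, whose matching consumer span starts
// at t=130 ms, yields a 35 ms gap (all values in nanoseconds).
func main() {
	producerStart := int64(100_000_000)
	producerDuration := int64(5_000_000)
	consumerStart := int64(130_000_000)
	timeDifference := (consumerStart - producerStart) + producerDuration
	fmt.Println(timeDifference) // 35000000
}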
@@ -320,107 +293,91 @@ GROUP BY

 func generateProducerSQL(start, end int64, topic, partition, queueType string) string {
 timeRange := (end - start) / 1000000000
-tsBucketStart := (start / 1000000000) - 1800
-tsBucketEnd := end / 1000000000
 query := fmt.Sprintf(`
 WITH producer_query AS (
 SELECT
-resource_string_service$$name,
+serviceName,
 quantile(0.99)(durationNano) / 1000000 AS p99,
 count(*) AS total_count,
-sumIf(1, status_code = 2) AS error_count
+sumIf(1, statusCode = 2) AS error_count
-FROM signoz_traces.distributed_signoz_index_v3
+FROM signoz_traces.distributed_signoz_index_v2
 WHERE
 timestamp >= '%d'
 AND timestamp <= '%d'
-AND ts_bucket_start >= '%d'
-AND ts_bucket_start <= '%d'
 AND kind = 4
-AND attribute_string_messaging$$system = '%s'
+AND msgSystem = '%s'
-AND attributes_string['messaging.destination.name'] = '%s'
+AND stringTagMap['messaging.destination.name'] = '%s'
-AND attributes_string['messaging.destination.partition.id'] = '%s'
+AND stringTagMap['messaging.destination.partition.id'] = '%s'
-GROUP BY resource_string_service$$name
+GROUP BY serviceName
 )

 SELECT
-resource_string_service$$name AS service_name,
+serviceName AS service_name,
 p99,
 COALESCE((error_count * 100.0) / total_count, 0) AS error_percentage,
 COALESCE(total_count / %d, 0) AS throughput
 FROM
 producer_query
 ORDER BY
-resource_string_service$$name;
+serviceName;
-`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, timeRange)
+`, start, end, queueType, topic, partition, timeRange)
 return query
 }

 func generateNetworkLatencyThroughputSQL(start, end int64, consumerGroup, partitionID, queueType string) string {
 timeRange := (end - start) / 1000000000
-tsBucketStart := (start / 1000000000) - 1800
-tsBucketEnd := end / 1000000000
 query := fmt.Sprintf(`
 SELECT
-attributes_string['messaging.client_id'] AS client_id,
+stringTagMap['messaging.client_id'] AS client_id,
-resources_string['service.instance.id'] AS service_instance_id,
+stringTagMap['service.instance.id'] AS service_instance_id,
-resource_string_service$$name AS service_name,
+serviceName AS service_name,
 count(*) / %d AS throughput
-FROM signoz_traces.distributed_signoz_index_v3
+FROM signoz_traces.distributed_signoz_index_v2
 WHERE
 timestamp >= '%d'
 AND timestamp <= '%d'
-AND ts_bucket_start >= '%d'
-AND ts_bucket_start <= '%d'
 AND kind = 5
-AND attribute_string_messaging$$system = '%s'
+AND msgSystem = '%s'
-AND attributes_string['messaging.kafka.consumer.group'] = '%s'
+AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
-AND attributes_string['messaging.destination.partition.id'] = '%s'
+AND stringTagMap['messaging.destination.partition.id'] = '%s'
 GROUP BY service_name, client_id, service_instance_id
 ORDER BY throughput DESC
-`, timeRange, start, end, tsBucketStart, tsBucketEnd, queueType, consumerGroup, partitionID)
+`, timeRange, start, end, queueType, consumerGroup, partitionID)
 return query
 }

 func onboardProducersSQL(start, end int64, queueType string) string {
-tsBucketStart := (start / 1000000000) - 1800
-tsBucketEnd := end / 1000000000
 query := fmt.Sprintf(`
 SELECT
 COUNT(*) = 0 AS entries,
-COUNT(IF(attribute_string_messaging$$system = '%s', 1, NULL)) = 0 AS queue,
+COUNT(IF(msgSystem = '%s', 1, NULL)) = 0 AS queue,
 COUNT(IF(kind = 4, 1, NULL)) = 0 AS kind,
-COUNT(IF(has(attributes_string, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
+COUNT(IF(has(stringTagMap, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
-COUNT(IF(has(attributes_string, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition
+COUNT(IF(has(stringTagMap, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition
 FROM
-signoz_traces.distributed_signoz_index_v3
+signoz_traces.distributed_signoz_index_v2
 WHERE
 timestamp >= '%d'
-AND timestamp <= '%d'
+AND timestamp <= '%d';`, queueType, start, end)
-AND ts_bucket_start >= '%d'
-AND ts_bucket_start <= '%d';`, queueType, start, end, tsBucketStart, tsBucketEnd)
 return query
 }

 func onboardConsumerSQL(start, end int64, queueType string) string {
-tsBucketStart := (start / 1000000000) - 1800
-tsBucketEnd := end / 1000000000
 query := fmt.Sprintf(`
 SELECT
 COUNT(*) = 0 AS entries,
-COUNT(IF(attribute_string_messaging$$system = '%s', 1, NULL)) = 0 AS queue,
+COUNT(IF(msgSystem = '%s', 1, NULL)) = 0 AS queue,
 COUNT(IF(kind = 5, 1, NULL)) = 0 AS kind,
-COUNT(resource_string_service$$name) = 0 AS svc,
+COUNT(serviceName) = 0 AS svc,
-COUNT(IF(has(attributes_string, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
+COUNT(IF(has(stringTagMap, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
-COUNT(IF(has(attributes_string, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition,
+COUNT(IF(has(stringTagMap, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition,
-COUNT(IF(has(attributes_string, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup,
+COUNT(IF(has(stringTagMap, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup,
-COUNT(IF(has(attributes_number, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize,
+COUNT(IF(has(numberTagMap, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize,
-COUNT(IF(has(attributes_string, 'messaging.client_id'), 1, NULL)) = 0 AS clientid,
+COUNT(IF(has(stringTagMap, 'messaging.client_id'), 1, NULL)) = 0 AS clientid,
-COUNT(IF(has(resources_string, 'service.instance.id'), 1, NULL)) = 0 AS instanceid
+COUNT(IF(has(stringTagMap, 'service.instance.id'), 1, NULL)) = 0 AS instanceid
-FROM signoz_traces.distributed_signoz_index_v3
+FROM signoz_traces.distributed_signoz_index_v2
 WHERE
 timestamp >= '%d'
-AND timestamp <= '%d'
+AND timestamp <= '%d';`, queueType, start, end)
-AND ts_bucket_start >= '%d'
-AND ts_bucket_start <= '%d' ;`, queueType, start, end, tsBucketStart, tsBucketEnd)
 return query
 }
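Each flag returned by the two onboarding queries above has the form COUNT(...) = 0, so it is true exactly when the corresponding signal is missing in the selected window. A hypothetical helper (not part of either branch) showing how such flags could be turned into user-facing messages:

package main

import "fmt"

// onboardingFlags mirrors the boolean columns of the producer onboarding
// query; every true value means the data needed for the dashboard is absent.
type onboardingFlags struct {
	Entries, Queue, Kind, Destination, Partition bool
}

func missing(f onboardingFlags) []string {
	var out []string
	if f.Entries {
		out = append(out, "no spans found in the selected time range")
	}
	if f.Queue {
		out = append(out, "no spans carrying the expected messaging system attribute")
	}
	if f.Kind {
		out = append(out, "no producer spans (kind = 4)")
	}
	if f.Destination {
		out = append(out, "no 'messaging.destination.name' attribute")
	}
	if f.Partition {
		out = append(out, "no 'messaging.destination.partition.id' attribute")
	}
	return out
}

func main() {
	fmt.Println(missing(onboardingFlags{Queue: true, Partition: true}))
}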

@@ -94,45 +94,6 @@ func (ic *LogParsingPipelineController) ApplyPipelines(
 return ic.GetPipelinesByVersion(ctx, cfg.Version)
 }

-func (ic *LogParsingPipelineController) ValidatePipelines(
-ctx context.Context,
-postedPipelines []PostablePipeline,
-) *model.ApiError {
-for _, p := range postedPipelines {
-if err := p.IsValid(); err != nil {
-return model.BadRequestStr(err.Error())
-}
-}
-
-// Also run a collector simulation to ensure config is fit
-// for e2e use with a collector
-pipelines := []Pipeline{}
-for _, pp := range postedPipelines {
-pipelines = append(pipelines, Pipeline{
-Id: uuid.New().String(),
-OrderId: pp.OrderId,
-Enabled: pp.Enabled,
-Name: pp.Name,
-Alias: pp.Alias,
-Description: &pp.Description,
-Filter: pp.Filter,
-Config: pp.Config,
-})
-}
-
-sampleLogs := []model.SignozLog{{Body: ""}}
-_, _, simulationErr := SimulatePipelinesProcessing(
-ctx, pipelines, sampleLogs,
-)
-if simulationErr != nil {
-return model.BadRequest(fmt.Errorf(
-"invalid pipelines config: %w", simulationErr.ToError(),
-))
-}
-
-return nil
-}
-
 // Returns effective list of pipelines including user created
 // pipelines and pipelines for installed integrations
 func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
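The removed ValidatePipelines helper checks each posted pipeline field by field and then dry-runs the whole set through a collector simulation on a sample log before anything is persisted. A stand-alone sketch of that validate-then-simulate pattern, using stand-in types and a stand-in simulate callback rather than the SigNoz ones:

package main

import (
	"errors"
	"fmt"
)

// pipeline is a simplified stand-in for a posted pipeline definition.
type pipeline struct {
	Name    string
	Enabled bool
}

// validate applies per-item checks first, then a whole-set dry run; a
// simulation on a sample payload catches configuration that is structurally
// valid but would still fail inside a real collector.
func validate(pipelines []pipeline, simulate func([]pipeline, string) error) error {
	for _, p := range pipelines {
		if p.Name == "" {
			return errors.New("pipeline name must not be empty")
		}
	}
	if err := simulate(pipelines, ""); err != nil {
		return fmt.Errorf("invalid pipelines config: %w", err)
	}
	return nil
}

func main() {
	err := validate([]pipeline{{Name: "pipeline 1", Enabled: true}},
		func([]pipeline, string) error { return nil })
	fmt.Println(err) // <nil>
}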

@@ -190,7 +190,7 @@ func (q *querier) runBuilderQuery(
 ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
 return
 }
-query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1)
+query = fmt.Sprintf(placeholderQuery, limitQuery)
 } else {
 query, err = tracesQueryBuilder(
 start,

@@ -190,7 +190,7 @@ func (q *querier) runBuilderQuery(
 ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
 return
 }
-query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1)
+query = fmt.Sprintf(placeholderQuery, limitQuery)
 } else {
 query, err = tracesQueryBuilder(
 start,
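The only change shown in both copies of runBuilderQuery is how the second-pass limit subquery is spliced into the outer query: the removed side replaces a literal #LIMIT_PLACEHOLDER token with strings.Replace, the added side relies on a %s verb and fmt.Sprintf. A plausible motivation for the token approach is that generated SQL can legitimately contain '%' characters (see the LIKE patterns in the test expectations further down), which Sprintf would try to interpret as formatting verbs. A toy demonstration with made-up queries:

package main

import (
	"fmt"
	"strings"
)

// Shows why a named token is safer than a %s verb when the outer query
// already contains literal '%' characters; the queries are toy strings,
// not the real generator output.
func main() {
	outer := "SELECT 1 WHERE labels LIKE '%hostname%' AND x GLOBAL IN (#LIMIT_PLACEHOLDER)"
	inner := "SELECT x FROM t LIMIT 10"

	// Token replacement leaves the LIKE pattern untouched.
	fmt.Println(strings.Replace(outer, "#LIMIT_PLACEHOLDER", inner, 1))

	// Sprintf misreads '%h' and the stray '%' as verbs and mangles the output.
	outerSprintf := "SELECT 1 WHERE labels LIKE '%hostname%' AND x GLOBAL IN (%s)"
	fmt.Println(fmt.Sprintf(outerSprintf, inner))
}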

@@ -34,8 +34,8 @@ func enrichKeyWithMetadata(key v3.AttributeKey, keys map[string]v3.AttributeKey)
 return v
 }

-for _, tkey := range utils.GenerateEnrichmentKeys(key) {
+for _, key := range utils.GenerateEnrichmentKeys(key) {
-if val, ok := keys[tkey]; ok {
+if val, ok := keys[key]; ok {
 return val
 }
 }

@@ -74,19 +74,6 @@ func getSelectLabels(groupBy []v3.AttributeKey) string {
 return strings.Join(labels, ",")
 }

-// TODO(nitya): use the _exists columns as well in the future similar to logs
-func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (string, error) {
-if key.DataType == v3.AttributeKeyDataTypeString {
-if op == v3.FilterOperatorExists {
-return fmt.Sprintf("%s %s ''", getColumnName(key), tracesOperatorMappingV3[v3.FilterOperatorNotEqual]), nil
-} else {
-return fmt.Sprintf("%s %s ''", getColumnName(key), tracesOperatorMappingV3[v3.FilterOperatorEqual]), nil
-}
-} else {
-return "", fmt.Errorf("unsupported operation, exists and not exists can only be applied on custom attributes or string type columns")
-}
-}
-
 func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 var conditions []string

@@ -123,7 +110,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
 case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
 if item.Key.IsColumn {
-subQuery, err := existsSubQueryForFixedColumn(item.Key, item.Operator)
+subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(item.Key, item.Operator)
 if err != nil {
 return "", err
 }

@@ -325,7 +312,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
 }

 if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
-filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)"
+filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
 }

 switch mq.AggregateOperator {

@@ -363,7 +350,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
 case v3.AggregateOperatorCount:
 if mq.AggregateAttribute.Key != "" {
 if mq.AggregateAttribute.IsColumn {
-subQuery, err := existsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
+subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
 if err == nil {
 filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery)
 }
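For attributes stored as dedicated string columns, an EXISTS / NOT EXISTS filter is rewritten as a comparison against the empty string; the two sides differ only in whether that helper lives locally (existsSubQueryForFixedColumn) or is reused from the tracesV3 package. A simplified stand-alone sketch of the rewrite, with plain strings instead of the v3 model types:

package main

import (
	"errors"
	"fmt"
)

// existsCondition mirrors the rewrite above: presence of a fixed string
// column is modelled as "not equal to empty string", absence as equality.
func existsCondition(column string, isString bool, exists bool) (string, error) {
	if !isString {
		return "", errors.New("exists/not exists is only supported for string columns here")
	}
	if exists {
		return fmt.Sprintf("%s != ''", column), nil
	}
	return fmt.Sprintf("%s = ''", column), nil
}

func main() {
	cond, _ := existsCondition("path", true, false)
	fmt.Println(cond) // path = '' , matching the expected test output below
}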

@@ -265,11 +265,9 @@ func Test_buildTracesFilterQuery(t *testing.T) {
 {Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
 {Key: v3.AttributeKey{Key: "host1", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
 {Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
-{Key: v3.AttributeKey{Key: "http_url", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
-{Key: v3.AttributeKey{Key: "http.route", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
 }},
 },
-want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND `attribute_string_path` = '' AND http_url = '' AND `attribute_string_http$$route` = ''",
+want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND path = ''",
 },
 }
 for _, tt := range tests {

@@ -685,7 +683,7 @@ func TestPrepareTracesQuery(t *testing.T) {
 },
 },
 want: "SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where " +
-"(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `function` order by value DESC",
+"(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (%s) group by `function` order by value DESC",
 },
 {
 name: "test with limit with resources- first",

@@ -768,7 +766,7 @@ func TestPrepareTracesQuery(t *testing.T) {
 want: "SELECT `attribute_string_function` as `function`, serviceName as `serviceName`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " +
 "where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['line'] = 100 " +
 "AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) " +
-"AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `function`,`serviceName` order by value DESC",
+"AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (%s) group by `function`,`serviceName` order by value DESC",
 },
 }


@@ -350,27 +350,6 @@ func TestLogPipelinesValidation(t *testing.T) {
 },
 },
 ExpectedResponseStatusCode: 400,
-}, {
-Name: "Invalid from field path",
-Pipeline: logparsingpipeline.PostablePipeline{
-OrderId: 1,
-Name: "pipeline 1",
-Alias: "pipeline1",
-Enabled: true,
-Filter: validPipelineFilterSet,
-Config: []logparsingpipeline.PipelineOperator{
-{
-OrderId: 1,
-ID: "move",
-Type: "move",
-From: `attributes.temp_parsed_body."@l"`,
-To: "attributes.test",
-Enabled: true,
-Name: "test move",
-},
-},
-},
-ExpectedResponseStatusCode: 400,
 },
 }
