Mirror of https://github.com/SigNoz/signoz.git (synced 2025-12-28 22:39:57 +00:00)

Compare commits: 13 commits, v0.89.0-cl ... fix/data-r
| Author | SHA1 | Date |
|---|---|---|
| | 18fa83ebfa | |
| | 9daefeb881 | |
| | 526cf01cb7 | |
| | cd4766ec2b | |
| | 2196b58d36 | |
| | 53c58b9983 | |
| | d174038dce | |
| | 78d09e2940 | |
| | 6cb7f152e1 | |
| | f6730d3d09 | |
| | 899a6ab70a | |
| | a4b852bb99 | |
| | 92cd108c0d | |
@@ -200,7 +200,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
 	s.privateHTTP = privateServer

 	s.opampServer = opamp.InitializeServer(
-		&opAmpModel.AllAgents, agentConfMgr,
+		&opAmpModel.AllAgents, agentConfMgr, signoz.Instrumentation,
 	)

 	orgs, err := apiHandler.Signoz.Modules.OrgGetter.ListByOwnedKeyRange(context.Background())
@@ -14,8 +14,8 @@
 	"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
 	"remove_label_success": "Labels cleared",
 	"alert_form_step1": "Step 1 - Define the metric",
-	"alert_form_step2": "Step 2 - Define Alert Conditions",
-	"alert_form_step3": "Step 3 - Alert Configuration",
+	"alert_form_step2": "Step {{step}} - Define Alert Conditions",
+	"alert_form_step3": "Step {{step}} - Alert Configuration",
 	"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
 	"confirm_save_title": "Save Changes",
 	"confirm_save_content_part1": "Your alert built with",
@@ -7,8 +7,8 @@
 	"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
 	"remove_label_success": "Labels cleared",
 	"alert_form_step1": "Step 1 - Define the metric",
-	"alert_form_step2": "Step 2 - Define Alert Conditions",
-	"alert_form_step3": "Step 3 - Alert Configuration",
+	"alert_form_step2": "Step {{step}} - Define Alert Conditions",
+	"alert_form_step3": "Step {{step}} - Alert Configuration",
 	"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
 	"confirm_save_title": "Save Changes",
 	"confirm_save_content_part1": "Your alert built with",
@@ -7,8 +7,8 @@
 	"remove_label_confirm": "This action will remove all the labels. Do you want to proceed?",
 	"remove_label_success": "Labels cleared",
 	"alert_form_step1": "Step 1 - Define the metric",
-	"alert_form_step2": "Step 2 - Define Alert Conditions",
-	"alert_form_step3": "Step 3 - Alert Configuration",
+	"alert_form_step2": "Step {{step}} - Define Alert Conditions",
+	"alert_form_step3": "Step {{step}} - Alert Configuration",
 	"metric_query_max_limit": "Can not create query. You can create maximum of 5 queries",
 	"confirm_save_title": "Save Changes",
 	"confirm_save_content_part1": "Your alert built with",
@@ -430,9 +430,13 @@ function HostMetricsDetails({
 					>
 						{host.active ? 'ACTIVE' : 'INACTIVE'}
 					</Tag>
-					<Tag className="infra-monitoring-tags" bordered>
-						{host.os}
-					</Tag>
+					{host.os ? (
+						<Tag className="infra-monitoring-tags" bordered>
+							{host.os}
+						</Tag>
+					) : (
+						<Typography.Text>-</Typography.Text>
+					)}
 					<div className="progress-container">
 						<Progress
 							percent={Number((host.cpu * 100).toFixed(1))}
@@ -1,4 +1,5 @@
 import { Tabs, TabsProps } from 'antd';
+import { escapeRegExp } from 'lodash-es';
 import { useLocation, useParams } from 'react-router-dom';

 import { RouteTabProps } from './types';
@@ -28,7 +29,11 @@ function RouteTab({

 	// Find the matching route for the current pathname
 	const currentRoute = routesWithParams.find((route) => {
-		const routePattern = route.route.replace(/:(\w+)/g, '([^/]+)');
+		const pathnameOnly = route.route.split('?')[0];
+		const routePattern = escapeRegExp(pathnameOnly).replace(
+			/\\:([a-zA-Z0-9_]+)/g,
+			'([^/]+)',
+		);
 		const regex = new RegExp(`^${routePattern}$`);
 		return regex.test(location.pathname);
 	});
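A quick sketch of what the RouteTab fix buys (the route string here is illustrative, not taken from the codebase): the old code turned `:param` into a wildcard without escaping anything else, so a `?` in a route string became a regex quantifier and the pattern silently stopped matching; the new code strips the query string and escapes the remainder before building the regex.

```typescript
// Hedged sketch of the before/after matching behaviour; route values are hypothetical.
import { escapeRegExp } from 'lodash-es';

const route = '/logs/pipelines?tab=:tab';
const pathname = '/logs/pipelines';

// Old approach: the '?' is interpreted as a regex quantifier ("s" optional),
// so the pattern no longer matches the real pathname.
const oldPattern = route.replace(/:(\w+)/g, '([^/]+)');
new RegExp(`^${oldPattern}$`).test(pathname); // false

// New approach: drop the query string first, then escape what remains.
const newPattern = escapeRegExp(route.split('?')[0]);
new RegExp(`^${newPattern}$`).test(pathname); // true
```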
@@ -212,9 +212,12 @@ function QuerySection({
 			return null;
 		}
 	};
+
+	const step2Label = alertDef.alertType === 'METRIC_BASED_ALERT' ? '2' : '1';
+
 	return (
 		<>
-			<StepHeading> {t('alert_form_step2')}</StepHeading>
+			<StepHeading> {t('alert_form_step2', { step: step2Label })}</StepHeading>
 			<FormContainer>
 				<div>{renderTabs(alertType)}</div>
 				{renderQuerySection(currentTab)}
@@ -371,9 +371,11 @@ function RuleOptions({
 		selectedCategory?.name,
 	);

+	const step3Label = alertDef.alertType === 'METRIC_BASED_ALERT' ? '3' : '2';
+
 	return (
 		<>
-			<StepHeading>{t('alert_form_step3')}</StepHeading>
+			<StepHeading>{t('alert_form_step3', { step: step3Label })}</StepHeading>
 			<FormContainer>
 				{queryCategory === EQueryType.PROM && renderPromRuleOptions()}
 				{queryCategory !== EQueryType.PROM &&
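Taken together with the locale changes above, the alert-form step headings are now computed rather than hard-coded: the JSON strings carry an i18next-style `{{step}}` placeholder and each component passes the number that fits its alert type. A minimal sketch of the interpolation — the `t` helper below is a stand-in for illustration, not the real i18next:

```typescript
// Stand-in for i18next's t() with interpolation, for illustration only.
const messages: Record<string, string> = {
	alert_form_step2: 'Step {{step}} - Define Alert Conditions',
	alert_form_step3: 'Step {{step}} - Alert Configuration',
};

function t(key: string, vars: Record<string, string> = {}): string {
	return (messages[key] ?? key).replace(
		/\{\{(\w+)\}\}/g,
		(_, name) => vars[name] ?? '',
	);
}

// Metric-based alerts keep "Define the metric" as step 1, so their
// conditions step is 2; other alert types start the conditions at step 1.
const alertType = 'METRIC_BASED_ALERT';
const step2Label = alertType === 'METRIC_BASED_ALERT' ? '2' : '1';
t('alert_form_step2', { step: step2Label }); // "Step 2 - Define Alert Conditions"
```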
@@ -172,6 +172,13 @@
 	.ant-table-cell:nth-child(n + 3) {
 		padding-right: 24px;
 	}
+	.memory-usage-header {
+		display: flex;
+		align-items: center;
+		justify-content: flex-end;
+		gap: 4px;
+		margin-right: 4px;
+	}
 	.column-header-right {
 		text-align: right;
 	}
@@ -1,7 +1,8 @@
 import './InfraMonitoring.styles.scss';

+import { InfoCircleOutlined } from '@ant-design/icons';
 import { Color } from '@signozhq/design-tokens';
-import { Progress, TabsProps, Tag } from 'antd';
+import { Progress, TabsProps, Tag, Tooltip } from 'antd';
 import { ColumnType } from 'antd/es/table';
 import {
 	HostData,
@@ -93,7 +94,14 @@ export const getHostsListColumns = (): ColumnType<HostRowData>[] => [
 		align: 'right',
 	},
 	{
-		title: <div className="column-header-right">Memory Usage</div>,
+		title: (
+			<div className="column-header-right memory-usage-header">
+				Memory Usage
+				<Tooltip title="Excluding cache memory">
+					<InfoCircleOutlined />
+				</Tooltip>
+			</div>
+		),
 		dataIndex: 'memory',
 		key: 'memory',
 		width: 100,
@@ -255,7 +263,6 @@ export function GetHostsQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -731,7 +731,7 @@ export const getClusterMetricsQueryPayload = (
 	},
 	{
 		selectedTime: 'GLOBAL_TIME',
-		graphType: PANEL_TYPES.TIME_SERIES,
+		graphType: PANEL_TYPES.TABLE,
 		query: {
 			builder: {
 				queryData: [
@@ -751,7 +751,7 @@ export const getClusterMetricsQueryPayload = (
 						filters: {
 							items: [
 								{
-									id: 'd7779183',
+									id: 'a7da59c7',
 									key: {
 										dataType: DataTypes.String,
 										id: 'k8s_cluster_name--string--tag--false',
@@ -786,12 +786,12 @@ export const getClusterMetricsQueryPayload = (
 							},
 						],
 						having: [],
-						legend: `{{${k8sDeploymentNameKey}}} ({{${k8sNamespaceNameKey}})`,
+						legend: 'available',
 						limit: null,
 						orderBy: [],
 						queryName: 'A',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
+						reduceTo: 'last',
+						spaceAggregation: 'sum',
 						stepInterval: 60,
 						timeAggregation: 'latest',
 					},
@@ -804,14 +804,14 @@ export const getClusterMetricsQueryPayload = (
 							key: k8sDeploymentDesiredKey,
 							type: 'Gauge',
 						},
-						aggregateOperator: 'latest',
+						aggregateOperator: 'avg',
 						dataSource: DataSource.METRICS,
 						disabled: false,
 						expression: 'B',
 						filters: {
 							items: [
 								{
-									id: 'd7779183',
+									id: '55110885',
 									key: {
 										dataType: DataTypes.String,
 										id: 'k8s_cluster_name--string--tag--false',
@@ -846,14 +846,14 @@ export const getClusterMetricsQueryPayload = (
 							},
 						],
 						having: [],
-						legend: `{{${k8sDeploymentNameKey}}} ({{${k8sNamespaceNameKey}})`,
+						legend: 'desired',
 						limit: null,
 						orderBy: [],
 						queryName: 'B',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
+						reduceTo: 'last',
+						spaceAggregation: 'sum',
 						stepInterval: 60,
-						timeAggregation: 'latest',
+						timeAggregation: 'avg',
 					},
 				],
 				queryFormulas: [],
@@ -890,13 +890,13 @@ export const getClusterMetricsQueryPayload = (
 			queryType: EQueryType.QUERY_BUILDER,
 		},
 		variables: {},
-		formatForWeb: false,
+		formatForWeb: true,
 		start,
 		end,
 	},
 	{
 		selectedTime: 'GLOBAL_TIME',
-		graphType: PANEL_TYPES.TIME_SERIES,
+		graphType: PANEL_TYPES.TABLE,
 		query: {
 			builder: {
 				queryData: [
@@ -909,14 +909,14 @@ export const getClusterMetricsQueryPayload = (
 							key: k8sStatefulsetCurrentPodsKey,
 							type: 'Gauge',
 						},
-						aggregateOperator: 'latest',
+						aggregateOperator: 'max',
 						dataSource: DataSource.METRICS,
 						disabled: false,
 						expression: 'A',
 						filters: {
 							items: [
 								{
-									id: 'd7779183',
+									id: '3c57b4d1',
 									key: {
 										dataType: DataTypes.String,
 										id: 'k8s_cluster_name--string--tag--false',
@@ -951,14 +951,14 @@ export const getClusterMetricsQueryPayload = (
 							},
 						],
 						having: [],
-						legend: `{{${k8sStatefulsetNameKey}}} ({{${k8sNamespaceNameKey}})`,
+						legend: 'current',
 						limit: null,
 						orderBy: [],
 						queryName: 'A',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
+						reduceTo: 'last',
+						spaceAggregation: 'sum',
 						stepInterval: 60,
-						timeAggregation: 'latest',
+						timeAggregation: 'max',
 					},
 					{
 						aggregateAttribute: {
@@ -969,14 +969,14 @@ export const getClusterMetricsQueryPayload = (
 							key: k8sStatefulsetDesiredPodsKey,
 							type: 'Gauge',
 						},
-						aggregateOperator: 'latest',
+						aggregateOperator: 'max',
 						dataSource: DataSource.METRICS,
 						disabled: false,
 						expression: 'B',
 						filters: {
 							items: [
 								{
-									id: 'd7779183',
+									id: '0f49fe64',
 									key: {
 										dataType: DataTypes.String,
 										id: 'k8s_cluster_name--string--tag--false',
@@ -1011,14 +1011,14 @@ export const getClusterMetricsQueryPayload = (
 							},
 						],
 						having: [],
-						legend: `{{${k8sStatefulsetNameKey}}} ({{${k8sNamespaceNameKey}})`,
+						legend: 'desired',
 						limit: null,
 						orderBy: [],
 						queryName: 'B',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
+						reduceTo: 'last',
+						spaceAggregation: 'sum',
 						stepInterval: 60,
-						timeAggregation: 'latest',
+						timeAggregation: 'max',
 					},
 					{
 						aggregateAttribute: {
@@ -1029,14 +1029,14 @@ export const getClusterMetricsQueryPayload = (
 							key: k8sStatefulsetReadyPodsKey,
 							type: 'Gauge',
 						},
-						aggregateOperator: 'latest',
+						aggregateOperator: 'max',
 						dataSource: DataSource.METRICS,
 						disabled: false,
 						expression: 'C',
 						filters: {
 							items: [
 								{
-									id: 'd7779183',
+									id: '0bebf625',
 									key: {
 										dataType: DataTypes.String,
 										id: 'k8s_cluster_name--string--tag--false',
@@ -1071,14 +1071,14 @@ export const getClusterMetricsQueryPayload = (
 							},
 						],
 						having: [],
-						legend: `{{${k8sStatefulsetNameKey}}} ({{${k8sNamespaceNameKey}})`,
+						legend: 'ready',
 						limit: null,
 						orderBy: [],
 						queryName: 'C',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
+						reduceTo: 'last',
+						spaceAggregation: 'sum',
 						stepInterval: 60,
-						timeAggregation: 'latest',
+						timeAggregation: 'max',
 					},
 					{
 						aggregateAttribute: {
@@ -1089,14 +1089,14 @@ export const getClusterMetricsQueryPayload = (
 							key: k8sStatefulsetUpdatedPodsKey,
 							type: 'Gauge',
 						},
-						aggregateOperator: 'latest',
+						aggregateOperator: 'max',
 						dataSource: DataSource.METRICS,
 						disabled: false,
 						expression: 'D',
 						filters: {
 							items: [
 								{
-									id: 'd7779183',
+									id: '1ddacbbe',
 									key: {
 										dataType: DataTypes.String,
 										id: 'k8s_cluster_name--string--tag--false',
@@ -1131,14 +1131,14 @@ export const getClusterMetricsQueryPayload = (
 							},
 						],
 						having: [],
-						legend: `{{${k8sStatefulsetNameKey}}} ({{${k8sNamespaceNameKey}})`,
+						legend: 'updated',
 						limit: null,
 						orderBy: [],
 						queryName: 'D',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
+						reduceTo: 'last',
+						spaceAggregation: 'sum',
 						stepInterval: 60,
-						timeAggregation: 'latest',
+						timeAggregation: 'max',
 					},
 				],
 				queryFormulas: [],
@@ -1199,13 +1199,13 @@ export const getClusterMetricsQueryPayload = (
 			queryType: EQueryType.QUERY_BUILDER,
 		},
 		variables: {},
-		formatForWeb: false,
+		formatForWeb: true,
 		start,
 		end,
 	},
 	{
 		selectedTime: 'GLOBAL_TIME',
-		graphType: PANEL_TYPES.TIME_SERIES,
+		graphType: PANEL_TYPES.TABLE,
 		query: {
 			builder: {
 				queryData: [
@@ -1218,14 +1218,14 @@ export const getClusterMetricsQueryPayload = (
 							key: k8sDaemonsetCurrentScheduledNodesKey,
 							type: 'Gauge',
 						},
-						aggregateOperator: 'latest',
+						aggregateOperator: 'avg',
 						dataSource: DataSource.METRICS,
 						disabled: false,
 						expression: 'A',
 						filters: {
 							items: [
 								{
-									id: 'd7779183',
+									id: 'e0bea554',
 									key: {
 										dataType: DataTypes.String,
 										id: 'k8s_cluster_name--string--tag--false',
@@ -1250,24 +1250,16 @@ export const getClusterMetricsQueryPayload = (
 								key: k8sDaemonsetNameKey,
 								type: 'tag',
 							},
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_namespace_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sNamespaceNameKey,
-								type: 'tag',
-							},
 						],
 						having: [],
-						legend: `{{${k8sDaemonsetNameKey}} ({{${k8sNamespaceNameKey}})`,
+						legend: 'current_nodes',
 						limit: null,
 						orderBy: [],
 						queryName: 'A',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
+						reduceTo: 'last',
+						spaceAggregation: 'avg',
 						stepInterval: 60,
-						timeAggregation: 'latest',
+						timeAggregation: 'avg',
 					},
 					{
 						aggregateAttribute: {
@@ -1278,14 +1270,14 @@ export const getClusterMetricsQueryPayload = (
 							key: k8sDaemonsetDesiredScheduledNodesKey,
 							type: 'Gauge',
 						},
-						aggregateOperator: 'latest',
+						aggregateOperator: 'avg',
 						dataSource: DataSource.METRICS,
 						disabled: false,
 						expression: 'B',
 						filters: {
 							items: [
 								{
-									id: 'd7779183',
+									id: '741052f7',
 									key: {
 										dataType: DataTypes.String,
 										id: 'k8s_cluster_name--string--tag--false',
@@ -1310,24 +1302,16 @@ export const getClusterMetricsQueryPayload = (
 								key: k8sDaemonsetNameKey,
 								type: 'tag',
 							},
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_namespace_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sNamespaceNameKey,
-								type: 'tag',
-							},
 						],
 						having: [],
-						legend: `{{${k8sDaemonsetNameKey}} ({{${k8sNamespaceNameKey}})`,
+						legend: 'desired_nodes',
 						limit: null,
 						orderBy: [],
 						queryName: 'B',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
+						reduceTo: 'last',
+						spaceAggregation: 'avg',
 						stepInterval: 60,
-						timeAggregation: 'latest',
+						timeAggregation: 'avg',
 					},
 					{
 						aggregateAttribute: {
@@ -1338,14 +1322,14 @@ export const getClusterMetricsQueryPayload = (
 							key: k8sDaemonsetReadyNodesKey,
 							type: 'Gauge',
 						},
-						aggregateOperator: 'latest',
+						aggregateOperator: 'avg',
 						dataSource: DataSource.METRICS,
 						disabled: false,
 						expression: 'C',
 						filters: {
 							items: [
 								{
-									id: 'd7779183',
+									id: 'f23759f2',
 									key: {
 										dataType: DataTypes.String,
 										id: 'k8s_cluster_name--string--tag--false',
@@ -1370,24 +1354,16 @@ export const getClusterMetricsQueryPayload = (
 								key: k8sDaemonsetNameKey,
 								type: 'tag',
 							},
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_namespace_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sNamespaceNameKey,
-								type: 'tag',
-							},
 						],
 						having: [],
-						legend: `{{${k8sDaemonsetNameKey}} ({{${k8sNamespaceNameKey}})`,
+						legend: 'ready_nodes',
 						limit: null,
 						orderBy: [],
 						queryName: 'C',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
+						reduceTo: 'last',
+						spaceAggregation: 'avg',
 						stepInterval: 60,
-						timeAggregation: 'latest',
+						timeAggregation: 'avg',
 					},
 				],
 				queryFormulas: [],
@@ -1436,316 +1412,7 @@ export const getClusterMetricsQueryPayload = (
 			queryType: EQueryType.QUERY_BUILDER,
 		},
 		variables: {},
-		formatForWeb: false,
-		start,
-		end,
-	},
-	{
-		selectedTime: 'GLOBAL_TIME',
-		graphType: PANEL_TYPES.TIME_SERIES,
-		query: {
-			builder: {
-				queryData: [
-					{
-						aggregateAttribute: {
-							dataType: DataTypes.Float64,
-							id: 'k8s_job_active_pods--float64--Gauge--true',
-							isColumn: true,
-							isJSON: false,
-							key: k8sJobActivePodsKey,
-							type: 'Gauge',
-						},
-						aggregateOperator: 'latest',
-						dataSource: DataSource.METRICS,
-						disabled: false,
-						expression: 'A',
-						filters: {
-							items: [
-								{
-									id: 'd7779183',
-									key: {
-										dataType: DataTypes.String,
-										id: 'k8s_cluster_name--string--tag--false',
-										isColumn: false,
-										isJSON: false,
-										key: k8sClusterNameKey,
-										type: 'tag',
-									},
-									op: '=',
-									value: cluster.meta.k8s_cluster_name,
-								},
-							],
-							op: 'AND',
-						},
-						functions: [],
-						groupBy: [
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_job_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sJobNameKey,
-								type: 'tag',
-							},
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_namespace_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sNamespaceNameKey,
-								type: 'tag',
-							},
-						],
-						having: [],
-						legend: `{{${k8sJobNameKey}}} ({{${k8sNamespaceNameKey}})`,
-						limit: null,
-						orderBy: [],
-						queryName: 'A',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
-						stepInterval: 60,
-						timeAggregation: 'latest',
-					},
-					{
-						aggregateAttribute: {
-							dataType: DataTypes.Float64,
-							id: 'k8s_job_successful_pods--float64--Gauge--true',
-							isColumn: true,
-							isJSON: false,
-							key: k8sJobSuccessfulPodsKey,
-							type: 'Gauge',
-						},
-						aggregateOperator: 'latest',
-						dataSource: DataSource.METRICS,
-						disabled: false,
-						expression: 'B',
-						filters: {
-							items: [
-								{
-									id: 'd7779183',
-									key: {
-										dataType: DataTypes.String,
-										id: 'k8s_cluster_name--string--tag--false',
-										isColumn: false,
-										isJSON: false,
-										key: k8sClusterNameKey,
-										type: 'tag',
-									},
-									op: '=',
-									value: cluster.meta.k8s_cluster_name,
-								},
-							],
-							op: 'AND',
-						},
-						functions: [],
-						groupBy: [
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_job_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sJobNameKey,
-								type: 'tag',
-							},
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_namespace_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sNamespaceNameKey,
-								type: 'tag',
-							},
-						],
-						having: [],
-						legend: `{{${k8sJobNameKey}}} ({{${k8sNamespaceNameKey}})`,
-						limit: null,
-						orderBy: [],
-						queryName: 'B',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
-						stepInterval: 60,
-						timeAggregation: 'latest',
-					},
-					{
-						aggregateAttribute: {
-							dataType: DataTypes.Float64,
-							id: 'k8s_job_failed_pods--float64--Gauge--true',
-							isColumn: true,
-							isJSON: false,
-							key: k8sJobFailedPodsKey,
-							type: 'Gauge',
-						},
-						aggregateOperator: 'latest',
-						dataSource: DataSource.METRICS,
-						disabled: false,
-						expression: 'C',
-						filters: {
-							items: [
-								{
-									id: 'd7779183',
-									key: {
-										dataType: DataTypes.String,
-										id: 'k8s_cluster_name--string--tag--false',
-										isColumn: false,
-										isJSON: false,
-										key: k8sClusterNameKey,
-										type: 'tag',
-									},
-									op: '=',
-									value: cluster.meta.k8s_cluster_name,
-								},
-							],
-							op: 'AND',
-						},
-						functions: [],
-						groupBy: [
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_job_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sJobNameKey,
-								type: 'tag',
-							},
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_namespace_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sNamespaceNameKey,
-								type: 'tag',
-							},
-						],
-						having: [],
-						legend: `{{${k8sJobNameKey}}} ({{${k8sNamespaceNameKey}})`,
-						limit: null,
-						orderBy: [],
-						queryName: 'C',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
-						stepInterval: 60,
-						timeAggregation: 'latest',
-					},
-					{
-						aggregateAttribute: {
-							dataType: DataTypes.Float64,
-							id: 'k8s_job_desired_successful_pods--float64--Gauge--true',
-							isColumn: true,
-							isJSON: false,
-							key: k8sJobDesiredSuccessfulPodsKey,
-							type: 'Gauge',
-						},
-						aggregateOperator: 'latest',
-						dataSource: DataSource.METRICS,
-						disabled: false,
-						expression: 'D',
-						filters: {
-							items: [
-								{
-									id: 'd7779183',
-									key: {
-										dataType: DataTypes.String,
-										id: 'k8s_cluster_name--string--tag--false',
-										isColumn: false,
-										isJSON: false,
-										key: k8sClusterNameKey,
-										type: 'tag',
-									},
-									op: '=',
-									value: cluster.meta.k8s_cluster_name,
-								},
-							],
-							op: 'AND',
-						},
-						functions: [],
-						groupBy: [
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_job_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sJobNameKey,
-								type: 'tag',
-							},
-							{
-								dataType: DataTypes.String,
-								id: 'k8s_namespace_name--string--tag--false',
-								isColumn: false,
-								isJSON: false,
-								key: k8sNamespaceNameKey,
-								type: 'tag',
-							},
-						],
-						having: [],
-						legend: `{{${k8sJobNameKey}}} ({{${k8sNamespaceNameKey}})`,
-						limit: null,
-						orderBy: [],
-						queryName: 'D',
-						reduceTo: 'avg',
-						spaceAggregation: 'max',
-						stepInterval: 60,
-						timeAggregation: 'latest',
-					},
-				],
-				queryFormulas: [],
-			},
-			clickhouse_sql: [
-				{
-					disabled: false,
-					legend: '',
-					name: 'A',
-					query: '',
-				},
-				{
-					disabled: false,
-					legend: '',
-					name: 'B',
-					query: '',
-				},
-				{
-					disabled: false,
-					legend: '',
-					name: 'C',
-					query: '',
-				},
-				{
-					disabled: false,
-					legend: '',
-					name: 'D',
-					query: '',
-				},
-			],
-			id: v4(),
-			promql: [
-				{
-					disabled: false,
-					legend: '',
-					name: 'A',
-					query: '',
-				},
-				{
-					disabled: false,
-					legend: '',
-					name: 'B',
-					query: '',
-				},
-				{
-					disabled: false,
-					legend: '',
-					name: 'C',
-					query: '',
-				},
-				{
-					disabled: false,
-					legend: '',
-					name: 'D',
-					query: '',
-				},
-			],
-			queryType: EQueryType.QUERY_BUILDER,
-		},
-		variables: {},
-		formatForWeb: false,
+		formatForWeb: true,
 		start,
 		end,
 	},
@@ -1777,7 +1444,7 @@ export const getClusterMetricsQueryPayload = (
 									id: 'k8s_cluster_name--string--tag--false',
 									isColumn: false,
 									isJSON: false,
-									key: 'k8s_cluster_name',
+									key: k8sClusterNameKey,
 									type: 'tag',
 								},
 								op: '=',
@@ -1837,7 +1504,7 @@ export const getClusterMetricsQueryPayload = (
 									id: 'k8s_cluster_name--string--tag--false',
 									isColumn: false,
 									isJSON: false,
-									key: 'k8s_cluster_name',
+									key: k8sClusterNameKey,
 									type: 'tag',
 								},
 								op: '=',
@@ -1897,7 +1564,7 @@ export const getClusterMetricsQueryPayload = (
 									id: 'k8s_cluster_name--string--tag--false',
 									isColumn: false,
 									isJSON: false,
-									key: 'k8s_cluster_name',
+									key: k8sClusterNameKey,
 									type: 'tag',
 								},
 								op: '=',
@@ -1957,7 +1624,7 @@ export const getClusterMetricsQueryPayload = (
 									id: 'k8s_cluster_name--string--tag--false',
 									isColumn: false,
 									isJSON: false,
-									key: 'k8s_cluster_name',
+									key: k8sClusterNameKey,
 									type: 'tag',
 								},
 								op: '=',
@@ -2005,6 +1672,24 @@ export const getClusterMetricsQueryPayload = (
 					name: 'A',
 					query: '',
 				},
+				{
+					disabled: false,
+					legend: '',
+					name: 'B',
+					query: '',
+				},
+				{
+					disabled: false,
+					legend: '',
+					name: 'C',
+					query: '',
+				},
+				{
+					disabled: false,
+					legend: '',
+					name: 'D',
+					query: '',
+				},
 			],
 			id: v4(),
 			promql: [
@@ -2014,6 +1699,24 @@ export const getClusterMetricsQueryPayload = (
 					name: 'A',
 					query: '',
 				},
+				{
+					disabled: false,
+					legend: '',
+					name: 'B',
+					query: '',
+				},
+				{
+					disabled: false,
+					legend: '',
+					name: 'C',
+					query: '',
+				},
+				{
+					disabled: false,
+					legend: '',
+					name: 'D',
+					query: '',
+				},
 			],
 			queryType: EQueryType.QUERY_BUILDER,
 		},
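The recurring pattern in the hunks above: several cluster-detail panels switch from time-series charts to tables. `graphType` becomes `PANEL_TYPES.TABLE`, `formatForWeb: true` asks the query service for row/column-shaped output, per-series template legends collapse into short labels ('current', 'desired', ...), and the aggregations change to values that read sensibly in a single cell. A trimmed before/after sketch of one panel request — field names come from the diff, the type is simplified, and the assumption that the short legend doubles as a column label is mine:

```typescript
// Simplified before/after of one panel request; only changed fields shown.
type PanelRequest = {
	graphType: 'TIME_SERIES' | 'TABLE';
	formatForWeb: boolean; // true => response shaped as table rows
	legend: string;
	reduceTo: string;
	spaceAggregation: string;
	timeAggregation: string;
};

const before: PanelRequest = {
	graphType: 'TIME_SERIES',
	formatForWeb: false,
	legend: '{{k8s_statefulset_name}} ({{k8s_namespace_name}})',
	reduceTo: 'avg',
	spaceAggregation: 'max',
	timeAggregation: 'latest',
};

const after: PanelRequest = {
	graphType: 'TABLE',
	formatForWeb: true,
	legend: 'desired', // short label, assumed to serve as the column name
	reduceTo: 'last',
	spaceAggregation: 'sum',
	timeAggregation: 'max',
};
```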
@@ -238,7 +238,6 @@ export function GetPodsQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -302,7 +301,6 @@ export function GetNodesQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -365,7 +363,6 @@ export function GetNamespaceQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -409,7 +406,6 @@ export function GetClustersQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -449,7 +445,6 @@ export function GetContainersQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -531,7 +526,6 @@ export function GetVolumesQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -613,7 +607,6 @@ export function GetDeploymentsQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -695,7 +688,6 @@ export function GetStatefulsetsQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -775,7 +767,6 @@ export function GetDaemonsetsQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -853,7 +844,6 @@ export function GetJobsQuickFiltersConfig(
 			isColumn: false,
 			isJSON: false,
 		},
-		dataSource: DataSource.METRICS,
 		defaultOpen: true,
 	},
 ];
@@ -2463,7 +2463,7 @@ export const getHostQueryPayload = (
 					functions: [],
 					groupBy: [],
 					having: [],
-					legend: '',
+					legend: 'system disk io',
 					limit: null,
 					orderBy: [],
 					queryName: 'A',
@@ -1631,7 +1631,7 @@
 			"docker metrics to signoz"
 		],
 		"imgUrl": "/Logos/docker.svg",
-		"link": "https://signoz.io/docs/userguide/k8s-metrics/"
+		"link": "https://signoz.io/docs/metrics-management/docker-container-metrics/"
 	},
 	{
 		"dataSource": "ec2-application-logs",
@@ -98,6 +98,7 @@ interface QueryBuilderSearchV2Props {
 	hideSpanScopeSelector?: boolean;
 	// Determines whether to call onChange when a tag is closed
 	triggerOnChangeOnClose?: boolean;
+	skipQueryBuilderRedirect?: boolean;
 }

 export interface Option {
@@ -137,6 +138,7 @@ function QueryBuilderSearchV2(
 		operatorConfigKey,
 		hideSpanScopeSelector,
 		triggerOnChangeOnClose,
+		skipQueryBuilderRedirect,
 	} = props;

 	const { registerShortcut, deregisterShortcut } = useKeyboardHotkeys();
@@ -1038,7 +1040,11 @@ function QueryBuilderSearchV2(
 				})}
 			</Select>
 			{!hideSpanScopeSelector && (
-				<SpanScopeSelector query={query} onChange={onChange} />
+				<SpanScopeSelector
+					query={query}
+					onChange={onChange}
+					skipQueryBuilderRedirect={skipQueryBuilderRedirect}
+				/>
 			)}
 		</div>
 	);
@@ -1056,6 +1062,7 @@ QueryBuilderSearchV2.defaultProps = {
 	operatorConfigKey: undefined,
 	hideSpanScopeSelector: true,
 	triggerOnChangeOnClose: false,
+	skipQueryBuilderRedirect: false,
 };

 export default QueryBuilderSearchV2;
@@ -23,6 +23,7 @@ interface SpanFilterConfig {
 interface SpanScopeSelectorProps {
 	onChange?: (value: TagFilter) => void;
 	query?: IBuilderQuery;
+	skipQueryBuilderRedirect?: boolean;
 }

 const SPAN_FILTER_CONFIG: Record<SpanScope, SpanFilterConfig | null> = {
@@ -58,6 +59,7 @@ const SELECT_OPTIONS = [
 function SpanScopeSelector({
 	onChange,
 	query,
+	skipQueryBuilderRedirect,
 }: SpanScopeSelectorProps): JSX.Element {
 	const { currentQuery, redirectWithQueryBuilderData } = useQueryBuilder();
 	const [selectedScope, setSelectedScope] = useState<SpanScope>(
@@ -79,6 +81,7 @@ function SpanScopeSelector({
 		if (hasFilter('isEntryPoint')) return SpanScope.ENTRYPOINT_SPANS;
 		return SpanScope.ALL_SPANS;
 	};

 	useEffect(() => {
 		let queryData = (currentQuery?.builder?.queryData || [])?.find(
 			(item) => item.queryName === query?.queryName,
@@ -127,13 +130,10 @@ function SpanScopeSelector({
 			},
 		}));

-		if (onChange && query) {
+		if (skipQueryBuilderRedirect && onChange && query) {
 			onChange({
 				...query.filters,
-				items: getUpdatedFilters(
-					[...query.filters.items, ...newQuery.builder.queryData[0].filters.items],
-					true,
-				),
+				items: getUpdatedFilters([...query.filters.items], true),
 			});

 			setSelectedScope(newScope);
@@ -156,6 +156,7 @@ function SpanScopeSelector({
 SpanScopeSelector.defaultProps = {
 	onChange: undefined,
 	query: undefined,
+	skipQueryBuilderRedirect: false,
 };

 export default SpanScopeSelector;
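In short, `skipQueryBuilderRedirect` switches the selector from "update the global query builder via redirect" to "report the change through onChange against the local query only". A hedged usage sketch, matching the Filters diff further down — the state setter here is illustrative, the props come from the diff:

```tsx
// Opting into local-only scope updates; setLocalQuery is an illustrative
// state setter, not a real export from the codebase.
<QueryBuilderSearchV2
	query={localQuery}
	onChange={(filters) => setLocalQuery({ ...localQuery, filters })}
	hideSpanScopeSelector={false}
	skipQueryBuilderRedirect
/>
```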
@@ -3,9 +3,11 @@ import {
 	render,
 	RenderResult,
 	screen,
+	within,
 } from '@testing-library/react';
 import { initialQueriesMap } from 'constants/queryBuilder';
 import { QueryBuilderContext } from 'providers/QueryBuilder';
+import { QueryClient, QueryClientProvider } from 'react-query';
 import {
 	IBuilderQuery,
 	Query,
@@ -13,6 +15,7 @@ import {
 	TagFilterItem,
 } from 'types/api/queryBuilder/queryBuilderData';

+import QueryBuilderSearchV2 from '../QueryBuilderSearchV2';
 import SpanScopeSelector from '../SpanScopeSelector';

 const mockRedirectWithQueryBuilderData = jest.fn();
@@ -48,6 +51,14 @@
 	},
 };

+const queryClient = new QueryClient({
+	defaultOptions: {
+		queries: {
+			refetchOnWindowFocus: false,
+		},
+	},
+});
+
 const defaultQueryBuilderQuery: IBuilderQuery = {
 	...initialQueriesMap.traces.builder.queryData[0],
 	queryName: 'A',
@@ -76,6 +87,7 @@ const renderWithContext = (
 	initialQuery = defaultQuery,
 	onChangeProp?: (value: TagFilter) => void,
 	queryProp?: IBuilderQuery,
+	skipQueryBuilderRedirect = false,
 ): RenderResult =>
 	render(
 		<QueryBuilderContext.Provider
@@ -87,12 +99,19 @@
 				} as any
 			}
 		>
-			<SpanScopeSelector onChange={onChangeProp} query={queryProp} />
+			<SpanScopeSelector
+				onChange={onChangeProp}
+				query={queryProp}
+				skipQueryBuilderRedirect={skipQueryBuilderRedirect}
+			/>
 		</QueryBuilderContext.Provider>,
 	);

 const selectOption = async (optionText: string): Promise<void> => {
-	const selector = screen.getByRole('combobox');
+	const selector = within(screen.getByTestId('span-scope-selector')).getByRole(
+		'combobox',
+	);

 	fireEvent.mouseDown(selector);

 	// Wait for dropdown to appear
@@ -264,6 +283,7 @@ describe('SpanScopeSelector', () => {
 				defaultQuery,
 				mockOnChange,
 				localQuery,
+				true,
 			);
 			expect(await screen.findByText('All Spans')).toBeInTheDocument();
@@ -283,6 +303,7 @@ describe('SpanScopeSelector', () => {
 				defaultQuery,
 				mockOnChange,
 				localQuery,
+				true,
 			);
 			expect(await screen.findByText('Root Spans')).toBeInTheDocument();
@@ -303,6 +324,7 @@ describe('SpanScopeSelector', () => {
 				defaultQuery,
 				mockOnChange,
 				localQuery,
+				true,
 			);
 			expect(await screen.findByText('Root Spans')).toBeInTheDocument();
@@ -324,6 +346,7 @@ describe('SpanScopeSelector', () => {
 				defaultQuery,
 				mockOnChange,
 				localQuery,
+				true,
 			);
 			expect(await screen.findByText('Root Spans')).toBeInTheDocument();
@@ -350,6 +373,7 @@ describe('SpanScopeSelector', () => {
 				defaultQuery,
 				mockOnChange,
 				localQuery,
+				true,
 			);
 			expect(await screen.findByText('Entrypoint Spans')).toBeInTheDocument();
@@ -361,5 +385,60 @@ describe('SpanScopeSelector', () => {
 				container.querySelector('span[title="All Spans"]'),
 			).toBeInTheDocument();
 		});
+
+		it('should not duplicate non-scope filters when changing span scope', async () => {
+			const query = {
+				...defaultQuery,
+				builder: {
+					...defaultQuery.builder,
+					queryData: [
+						{
+							...defaultQuery.builder.queryData[0],
+							filters: {
+								items: [createNonScopeFilter('service', 'checkout')],
+								op: 'AND',
+							},
+						},
+					],
+				},
+			};
+			render(
+				<QueryClientProvider client={queryClient}>
+					<QueryBuilderContext.Provider
+						value={
+							{
+								currentQuery: query,
+								redirectWithQueryBuilderData: mockRedirectWithQueryBuilderData,
+							} as any
+						}
+					>
+						<QueryBuilderSearchV2
+							query={query.builder.queryData[0] as any}
+							onChange={mockOnChange}
+							hideSpanScopeSelector={false}
+						/>
+					</QueryBuilderContext.Provider>
+				</QueryClientProvider>,
+			);
+
+			expect(await screen.findByText('All Spans')).toBeInTheDocument();
+
+			await selectOption('Entrypoint Spans');
+
+			expect(mockRedirectWithQueryBuilderData).toHaveBeenCalled();
+
+			const redirectQueryArg = mockRedirectWithQueryBuilderData.mock
+				.calls[0][0] as Query;
+			const { items } = redirectQueryArg.builder.queryData[0].filters;
+			// Count non-scope filters
+			const nonScopeFilters = items.filter(
+				(filter) => filter.key?.type !== 'spanSearchScope',
+			);
+			expect(nonScopeFilters).toHaveLength(1);
+
+			expect(nonScopeFilters).toContainEqual(
+				createNonScopeFilter('service', 'checkout'),
+			);
+		});
 	});
 });
@@ -142,6 +142,7 @@ function Filters({
 				}}
 				onChange={handleFilterChange}
 				hideSpanScopeSelector={false}
+				skipQueryBuilderRedirect
 			/>
 			{filteredSpanIds.length > 0 && (
 				<div className="pre-next-toggle">
@@ -17,6 +17,7 @@ import { AppState } from 'store/reducers';
 import { DataSource } from 'types/common/queryBuilder';
 import { GlobalReducer } from 'types/reducer/globalTime';
 import DOCLINKS from 'utils/docLinks';
+import { transformBuilderQueryFields } from 'utils/queryTransformers';

 import TraceExplorerControls from '../Controls';
 import { TracesLoading } from '../TraceLoading/TraceLoading';
@@ -39,9 +40,22 @@ function TracesView({ isFilterApplied }: TracesViewProps): JSX.Element {
 		QueryParams.pagination,
 	);

+	const transformedQuery = useMemo(
+		() =>
+			transformBuilderQueryFields(stagedQuery || initialQueriesMap.traces, {
+				orderBy: [
+					{
+						columnName: 'timestamp',
+						order: 'desc',
+					},
+				],
+			}),
+		[stagedQuery],
+	);
+
 	const { data, isLoading, isFetching, isError } = useGetQueryRange(
 		{
-			query: stagedQuery || initialQueriesMap.traces,
+			query: transformedQuery,
 			graphType: panelType || PANEL_TYPES.TRACE,
 			selectedTime: 'GLOBAL_TIME',
 			globalSelectedInterval: globalSelectedTime,
@@ -594,6 +594,53 @@ describe('TracesExplorer - ', () => {
 				'http://localhost/trace/5765b60ba7cc4ddafe8bdaa9c1b4b246',
 			);
 		});
+		it('trace explorer - trace view should only send order by timestamp in the query', async () => {
+			let capturedPayload: QueryRangePayload;
+			const orderBy = [
+				{ columnName: 'id', order: 'desc' },
+				{ columnName: 'serviceName', order: 'desc' },
+			];
+			const defaultOrderBy = [{ columnName: 'timestamp', order: 'desc' }];
+			server.use(
+				rest.post(`${BASE_URL}/api/v4/query_range`, async (req, res, ctx) => {
+					const payload = await req.json();
+					capturedPayload = payload;
+					return res(ctx.status(200), ctx.json(queryRangeForTraceView));
+				}),
+			);
+			render(
+				<QueryBuilderContext.Provider
+					value={{
+						...qbProviderValue,
+						panelType: PANEL_TYPES.TRACE,
+						stagedQuery: {
+							...qbProviderValue.stagedQuery,
+							builder: {
+								...qbProviderValue.stagedQuery.builder,
+								queryData: [
+									{
+										...qbProviderValue.stagedQuery.builder.queryData[0],
+										orderBy,
+									},
+								],
+							},
+						},
+					}}
+				>
+					<TracesExplorer />
+				</QueryBuilderContext.Provider>,
+			);

+			await waitFor(() => {
+				expect(capturedPayload).toBeDefined();
+				expect(capturedPayload?.compositeQuery?.builderQueries?.A.orderBy).toEqual(
+					defaultOrderBy,
+				);
+				expect(
+					capturedPayload?.compositeQuery?.builderQueries?.A.orderBy,
+				).not.toEqual(orderBy);
+			});
+		});

 		it('test for explorer options', async () => {
 			const { getByText, getByTestId } = render(
frontend/src/utils/queryTransformers.ts (new file, 28 lines)
@@ -0,0 +1,28 @@
+import { cloneDeep } from 'lodash-es';
+import { IBuilderQuery, Query } from 'types/api/queryBuilder/queryBuilderData';
+
+/**
+ * Transforms a query by modifying specific fields in the builder queries
+ * @param query - The original query object
+ * @param fieldOverrides - Partial object containing fields to override in each builder query
+ * @returns A new query object with the modified fields
+ */
+export const transformBuilderQueryFields = (
+	query: Query,
+	fieldOverrides: Partial<IBuilderQuery>,
+): Query => {
+	// Create a deep copy of the query
+	const transformedQuery: Query = cloneDeep(query);
+
+	// Update the specified fields for each query in the builder
+	if (transformedQuery.builder?.queryData) {
+		transformedQuery.builder.queryData = transformedQuery.builder.queryData.map(
+			(queryItem) => ({
+				...queryItem,
+				...fieldOverrides,
+			}),
+		);
+	}
+
+	return transformedQuery;
+};
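A small usage sketch of the new helper, mirroring how TracesView applies it above — the `declare` line stands in for a real staged query:

```typescript
import { Query } from 'types/api/queryBuilder/queryBuilderData';
import { transformBuilderQueryFields } from 'utils/queryTransformers';

declare const stagedQuery: Query; // stand-in for a real staged query

// Force every builder query to order by timestamp, descending.
const transformed = transformBuilderQueryFields(stagedQuery, {
	orderBy: [{ columnName: 'timestamp', order: 'desc' }],
});
// Because of cloneDeep, stagedQuery itself is untouched; each entry in
// transformed.builder.queryData now carries the overridden orderBy.
```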
go.mod (257 lines changed)
@@ -5,43 +5,43 @@ go 1.23.0
 require (
 	dario.cat/mergo v1.0.1
 	github.com/AfterShip/clickhouse-sql-parser v0.4.7
-	github.com/ClickHouse/clickhouse-go/v2 v2.30.0
+	github.com/ClickHouse/clickhouse-go/v2 v2.36.0
 	github.com/DATA-DOG/go-sqlmock v1.5.2
 	github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
-	github.com/SigNoz/signoz-otel-collector v0.111.43
+	github.com/SigNoz/signoz-otel-collector v0.111.43-aded056
 	github.com/antlr4-go/antlr/v4 v4.13.1
 	github.com/antonmedv/expr v1.15.3
 	github.com/cespare/xxhash/v2 v2.3.0
-	github.com/coreos/go-oidc/v3 v3.11.0
+	github.com/coreos/go-oidc/v3 v3.14.1
 	github.com/dustin/go-humanize v1.0.1
 	github.com/go-co-op/gocron v1.30.1
 	github.com/go-openapi/runtime v0.28.0
 	github.com/go-openapi/strfmt v0.23.0
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/go-redis/redismock/v8 v8.11.5
-	github.com/go-viper/mapstructure/v2 v2.1.0
+	github.com/go-viper/mapstructure/v2 v2.2.1
 	github.com/gojek/heimdall/v7 v7.0.3
 	github.com/golang-jwt/jwt/v5 v5.2.2
 	github.com/google/uuid v1.6.0
 	github.com/gorilla/handlers v1.5.1
 	github.com/gorilla/mux v1.8.1
-	github.com/gorilla/websocket v1.5.0
+	github.com/gorilla/websocket v1.5.3
 	github.com/huandu/go-sqlbuilder v1.35.0
 	github.com/jackc/pgx/v5 v5.7.2
 	github.com/json-iterator/go v1.1.12
 	github.com/knadh/koanf v1.5.0
-	github.com/knadh/koanf/v2 v2.1.1
+	github.com/knadh/koanf/v2 v2.2.0
 	github.com/mailru/easyjson v0.7.7
 	github.com/mattn/go-sqlite3 v1.14.24
-	github.com/open-telemetry/opamp-go v0.5.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0
+	github.com/open-telemetry/opamp-go v0.19.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.128.0
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/patrickmn/go-cache v2.1.0+incompatible
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/alertmanager v0.28.0
-	github.com/prometheus/client_golang v1.20.5
-	github.com/prometheus/common v0.61.0
-	github.com/prometheus/prometheus v0.300.1
+	github.com/prometheus/alertmanager v0.28.1
+	github.com/prometheus/client_golang v1.22.0
+	github.com/prometheus/common v0.64.0
+	github.com/prometheus/prometheus v0.304.1
 	github.com/rs/cors v1.11.1
 	github.com/russellhaering/gosaml2 v0.9.0
 	github.com/russellhaering/goxmldsig v1.2.0
@@ -50,53 +50,54 @@ require (
 	github.com/sethvargo/go-password v0.2.0
 	github.com/smartystreets/goconvey v1.8.1
 	github.com/soheilhy/cmux v0.1.5
-	github.com/srikanthccv/ClickHouse-go-mock v0.11.0
+	github.com/srikanthccv/ClickHouse-go-mock v0.12.0
 	github.com/stretchr/testify v1.10.0
 	github.com/tidwall/gjson v1.18.0
 	github.com/uptrace/bun v1.2.9
 	github.com/uptrace/bun/dialect/pgdialect v1.2.9
 	github.com/uptrace/bun/dialect/sqlitedialect v1.2.9
-	go.opentelemetry.io/collector/confmap v1.17.0
-	go.opentelemetry.io/collector/pdata v1.17.0
-	go.opentelemetry.io/collector/processor v0.111.0
+	go.opentelemetry.io/collector/confmap v1.34.0
+	go.opentelemetry.io/collector/otelcol v0.128.0
+	go.opentelemetry.io/collector/pdata v1.34.0
 	go.opentelemetry.io/contrib/config v0.10.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0
-	go.opentelemetry.io/otel v1.34.0
-	go.opentelemetry.io/otel/metric v1.34.0
-	go.opentelemetry.io/otel/sdk v1.34.0
-	go.opentelemetry.io/otel/trace v1.34.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
+	go.opentelemetry.io/otel v1.36.0
+	go.opentelemetry.io/otel/metric v1.36.0
+	go.opentelemetry.io/otel/sdk v1.36.0
+	go.opentelemetry.io/otel/trace v1.36.0
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.0
-	golang.org/x/crypto v0.38.0
-	golang.org/x/exp v0.0.0-20240909161429-701f63a606c0
-	golang.org/x/oauth2 v0.26.0
-	golang.org/x/sync v0.14.0
-	golang.org/x/text v0.25.0
-	google.golang.org/protobuf v1.36.0
+	golang.org/x/crypto v0.39.0
+	golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac
+	golang.org/x/oauth2 v0.30.0
+	golang.org/x/sync v0.15.0
+	golang.org/x/text v0.26.0
+	google.golang.org/protobuf v1.36.6
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/apimachinery v0.31.3
+	k8s.io/apimachinery v0.32.3
 )

 require (
-	cloud.google.com/go/auth v0.13.0 // indirect
-	cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
-	cloud.google.com/go/compute/metadata v0.6.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
-	github.com/ClickHouse/ch-go v0.63.1 // indirect
+	cloud.google.com/go/auth v0.16.1 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
+	cloud.google.com/go/compute/metadata v0.7.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
+	github.com/ClickHouse/ch-go v0.66.0 // indirect
 	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
 	github.com/andybalholm/brotli v1.1.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go v1.55.5 // indirect
+	github.com/aws/aws-sdk-go v1.55.7 // indirect
 	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
 	github.com/beevik/etree v1.1.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
 	github.com/coder/quartz v0.1.2 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -106,10 +107,10 @@ require (
 	github.com/ebitengine/purego v0.8.4 // indirect
 	github.com/edsrzf/mmap-go v1.2.0 // indirect
 	github.com/elastic/lunes v0.1.0 // indirect
-	github.com/expr-lang/expr v1.17.0 // indirect
+	github.com/expr-lang/expr v1.17.5 // indirect
 	github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fsnotify/fsnotify v1.8.0 // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
 	github.com/go-faster/city v1.0.1 // indirect
 	github.com/go-faster/errors v0.7.1 // indirect
 	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
@@ -124,20 +125,21 @@ require (
 	github.com/go-openapi/spec v0.21.0 // indirect
 	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/go-openapi/validate v0.24.0 // indirect
-	github.com/goccy/go-json v0.10.4 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
+	github.com/goccy/go-json v0.10.5 // indirect
 	github.com/gofrs/uuid v4.4.0+incompatible // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/gojek/valkyrie v0.0.0-20180215180059-6aee720afcdf // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
+	github.com/golang/snappy v1.0.0 // indirect
 	github.com/google/btree v1.0.1 // indirect
-	github.com/google/go-cmp v0.6.0 // indirect
-	github.com/google/s2a-go v0.1.8 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
-	github.com/googleapis/gax-go/v2 v2.14.0 // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/s2a-go v0.1.9 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+	github.com/googleapis/gax-go/v2 v2.14.2 // indirect
 	github.com/gopherjs/gopherjs v1.17.2 // indirect
 	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect
@@ -155,21 +157,21 @@ require (
 	github.com/jessevdk/go-flags v1.6.1 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/jonboulle/clockwork v0.4.0 // indirect
+	github.com/jonboulle/clockwork v0.5.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/jtolds/gls v4.20.0+incompatible // indirect
-	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/leodido/go-syslog/v4 v4.2.0 // indirect
 	github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect
-	github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74 // indirect
+	github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
 	github.com/magefile/mage v1.15.0 // indirect
 	github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mdlayher/socket v0.4.1 // indirect
 	github.com/mdlayher/vsock v1.2.1 // indirect
-	github.com/miekg/dns v1.1.62 // indirect
+	github.com/miekg/dns v1.1.65 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -179,33 +181,38 @@ require (
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/oklog/run v1.1.0 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 // indirect
+	github.com/oklog/ulid/v2 v2.1.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.128.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0 // indirect
 	github.com/paulmach/orb v0.11.1 // indirect
 	github.com/pierrec/lz4/v4 v4.1.22 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common/sigv4 v0.1.0 // indirect
-	github.com/prometheus/exporter-toolkit v0.13.2 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/puzpuzpuz/xsync/v3 v3.5.0 // indirect
+	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/exporter-toolkit v0.14.0 // indirect
+	github.com/prometheus/otlptranslator v0.0.0-20250320144820-d800c8b0eb07 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
+	github.com/prometheus/sigv4 v0.1.2 // indirect
+	github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
 	github.com/robfig/cron/v3 v3.0.1 // indirect
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
 	github.com/segmentio/asm v1.2.0 // indirect
 	github.com/segmentio/backo-go v1.0.1 // indirect
-	github.com/shirou/gopsutil/v4 v4.24.9 // indirect
+	github.com/shirou/gopsutil/v4 v4.25.5 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect
 	github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
 	github.com/smarty/assertions v1.15.0 // indirect
-	github.com/spf13/cobra v1.8.1 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/spf13/cobra v1.9.1 // indirect
+	github.com/spf13/pflag v1.0.6 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/tidwall/match v1.1.1 // indirect
 	github.com/tidwall/pretty v1.2.0 // indirect
-	github.com/tklauser/go-sysconf v0.3.13 // indirect
-	github.com/tklauser/numcpus v0.7.0 // indirect
+	github.com/tklauser/go-sysconf v0.3.15 // indirect
+	github.com/tklauser/numcpus v0.10.0 // indirect
 	github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
 	github.com/trivago/tgo v1.0.7 // indirect
 	github.com/valyala/fastjson v1.6.4 // indirect
@@ -215,65 +222,79 @@ require (
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.mongodb.org/mongo-driver v1.17.1 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector v0.111.0 // indirect
go.opentelemetry.io/collector/component v0.111.0 // indirect
go.opentelemetry.io/collector/component/componentprofiles v0.111.0 // indirect
go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect
go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect
go.opentelemetry.io/collector/confmap/converter/expandconverter v0.111.0 // indirect
go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 // indirect
go.opentelemetry.io/collector/connector v0.111.0 // indirect
go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect
go.opentelemetry.io/collector/consumer v0.111.0 // indirect
go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect
go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect
go.opentelemetry.io/collector/exporter v0.111.0 // indirect
go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect
go.opentelemetry.io/collector/extension v0.111.0 // indirect
go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect
go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 // indirect
go.opentelemetry.io/collector/featuregate v1.17.0 // indirect
go.opentelemetry.io/collector/internal/globalgates v0.111.0 // indirect
go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect
go.opentelemetry.io/collector/otelcol v0.111.0 // indirect
go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect
go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect
go.opentelemetry.io/collector/pipeline v0.111.0 // indirect
go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect
go.opentelemetry.io/collector/receiver v0.111.0 // indirect
go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
go.opentelemetry.io/collector/semconv v0.116.0 // indirect
go.opentelemetry.io/collector/service v0.111.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 // indirect
go.opentelemetry.io/contrib/propagators/b3 v1.30.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.52.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 // indirect
go.opentelemetry.io/otel/log v0.10.0 // indirect
go.opentelemetry.io/otel/sdk/log v0.10.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
go.opentelemetry.io/collector/component v1.34.0 // indirect
go.opentelemetry.io/collector/component/componentstatus v0.128.0 // indirect
go.opentelemetry.io/collector/component/componenttest v0.128.0 // indirect
go.opentelemetry.io/collector/config/configtelemetry v0.128.0 // indirect
go.opentelemetry.io/collector/confmap/provider/envprovider v1.34.0 // indirect
go.opentelemetry.io/collector/confmap/provider/fileprovider v1.34.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 // indirect
go.opentelemetry.io/collector/connector v0.128.0 // indirect
go.opentelemetry.io/collector/connector/connectortest v0.128.0 // indirect
go.opentelemetry.io/collector/connector/xconnector v0.128.0 // indirect
go.opentelemetry.io/collector/consumer v1.34.0 // indirect
go.opentelemetry.io/collector/consumer/consumererror v0.128.0 // indirect
go.opentelemetry.io/collector/consumer/consumertest v0.128.0 // indirect
go.opentelemetry.io/collector/consumer/xconsumer v0.128.0 // indirect
go.opentelemetry.io/collector/exporter v0.128.0 // indirect
go.opentelemetry.io/collector/exporter/exportertest v0.128.0 // indirect
go.opentelemetry.io/collector/exporter/xexporter v0.128.0 // indirect
go.opentelemetry.io/collector/extension v1.34.0 // indirect
go.opentelemetry.io/collector/extension/extensioncapabilities v0.128.0 // indirect
go.opentelemetry.io/collector/extension/extensiontest v0.128.0 // indirect
go.opentelemetry.io/collector/extension/xextension v0.128.0 // indirect
go.opentelemetry.io/collector/featuregate v1.34.0 // indirect
go.opentelemetry.io/collector/internal/fanoutconsumer v0.128.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.128.0 // indirect
go.opentelemetry.io/collector/pdata/pprofile v0.128.0 // indirect
go.opentelemetry.io/collector/pdata/testdata v0.128.0 // indirect
go.opentelemetry.io/collector/pipeline v0.128.0 // indirect
go.opentelemetry.io/collector/pipeline/xpipeline v0.128.0 // indirect
go.opentelemetry.io/collector/processor v1.34.0 // indirect
go.opentelemetry.io/collector/processor/processorhelper v0.128.0 // indirect
go.opentelemetry.io/collector/processor/processortest v0.128.0 // indirect
go.opentelemetry.io/collector/processor/xprocessor v0.128.0 // indirect
go.opentelemetry.io/collector/receiver v1.34.0 // indirect
go.opentelemetry.io/collector/receiver/receiverhelper v0.128.0 // indirect
go.opentelemetry.io/collector/receiver/receivertest v0.128.0 // indirect
go.opentelemetry.io/collector/receiver/xreceiver v0.128.0 // indirect
go.opentelemetry.io/collector/semconv v0.128.0 // indirect
go.opentelemetry.io/collector/service v0.128.0 // indirect
go.opentelemetry.io/collector/service/hostcapabilities v0.128.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 // indirect
go.opentelemetry.io/contrib/otelconf v0.16.0 // indirect
go.opentelemetry.io/contrib/propagators/b3 v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.58.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 // indirect
go.opentelemetry.io/otel/log v0.12.2 // indirect
go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect
go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/goleak v1.3.0 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/mod v0.25.0 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/time v0.8.0 // indirect
golang.org/x/tools v0.29.0 // indirect
gonum.org/v1/gonum v0.15.1 // indirect
google.golang.org/api v0.213.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
google.golang.org/grpc v1.69.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.33.0 // indirect
gonum.org/v1/gonum v0.16.0 // indirect
google.golang.org/api v0.236.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
google.golang.org/grpc v1.72.2 // indirect
gopkg.in/telebot.v3 v3.3.8 // indirect
k8s.io/client-go v0.31.3 // indirect
k8s.io/client-go v0.32.3 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
@@ -2,7 +2,7 @@ package clickhouseprometheus

import (
"encoding/json"
"github.com/prometheus/common/model"

"github.com/prometheus/prometheus/prompb"
)

@@ -17,10 +17,6 @@ func unmarshalLabels(s string) ([]prompb.Label, string, error) {
for n, v := range m {
if n == "__name__" {
metricName = v
} else {
if !model.IsValidLegacyMetricName(n) {
n = `"` + n + `"`
}
}

res = append(res, prompb.Label{

@@ -4,7 +4,6 @@ import (
"log/slog"
"time"

"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/promql"
)

@@ -26,8 +25,3 @@ func NewEngine(logger *slog.Logger, cfg Config) *Engine {
ActiveQueryTracker: activeQueryTracker,
})
}

// init initializes the prometheus model with UTF8 validation
func init() {
model.NameValidationScheme = model.UTF8Validation
}
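Both hunks above serve the same behavior change: with UTF-8 name validation enabled, metric and label names are no longer restricted to the legacy [a-zA-Z_:][a-zA-Z0-9_:]* form, and non-legacy names are double-quoted when serialized. A minimal, self-contained sketch of that interaction, assuming the prometheus/common/model API as used in the diff (signatures of these helpers have varied across versions of that package):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Same switch the init() above performs.
	model.NameValidationScheme = model.UTF8Validation

	// Dotted names are valid under UTF-8 validation but not under the
	// legacy scheme, so unmarshalLabels wraps them in double quotes.
	name := "http.server.duration"
	if !model.IsValidLegacyMetricName(name) {
		fmt.Printf("quoted: %q\n", name) // quoted: "http.server.duration"
	}
}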
@@ -30,7 +30,7 @@ func getqueryInfo(spec any) queryInfo {
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
return queryInfo{Name: s.Name, Disabled: s.Disabled, Step: s.StepInterval}
case qbtypes.QueryBuilderFormula:
return queryInfo{Name: s.Name, Disabled: false}
return queryInfo{Name: s.Name, Disabled: s.Disabled}
case qbtypes.PromQuery:
return queryInfo{Name: s.Name, Disabled: s.Disabled, Step: s.Step}
case qbtypes.ClickHouseQuery:
@@ -3989,13 +3989,13 @@ func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3.
return &attributeValues, nil
}

func (r *ClickHouseReader) GetSpanAttributeKeys(ctx context.Context) (map[string]v3.AttributeKey, error) {
func (r *ClickHouseReader) GetSpanAttributeKeysByNames(ctx context.Context, names []string) (map[string]v3.AttributeKey, error) {
var query string
var err error
var rows driver.Rows
response := map[string]v3.AttributeKey{}

query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, dataType FROM %s.%s", r.TraceDB, r.spanAttributesKeysTable)
query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, dataType FROM %s.%s where tagKey in ('%s')", r.TraceDB, r.spanAttributesKeysTable, strings.Join(names, "','"))

rows, err = r.db.Query(ctx, query)
if err != nil {
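The rewritten query narrows the previous full-table DISTINCT scan to only the attribute names the caller actually references. A standalone sketch of how the IN list is assembled (the database and table names below are placeholders, not the reader's real field values):

package main

import (
	"fmt"
	"strings"
)

func main() {
	names := []string{"http.method", "http.status_code"}
	// Mirrors the fmt.Sprintf/strings.Join combination above.
	query := fmt.Sprintf(
		"SELECT DISTINCT(tagKey), tagType, dataType FROM %s.%s where tagKey in ('%s')",
		"signoz_traces", "span_attributes_keys", // placeholder names
		strings.Join(names, "','"),
	)
	fmt.Println(query)
	// ... where tagKey in ('http.method','http.status_code')
}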
@@ -9,6 +9,7 @@ import (
"fmt"
"io"
"math"
"math/rand/v2"
"net/http"
"net/url"
"regexp"
@@ -29,6 +30,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
"github.com/SigNoz/signoz/pkg/query-service/transition"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/prometheus/prometheus/promql"
@@ -64,6 +66,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
"github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
traceFunnels "github.com/SigNoz/signoz/pkg/types/tracefunneltypes"

@@ -966,7 +969,7 @@ func (aH *APIHandler) metaForLinks(ctx context.Context, rule *ruletypes.Gettable
zap.L().Error("failed to get log fields using empty keys; the link might not work as expected", zap.Error(err))
}
} else if rule.AlertType == ruletypes.AlertTypeTraces {
traceFields, err := aH.reader.GetSpanAttributeKeys(ctx)
traceFields, err := aH.reader.GetSpanAttributeKeysByNames(ctx, logsv3.GetFieldNames(rule.PostableRule.RuleCondition.CompositeQuery))
if err == nil {
keys = traceFields
} else {
@@ -4345,7 +4348,7 @@ func (aH *APIHandler) getSpanKeysV3(ctx context.Context, queryRangeParams *v3.Qu
data := map[string]v3.AttributeKey{}
for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
if query.DataSource == v3.DataSourceTraces {
spanKeys, err := aH.reader.GetSpanAttributeKeys(ctx)
spanKeys, err := aH.reader.GetSpanAttributeKeysByNames(ctx, logsv3.GetFieldNames(queryRangeParams.CompositeQuery))
if err != nil {
return nil, err
}
@@ -4389,8 +4392,18 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
var errQuriesByName map[string]error
var spanKeys map[string]v3.AttributeKey
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
hasLogsQuery := false
hasTracesQuery := false
for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
if query.DataSource == v3.DataSourceLogs {
hasLogsQuery = true
}
if query.DataSource == v3.DataSourceTraces {
hasTracesQuery = true
}
}
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(queryRangeParams) {
if logsv3.EnrichmentRequired(queryRangeParams) && hasLogsQuery {
logsFields, err := aH.reader.GetLogFieldsFromNames(ctx, logsv3.GetFieldNames(queryRangeParams.CompositeQuery))
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
@@ -4401,15 +4414,15 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
fields := model.GetLogFieldsV3(ctx, queryRangeParams, logsFields)
logsv3.Enrich(queryRangeParams, fields)
}

spanKeys, err = aH.getSpanKeysV3(ctx, queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
return
if hasTracesQuery {
spanKeys, err = aH.getSpanKeysV3(ctx, queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
return
}
tracesV4.Enrich(queryRangeParams, spanKeys)
}
tracesV4.Enrich(queryRangeParams, spanKeys)

}

// WARN: Only works for AND operator in traces query
@@ -4787,8 +4800,19 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
var errQuriesByName map[string]error
var spanKeys map[string]v3.AttributeKey
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
hasLogsQuery := false
hasTracesQuery := false
for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
if query.DataSource == v3.DataSourceLogs {
hasLogsQuery = true
}
if query.DataSource == v3.DataSourceTraces {
hasTracesQuery = true
}
}

// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(queryRangeParams) {
if logsv3.EnrichmentRequired(queryRangeParams) && hasLogsQuery {
// get the fields if any logs query is present
logsFields, err := aH.reader.GetLogFieldsFromNames(r.Context(), logsv3.GetFieldNames(queryRangeParams.CompositeQuery))
if err != nil {
@@ -4800,13 +4824,15 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
logsv3.Enrich(queryRangeParams, fields)
}

spanKeys, err = aH.getSpanKeysV3(ctx, queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
return
if hasTracesQuery {
spanKeys, err = aH.getSpanKeysV3(ctx, queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
return
}
tracesV4.Enrich(queryRangeParams, spanKeys)
}
tracesV4.Enrich(queryRangeParams, spanKeys)
}

// WARN: Only works for AND operator in traces query
@@ -4854,6 +4880,45 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
Result: result,
}

if rand.Float64() < (1.0/30.0) &&
queryRangeParams.CompositeQuery.PanelType != v3.PanelTypeList &&
queryRangeParams.CompositeQuery.PanelType != v3.PanelTypeTrace {
v4JSON, _ := json.Marshal(queryRangeParams)
func() {
defer func() {
if rr := recover(); rr != nil {
zap.L().Warn(
"unexpected panic while converting to v5",
zap.Any("panic", rr),
zap.String("v4_payload", string(v4JSON)),
)
}
}()
v5Req, err := transition.ConvertV3ToV5(queryRangeParams)
if err != nil {
zap.L().Warn("unable to convert to v5 request payload", zap.Error(err), zap.String("v4_payload", string(v4JSON)))
return
}
v5ReqJSON, _ := json.Marshal(v5Req)

v3Resp := v3.QueryRangeResponse{
Result: result,
}

v5Resp, err := transition.ConvertV3ResponseToV5(&v3Resp, querybuildertypesv5.RequestTypeTimeSeries)
if err != nil {
zap.L().Warn("unable to convert to v5 response payload", zap.Error(err))
return
}

v5RespJSON, _ := json.Marshal(v5Resp)
zap.L().Info("v5 request and expected response",
zap.String("request_payload", string(v5ReqJSON)),
zap.String("response_payload", string(v5RespJSON)),
)
}()
}

aH.Respond(w, resp)
}
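The block above shadow-tests the v3-to-v5 conversion on roughly one in thirty eligible requests, and wraps the whole attempt in a recovered closure so a conversion panic can never fail the live response. A stripped-down sketch of the same pattern (all names here are illustrative):

package main

import (
	"log"
	"math/rand/v2"
)

// shadowConvert runs an experimental conversion on a sampled subset of
// requests; a panic inside the closure is logged and swallowed.
func shadowConvert(payload string) {
	if rand.Float64() >= 1.0/30.0 {
		return // not sampled this time
	}
	func() {
		defer func() {
			if rr := recover(); rr != nil {
				log.Printf("shadow conversion panicked: %v", rr)
			}
		}()
		// ... run the experimental conversion and log its result here ...
		log.Printf("converted: %s", payload)
	}()
}

func main() {
	for i := 0; i < 100; i++ {
		shadowConvert(`{"query":"..."}`)
	}
}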
@@ -110,8 +110,8 @@ var HostsTableListQuery = v3.QueryRangeParamsV3{
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
},
Operator: v3.FilterOperatorIn,
Value: []string{"used", "cached"},
Operator: v3.FilterOperatorEqual,
Value: "used",
},
{
Key: v3.AttributeKey{
@@ -182,7 +182,6 @@ func GenerateCollectorConfigWithPipelines(

// Escape any `$`s as `$$$` in config generated for pipelines, to ensure any occurrences
// like $data do not end up being treated as env vars when loading collector config.
// otel-collector-contrib versions 0.111 and above require using $$$ as escaped dollar (and not $$)
for _, procName := range signozPipelineProcNames {
procConf := signozPipelineProcessors[procName]
serializedProcConf, err := yaml.Marshal(procConf)
@@ -192,7 +191,7 @@ func GenerateCollectorConfigWithPipelines(
))
}
escapedSerializedConf := strings.ReplaceAll(
string(serializedProcConf), "$", "$$$",
string(serializedProcConf), "$", "$$",
)

var escapedConf map[string]interface{}
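The escaping change above matters because the collector's config loader expands ${...} references, so a literal dollar in generated pipeline config has to be written as $$ (earlier contrib versions expected $$$). A standalone illustration of the replacement itself:

package main

import (
	"fmt"
	"strings"
)

func main() {
	conf := `regex: "^\\$data (?P<word>\\w+)"` // pipeline config containing a literal $
	escaped := strings.ReplaceAll(conf, "$", "$$")
	fmt.Println(escaped) // every $ doubled; the config loader reads $$ back as a literal $
}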
@@ -12,9 +12,9 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
"github.com/pkg/errors"
"go.opentelemetry.io/collector/otelcol"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/processor"
)

func SimulatePipelinesProcessing(
@@ -42,7 +42,7 @@ func SimulatePipelinesProcessing(
}
simulatorInputPLogs := SignozLogsToPLogs(logs)

processorFactories, err := processor.MakeFactoryMap(
processorFactories, err := otelcol.MakeFactoryMap(
signozlogspipelineprocessor.NewFactory(),
)
if err != nil {
@@ -91,7 +91,7 @@ func GetFieldNames(compositeQuery *v3.CompositeQuery) []string {
}

for queryName, query := range compositeQuery.BuilderQueries {
if query.Expression != queryName && query.DataSource != v3.DataSourceLogs {
if query.Expression != queryName {
continue
}

@@ -103,7 +103,8 @@ func GetFieldNames(compositeQuery *v3.CompositeQuery) []string {
if query.Filters != nil && len(query.Filters.Items) != 0 {
for i := 0; i < len(query.Filters.Items); i++ {
tempItem := jsonFilterEnrich(query.Filters.Items[i])
if tempItem.Key.IsJSON {
// since json query is only for logs
if query.DataSource == v3.DataSourceLogs && tempItem.Key.IsJSON {
key, found := strings.CutPrefix(tempItem.Key.Key, "body.")
if found {
fieldNames[key] = struct{}{}
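The CutPrefix call above is what turns a JSON body filter key into a plain field name. For instance:

package main

import (
	"fmt"
	"strings"
)

func main() {
	key, found := strings.CutPrefix("body.user.id", "body.")
	fmt.Println(key, found) // user.id true — only the inner JSON path is recorded as a field name
}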
@@ -1005,7 +1005,7 @@ func TestGetFieldNames(t *testing.T) {
},
},
},
want: []string{"field1", "field2", "field3"},
want: []string{"field1", "field2"},
},
{
name: "empty aggregate attribute key and filters",
@@ -47,9 +47,11 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {
// Even if there are no recommended changes to the agent's initial config
require.False(tb.testConfigProvider.HasRecommendations())
agent1Conn := &MockOpAmpConnection{}
agent1Id := valuer.GenerateUUID().String()
agent1Id, err := valuer.GenerateUUID().MarshalBinary()
require.Nil(err)
// get orgId from the db
tb.opampServer.OnMessage(
context.Background(),
agent1Conn,
&protobufs.AgentToServer{
InstanceUid: agent1Id,
@@ -70,9 +72,12 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {

tb.testConfigProvider.ZPagesEndpoint = "localhost:55555"
require.True(tb.testConfigProvider.HasRecommendations())
agent2Id := valuer.GenerateUUID().String()
agent2IdUUID := valuer.GenerateUUID()
agent2Id, err := agent2IdUUID.MarshalBinary()
require.Nil(err)
agent2Conn := &MockOpAmpConnection{}
tb.opampServer.OnMessage(
context.Background(),
agent2Conn,
&protobufs.AgentToServer{
InstanceUid: agent2Id,
@@ -97,7 +102,7 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {
)

agent2Conn.ClearMsgsFromServer()
tb.opampServer.OnMessage(agent2Conn, &protobufs.AgentToServer{
tb.opampServer.OnMessage(context.Background(), agent2Conn, &protobufs.AgentToServer{
InstanceUid: agent2Id,
EffectiveConfig: &protobufs.EffectiveConfig{
ConfigMap: NewAgentConfigMap(
@@ -110,10 +115,10 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {
},
})
expectedConfId := tb.testConfigProvider.ZPagesEndpoint
require.True(tb.testConfigProvider.HasReportedDeploymentStatus(orgID, expectedConfId, agent2Id),
require.True(tb.testConfigProvider.HasReportedDeploymentStatus(orgID, expectedConfId, agent2IdUUID.String()),
"Server should report deployment success to config provider on receiving update from agent.",
)
require.True(tb.testConfigProvider.ReportedDeploymentStatuses[orgID.String()+expectedConfId][agent2Id])
require.True(tb.testConfigProvider.ReportedDeploymentStatuses[orgID.String()+expectedConfId][agent2IdUUID.String()])
require.Nil(
agent2Conn.LatestMsgFromServer(),
"Server should not recommend a RemoteConfig if agent is already running it.",
@@ -135,7 +140,7 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {
}

lastAgent2Msg = agent2Conn.LatestMsgFromServer()
tb.opampServer.OnMessage(agent2Conn, &protobufs.AgentToServer{
tb.opampServer.OnMessage(context.Background(), agent2Conn, &protobufs.AgentToServer{
InstanceUid: agent2Id,
RemoteConfigStatus: &protobufs.RemoteConfigStatus{
Status: protobufs.RemoteConfigStatuses_RemoteConfigStatuses_FAILED,
@@ -143,14 +148,14 @@ func TestOpAMPServerToAgentCommunicationWithConfigProvider(t *testing.T) {
},
})
expectedConfId = tb.testConfigProvider.ZPagesEndpoint
require.True(tb.testConfigProvider.HasReportedDeploymentStatus(orgID, expectedConfId, agent2Id),
require.True(tb.testConfigProvider.HasReportedDeploymentStatus(orgID, expectedConfId, agent2IdUUID.String()),
"Server should report deployment failure to config provider on receiving update from agent.",
)
require.False(tb.testConfigProvider.ReportedDeploymentStatuses[orgID.String()+expectedConfId][agent2Id])
require.False(tb.testConfigProvider.ReportedDeploymentStatuses[orgID.String()+expectedConfId][agent2IdUUID.String()])

lastAgent1Msg = agent1Conn.LatestMsgFromServer()
agent1Conn.ClearMsgsFromServer()
response := tb.opampServer.OnMessage(agent1Conn, &protobufs.AgentToServer{
response := tb.opampServer.OnMessage(context.Background(), agent1Conn, &protobufs.AgentToServer{
InstanceUid: agent1Id,
RemoteConfigStatus: &protobufs.RemoteConfigStatus{
Status: protobufs.RemoteConfigStatuses_RemoteConfigStatuses_APPLIED,
@@ -177,12 +182,15 @@ func TestOpAMPServerAgentLimit(t *testing.T) {
tb := newTestbed(t)
// Create 51 agents and check if the first one gets deleted
var agentConnections []*MockOpAmpConnection
var agentIds []string
var agentIds [][]byte
for i := 0; i < 51; i++ {
agentConn := &MockOpAmpConnection{}
agentId := valuer.GenerateUUID().String()
agentIdUUID := valuer.GenerateUUID()
agentId, err := agentIdUUID.MarshalBinary()
require.Nil(err)
agentIds = append(agentIds, agentId)
tb.opampServer.OnMessage(
context.Background(),
agentConn,
&protobufs.AgentToServer{
InstanceUid: agentId,
@@ -235,12 +243,12 @@ func newTestbed(t *testing.T) *testbed {
testDB := utils.NewQueryServiceDBForTests(t)

providerSettings := instrumentationtest.New().ToProviderSettings()
sharder, err := noopsharder.New(context.TODO(), providerSettings, sharder.Config{})
sharder, err := noopsharder.New(context.Background(), providerSettings, sharder.Config{})
require.Nil(t, err)
orgGetter := implorganization.NewGetter(implorganization.NewStore(testDB), sharder)
model.Init(testDB, slog.Default(), orgGetter)
testConfigProvider := NewMockAgentConfigProvider()
opampServer := InitializeServer(nil, testConfigProvider)
opampServer := InitializeServer(nil, testConfigProvider, instrumentationtest.New())

// create a test org
err = utils.CreateTestOrg(t, testDB)
@@ -1,15 +1,25 @@
package opamp

import "log"
import (
"context"
"fmt"
"log/slog"
)

type Logger struct {
logger *log.Logger
type logger struct {
l *slog.Logger
}

func (l *Logger) Debugf(format string, v ...interface{}) {
l.logger.Printf(format, v...)
func wrappedLogger(l *slog.Logger) *logger {
return &logger{
l: l,
}
}

func (l *Logger) Errorf(format string, v ...interface{}) {
l.logger.Printf(format, v...)
func (l *logger) Debugf(ctx context.Context, format string, args ...interface{}) {
l.l.DebugContext(ctx, fmt.Sprintf(format, args...))
}

func (l *logger) Errorf(ctx context.Context, format string, args ...interface{}) {
l.l.ErrorContext(ctx, fmt.Sprintf(format, args...))
}
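The rewrite above adapts the server's slog.Logger to the context-aware printf-style interface that newer opamp-go expects. A self-contained sketch of the same adapter shape (type names below are illustrative, not the opamp-go ones):

package main

import (
	"context"
	"fmt"
	"log/slog"
)

// printfLogger adapts a structured slog.Logger to a Debugf/Errorf interface.
type printfLogger struct{ l *slog.Logger }

func (p printfLogger) Debugf(ctx context.Context, format string, args ...any) {
	p.l.DebugContext(ctx, fmt.Sprintf(format, args...))
}

func (p printfLogger) Errorf(ctx context.Context, format string, args ...any) {
	p.l.ErrorContext(ctx, fmt.Sprintf(format, args...))
}

func main() {
	pl := printfLogger{l: slog.Default()}
	pl.Errorf(context.Background(), "agent %s disconnected", "abc123")
}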
@@ -43,6 +43,10 @@ func (conn *MockOpAmpConnection) RemoteAddr() net.Addr {
return nil
}

func (conn *MockOpAmpConnection) Connection() net.Conn {
return nil
}

// Implements opamp.AgentConfigProvider
type MockAgentConfigProvider struct {
// An updated config is recommended by TestAgentConfProvider
@@ -265,7 +265,16 @@ func (agent *Agent) processStatusUpdate(

configChanged := false
if agentDescrChanged {
// Agent description is changed.
// Agent description has changed but effective config is missing, so force the agent to send its config
//
// Note: ideally this flag should be sent alongside ErrorResponse;
// but the OpAMP agent prioritizes Flags before ErrorResponse, hence sending
// requests consecutively without respecting the retry cooldown. If that changes in future,
// it should be shifted there; to test, uncomment Flags added in opamp_server.go
if newStatus.EffectiveConfig == nil || newStatus.EffectiveConfig.ConfigMap == nil {
response.Flags = uint64(protobufs.ServerToAgentFlags_ServerToAgentFlags_ReportFullState)
return
}

// Get the default org ID
// agent.

@@ -75,6 +75,10 @@ func (agents *Agents) FindAgent(agentID string) *Agent {
// If the Agent instance does not exist, it is created and added to the list of
// Agent instances.
func (agents *Agents) FindOrCreateAgent(agentID string, conn types.Connection, orgID valuer.UUID) (*Agent, bool, error) {
if agentID == "" {
return nil, false, errors.New("cannot create agent without agentID")
}

agents.mux.Lock()
defer agents.mux.Unlock()
agent, ok := agents.agentsById[agentID]
@@ -2,8 +2,10 @@ package opamp

import (
"context"
"net/http"
"time"

"github.com/SigNoz/signoz/pkg/instrumentation"
model "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/open-telemetry/opamp-go/protobufs"
@@ -30,7 +32,9 @@ const capabilities = protobufs.ServerCapabilities_ServerCapabilities_AcceptsEffe
protobufs.ServerCapabilities_ServerCapabilities_AcceptsStatus

func InitializeServer(
agents *model.Agents, agentConfigProvider AgentConfigProvider,
agents *model.Agents,
agentConfigProvider AgentConfigProvider,
instrumentation instrumentation.Instrumentation,
) *Server {
if agents == nil {
agents = &model.AllAgents
@@ -40,16 +44,23 @@ func InitializeServer(
agents: agents,
agentConfigProvider: agentConfigProvider,
}
opAmpServer.server = server.New(zap.L().Sugar())
opAmpServer.server = server.New(wrappedLogger(instrumentation.Logger()))
return opAmpServer
}

func (srv *Server) Start(listener string) error {
settings := server.StartSettings{
Settings: server.Settings{
Callbacks: server.CallbacksStruct{
OnMessageFunc: srv.OnMessage,
OnConnectionCloseFunc: srv.onDisconnect,
Callbacks: types.Callbacks{
OnConnecting: func(request *http.Request) types.ConnectionResponse {
return types.ConnectionResponse{
Accept: true,
ConnectionCallbacks: types.ConnectionCallbacks{
OnMessage: srv.OnMessage,
OnConnectionClose: srv.onDisconnect,
},
}
},
},
},
ListenEndpoint: listener,
@@ -86,8 +97,8 @@ func (srv *Server) onDisconnect(conn types.Connection) {
// but we keep them in context mapped which is mapped to the instanceID, so we would know the
// orgID from the context
// note :- there can only be 50 agents in the db for a given orgID, we don't have a check in-memory but we delete from the db after insert.
func (srv *Server) OnMessage(conn types.Connection, msg *protobufs.AgentToServer) *protobufs.ServerToAgent {
agentID := msg.InstanceUid
func (srv *Server) OnMessage(ctx context.Context, conn types.Connection, msg *protobufs.AgentToServer) *protobufs.ServerToAgent {
agentID, _ := valuer.NewUUIDFromBytes(msg.GetInstanceUid())

// find the orgID, if nothing is found keep it empty.
// the find or create agent will return an error if orgID is empty
@@ -98,13 +109,16 @@ func (srv *Server) OnMessage(conn types.Connection, msg *protobufs.AgentToServer
orgID = orgIDs[0].ID
}

agent, created, err := srv.agents.FindOrCreateAgent(agentID, conn, orgID)
// when a new org is created and the agent is not able to register
// the changes in pkg/query-service/app/opamp/model/agent.go 270 - 277 takes care that
// agents sends the effective config when we processStatusUpdate.
agent, created, err := srv.agents.FindOrCreateAgent(agentID.String(), conn, orgID)
if err != nil {
zap.L().Error("Failed to find or create agent", zap.String("agentID", agentID), zap.Error(err))
zap.L().Error("Failed to find or create agent", zap.String("agentID", agentID.String()), zap.Error(err))

// Return error response according to OpAMP protocol
return &protobufs.ServerToAgent{
InstanceUid: agentID,
InstanceUid: msg.GetInstanceUid(),
ErrorResponse: &protobufs.ServerErrorResponse{
Type: protobufs.ServerErrorResponseType_ServerErrorResponseType_Unavailable,
Details: &protobufs.ServerErrorResponse_RetryInfo{
@@ -113,6 +127,8 @@ func (srv *Server) OnMessage(conn types.Connection, msg *protobufs.AgentToServer
},
},
},
// Note: refer to opamp/model/agent.go; look for `Flags` keyword
// Flags: uint64(protobufs.ServerToAgentFlags_ServerToAgentFlags_ReportFullState),
}
}

@@ -126,7 +142,7 @@ func (srv *Server) OnMessage(conn types.Connection, msg *protobufs.AgentToServer
}

response := &protobufs.ServerToAgent{
InstanceUid: agentID,
InstanceUid: msg.GetInstanceUid(),
Capabilities: uint64(capabilities),
}
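Newer opamp-go carries InstanceUid as 16 raw bytes rather than a string, which is why the handler above parses it into a UUID before using it as a map key and echoes msg.GetInstanceUid() back verbatim. A sketch of the byte/UUID round trip using github.com/google/uuid (the diff uses the repo's valuer package, but the mechanics are the same):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New()
	wire, _ := id.MarshalBinary() // 16-byte form, as sent in AgentToServer.InstanceUid

	parsed, err := uuid.FromBytes(wire) // server-side reverse conversion
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.String() == id.String()) // true
}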
@@ -162,6 +162,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
s.opampServer = opamp.InitializeServer(
&opAmpModel.AllAgents,
agentConfMgr,
signoz.Instrumentation,
)

orgs, err := apiHandler.Signoz.Modules.OrgGetter.ListByOwnedKeyRange(context.Background())
@@ -32,7 +32,7 @@ type Reader interface {
GetTraceAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error)
GetTraceAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
GetTraceAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
GetSpanAttributeKeys(ctx context.Context) (map[string]v3.AttributeKey, error)
GetSpanAttributeKeysByNames(ctx context.Context, names []string) (map[string]v3.AttributeKey, error)

ListErrors(ctx context.Context, params *model.ListErrorsParams) (*[]model.Error, *model.ApiError)
CountErrors(ctx context.Context, params *model.CountErrorsParams) (uint64, *model.ApiError)
@@ -236,7 +236,14 @@ func (q *queryCache) FindMissingTimeRanges(orgID valuer.UUID, start, end, step i

func (q *queryCache) getCachedSeriesData(orgID valuer.UUID, cacheKey string) []*CachedSeriesData {
cacheableSeriesData := new(CacheableSeriesData)
err := q.cache.Get(context.TODO(), orgID, cacheKey, cacheableSeriesData, true)
tmpcacheableSeriesData := new(CacheableSeriesData)
err := q.cache.Get(context.TODO(), orgID, cacheKey, tmpcacheableSeriesData, true)
data, err := tmpcacheableSeriesData.MarshalBinary()
if err != nil {
zap.L().Error("error marshalling cacheable series data", zap.Error(err))
}
cacheableSeriesData.UnmarshalBinary(data)

if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return nil
}
@@ -300,11 +307,18 @@ func (q *queryCache) MergeWithCachedSeriesDataV2(orgID valuer.UUID, cacheKey str
return newData
}

tmpcacheableSeriesData := new(CacheableSeriesData)
cacheableSeriesData := new(CacheableSeriesData)
err := q.cache.Get(context.TODO(), orgID, cacheKey, cacheableSeriesData, true)
err := q.cache.Get(context.TODO(), orgID, cacheKey, tmpcacheableSeriesData, true)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return nil
}
data, err := tmpcacheableSeriesData.MarshalBinary()
if err != nil {
zap.L().Error("error marshalling cacheable series data", zap.Error(err))
}
cacheableSeriesData.UnmarshalBinary(data)

allData := append(cacheableSeriesData.Series, newData...)

sort.Slice(allData, func(i, j int) bool {
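The marshal/unmarshal detour above appears to exist to hand back an independent copy of the cached series, so that the later append and sort cannot mutate what is stored in the cache. A simplified sketch of copy-via-binary-roundtrip (the type below is a stand-in for CacheableSeriesData):

package main

import (
	"encoding/json"
	"fmt"
)

type cacheable struct {
	Series [][]float64
}

func (c *cacheable) MarshalBinary() ([]byte, error) { return json.Marshal(c) }
func (c *cacheable) UnmarshalBinary(b []byte) error { return json.Unmarshal(b, c) }

func main() {
	src := &cacheable{Series: [][]float64{{1, 2}}}

	raw, _ := src.MarshalBinary()
	dst := new(cacheable)
	_ = dst.UnmarshalBinary(raw)

	dst.Series[0][0] = 99         // mutate the copy
	fmt.Println(src.Series[0][0]) // 1 — the cached original is untouched
}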
@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"math"
"math/rand/v2"
"text/template"
"time"

@@ -15,6 +16,8 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/contextlinks"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/postprocess"
"github.com/SigNoz/signoz/pkg/query-service/transition"
"github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/valuer"

@@ -52,6 +55,9 @@ type ThresholdRule struct {
// used for attribute metadata enrichment for logs and traces
logsKeys map[string]v3.AttributeKey
spansKeys map[string]v3.AttributeKey

// internal use
triggerCnt int
}

func NewThresholdRule(
@@ -288,7 +294,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID,
}

if hasTracesQuery {
spanKeys, err := r.reader.GetSpanAttributeKeys(ctx)
spanKeys, err := r.reader.GetSpanAttributeKeysByNames(ctx, logsv3.GetFieldNames(params.CompositeQuery))
if err != nil {
return nil, err
}
@@ -349,12 +355,53 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID,
return resultVector, nil
}

shouldLog := false

for _, series := range queryResult.Series {
smpl, shouldAlert := r.ShouldAlert(*series)
if shouldAlert {
shouldLog = true
resultVector = append(resultVector, smpl)
}
}

if (shouldLog && r.triggerCnt < 100) || rand.Float64() < (1.0/30.0) {
func(ts time.Time) {
r.triggerCnt++
defer func() {
if rr := recover(); rr != nil {
zap.L().Warn("unexpected panic while converting to v5",
zap.Any("panic", rr),
zap.String("ruleid", r.ID()),
)
}
}()
v5Req, err := transition.ConvertV3ToV5(params)
if err != nil {
zap.L().Warn("unable to convert to v5 request payload", zap.Error(err), zap.String("ruleid", r.ID()))
return
}
v5ReqJSON, _ := json.Marshal(v5Req)

v3Resp := v3.QueryRangeResponse{
Result: results,
}

v5Resp, err := transition.ConvertV3ResponseToV5(&v3Resp, querybuildertypesv5.RequestTypeTimeSeries)
if err != nil {
zap.L().Warn("unable to convert to v5 response payload", zap.Error(err), zap.String("ruleid", r.ID()))
return
}

v5RespJSON, _ := json.Marshal(v5Resp)
zap.L().Info("v5 request and expected response for triggered alert",
zap.String("request_payload", string(v5ReqJSON)),
zap.String("response_payload", string(v5RespJSON)),
zap.String("ruleid", r.ID()),
)
}(ts)
}

return resultVector, nil
}
@@ -39,7 +39,6 @@ import (
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/knadh/koanf/parsers/yaml"
"github.com/open-telemetry/opamp-go/protobufs"
@@ -490,10 +489,10 @@ func NewTestbedWithoutOpamp(t *testing.T, sqlStore sqlstore.SQLStore) *LogPipeli
}

providerSettings := instrumentationtest.New().ToProviderSettings()
sharder, err := noopsharder.New(context.TODO(), providerSettings, sharder.Config{})
sharder, err := noopsharder.New(context.Background(), providerSettings, sharder.Config{})
require.NoError(t, err)
orgGetter := implorganization.NewGetter(implorganization.NewStore(sqlStore), sharder)
alertmanager, err := signozalertmanager.New(context.TODO(), providerSettings, alertmanager.Config{Signoz: alertmanager.Signoz{PollInterval: 10 * time.Second, Config: alertmanagerserver.NewConfig()}}, sqlStore, orgGetter)
alertmanager, err := signozalertmanager.New(context.Background(), providerSettings, alertmanager.Config{Signoz: alertmanager.Signoz{PollInterval: 10 * time.Second, Config: alertmanagerserver.NewConfig()}}, sqlStore, orgGetter)
require.NoError(t, err)
jwt := authtypes.NewJWT("", 1*time.Hour, 1*time.Hour)
emailing := emailingtest.New()
@@ -543,12 +542,13 @@ func NewLogPipelinesTestBed(t *testing.T, testDB sqlstore.SQLStore, agentID stri
testbed := NewTestbedWithoutOpamp(t, testDB)

providerSettings := instrumentationtest.New().ToProviderSettings()
sharder, err := noopsharder.New(context.TODO(), providerSettings, sharder.Config{})
sharder, err := noopsharder.New(context.Background(), providerSettings, sharder.Config{})
require.Nil(t, err)
orgGetter := implorganization.NewGetter(implorganization.NewStore(testbed.store), sharder)

model.Init(testbed.store, slog.Default(), orgGetter)

opampServer := opamp.InitializeServer(nil, testbed.agentConfMgr)
opampServer := opamp.InitializeServer(nil, testbed.agentConfMgr, instrumentationtest.New())
err = opampServer.Start(opamp.GetAvailableLocalAddress())
require.Nil(t, err, "failed to start opamp server")

@@ -558,9 +558,10 @@ func NewLogPipelinesTestBed(t *testing.T, testDB sqlstore.SQLStore, agentID stri

opampClientConnection := &opamp.MockOpAmpConnection{}
opampServer.OnMessage(
context.Background(),
opampClientConnection,
&protobufs.AgentToServer{
InstanceUid: agentID,
InstanceUid: []byte(agentID),
EffectiveConfig: &protobufs.EffectiveConfig{
ConfigMap: newInitialAgentConfigMap(),
},
@@ -757,8 +758,8 @@ func assertPipelinesRecommendedInRemoteConfig(

func (tb *LogPipelinesTestBed) simulateOpampClientAcknowledgementForLatestConfig(agentID string) {
lastMsg := tb.opampClientConn.LatestMsgFromServer()
tb.opampServer.OnMessage(tb.opampClientConn, &protobufs.AgentToServer{
InstanceUid: agentID,
tb.opampServer.OnMessage(context.Background(), tb.opampClientConn, &protobufs.AgentToServer{
InstanceUid: []byte(agentID),
EffectiveConfig: &protobufs.EffectiveConfig{
ConfigMap: lastMsg.RemoteConfig.Config,
},
@@ -773,10 +774,14 @@ func (tb *LogPipelinesTestBed) assertNewAgentGetsPipelinesOnConnection(
pipelines []pipelinetypes.GettablePipeline,
) {
newAgentConn := &opamp.MockOpAmpConnection{}
agentIDUUID := valuer.GenerateUUID()
agentID, err := agentIDUUID.MarshalBinary()
require.Nil(tb.t, err)
tb.opampServer.OnMessage(
context.Background(),
newAgentConn,
&protobufs.AgentToServer{
InstanceUid: uuid.NewString(),
InstanceUid: agentID,
EffectiveConfig: &protobufs.EffectiveConfig{
ConfigMap: newInitialAgentConfigMap(),
},
683	pkg/query-service/transition/v3_to_v5_req.go	Normal file
@@ -0,0 +1,683 @@
package transition

import (
"fmt"
"strings"
"time"

"github.com/SigNoz/signoz/pkg/types/metrictypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"

"github.com/SigNoz/signoz/pkg/query-service/constants"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/utils"

v5 "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

func ConvertV3ToV5(params *v3.QueryRangeParamsV3) (*v5.QueryRangeRequest, error) {
v3Params := params.Clone()

if v3Params == nil || v3Params.CompositeQuery == nil {
return nil, fmt.Errorf("v3 params or composite query is nil")
}

varItems := map[string]v5.VariableItem{}

for name, value := range v3Params.Variables {
varItems[name] = v5.VariableItem{
Type: v5.QueryVariableType, // doesn't matter at the moment
Value: value,
}
}

v5Request := &v5.QueryRangeRequest{
SchemaVersion: "v5",
Start: uint64(v3Params.Start),
End: uint64(v3Params.End),
RequestType: convertPanelTypeToRequestType(v3Params.CompositeQuery.PanelType),
Variables: varItems,
CompositeQuery: v5.CompositeQuery{
Queries: []v5.QueryEnvelope{},
},
FormatOptions: &v5.FormatOptions{
FormatTableResultForUI: v3Params.FormatForWeb,
FillGaps: v3Params.CompositeQuery.FillGaps,
},
}

// Convert based on query type
switch v3Params.CompositeQuery.QueryType {
case v3.QueryTypeBuilder:
if err := convertBuilderQueries(v3Params.CompositeQuery.BuilderQueries, &v5Request.CompositeQuery); err != nil {
return nil, err
}
case v3.QueryTypeClickHouseSQL:
if err := convertClickHouseQueries(v3Params.CompositeQuery.ClickHouseQueries, &v5Request.CompositeQuery); err != nil {
return nil, err
}
case v3.QueryTypePromQL:
if err := convertPromQueries(v3Params.CompositeQuery.PromQueries, v3Params.Step, &v5Request.CompositeQuery); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unsupported query type: %s", v3Params.CompositeQuery.QueryType)
}

return v5Request, nil
}

func convertPanelTypeToRequestType(panelType v3.PanelType) v5.RequestType {
switch panelType {
case v3.PanelTypeValue, v3.PanelTypeTable:
return v5.RequestTypeScalar
case v3.PanelTypeGraph:
return v5.RequestTypeTimeSeries
case v3.PanelTypeList, v3.PanelTypeTrace:
return v5.RequestTypeRaw
default:
return v5.RequestTypeUnknown
}
}

func convertBuilderQueries(v3Queries map[string]*v3.BuilderQuery, v5Composite *v5.CompositeQuery) error {
for name, query := range v3Queries {
if query == nil {
continue
}

// Handle formula queries
if query.Expression != "" && query.Expression != name {
v5Envelope := v5.QueryEnvelope{
Type: v5.QueryTypeFormula,
Spec: v5.QueryBuilderFormula{
Name: name,
Expression: query.Expression,
Disabled: query.Disabled,
Order: convertOrderBy(query.OrderBy, query),
Limit: int(query.Limit),
Having: convertHaving(query.Having, query),
Functions: convertFunctions(query.Functions),
},
}
v5Composite.Queries = append(v5Composite.Queries, v5Envelope)
continue
}

// Regular builder query
envelope, err := convertSingleBuilderQuery(name, query)
if err != nil {
return err
}
v5Composite.Queries = append(v5Composite.Queries, envelope)
}
return nil
}

func convertSingleBuilderQuery(name string, v3Query *v3.BuilderQuery) (v5.QueryEnvelope, error) {
v5Envelope := v5.QueryEnvelope{
Type: v5.QueryTypeBuilder,
}

switch v3Query.DataSource {
case v3.DataSourceTraces:
v5Query := v5.QueryBuilderQuery[v5.TraceAggregation]{
Name: name,
Signal: telemetrytypes.SignalTraces,
Disabled: v3Query.Disabled,
StepInterval: v5.Step{Duration: time.Duration(v3Query.StepInterval) * time.Second},
Filter: convertFilter(v3Query.Filters),
GroupBy: convertGroupBy(v3Query.GroupBy),
Order: convertOrderBy(v3Query.OrderBy, v3Query),
Limit: int(v3Query.Limit),
Offset: int(v3Query.Offset),
Having: convertHaving(v3Query.Having, v3Query),
Functions: convertFunctions(v3Query.Functions),
SelectFields: convertSelectColumns(v3Query.SelectColumns),
}

// Convert trace aggregations
if v3Query.AggregateOperator != v3.AggregateOperatorNoOp {
v5Query.Aggregations = []v5.TraceAggregation{
{
Expression: buildTraceAggregationExpression(v3Query),
Alias: "",
},
}
}

v5Envelope.Spec = v5Query

case v3.DataSourceLogs:
v5Query := v5.QueryBuilderQuery[v5.LogAggregation]{
Name: name,
Signal: telemetrytypes.SignalLogs,
Disabled: v3Query.Disabled,
StepInterval: v5.Step{Duration: time.Duration(v3Query.StepInterval) * time.Second},
Filter: convertFilter(v3Query.Filters),
GroupBy: convertGroupBy(v3Query.GroupBy),
Order: convertOrderBy(v3Query.OrderBy, v3Query),
Limit: int(v3Query.PageSize),
Offset: int(v3Query.Offset),
Having: convertHaving(v3Query.Having, v3Query),
Functions: convertFunctions(v3Query.Functions),
}

// Convert log aggregations
if v3Query.AggregateOperator != v3.AggregateOperatorNoOp {
v5Query.Aggregations = []v5.LogAggregation{
{
Expression: buildLogAggregationExpression(v3Query),
Alias: "",
},
}
}

v5Envelope.Spec = v5Query

case v3.DataSourceMetrics:
v5Query := v5.QueryBuilderQuery[v5.MetricAggregation]{
Name: name,
Signal: telemetrytypes.SignalMetrics,
Disabled: v3Query.Disabled,
StepInterval: v5.Step{Duration: time.Duration(v3Query.StepInterval) * time.Second},
Filter: convertFilter(v3Query.Filters),
GroupBy: convertGroupBy(v3Query.GroupBy),
Order: convertOrderBy(v3Query.OrderBy, v3Query),
Limit: int(v3Query.Limit),
Offset: int(v3Query.Offset),
Having: convertHaving(v3Query.Having, v3Query),
Functions: convertFunctions(v3Query.Functions),
}

if v3Query.AggregateAttribute.Key != "" {
v5Query.Aggregations = []v5.MetricAggregation{
{
MetricName: v3Query.AggregateAttribute.Key,
Temporality: convertTemporality(v3Query.Temporality),
TimeAggregation: convertTimeAggregation(v3Query.TimeAggregation),
SpaceAggregation: convertSpaceAggregation(v3Query.SpaceAggregation),
},
}
}

v5Envelope.Spec = v5Query

default:
return v5Envelope, fmt.Errorf("unsupported data source: %s", v3Query.DataSource)
}

return v5Envelope, nil
}

func buildTraceAggregationExpression(v3Query *v3.BuilderQuery) string {
switch v3Query.AggregateOperator {
case v3.AggregateOperatorCount:
if v3Query.AggregateAttribute.Key != "" {
return fmt.Sprintf("count(%s)", v3Query.AggregateAttribute.Key)
}
return "count()"
case v3.AggregateOperatorCountDistinct:
if v3Query.AggregateAttribute.Key != "" {
return fmt.Sprintf("countDistinct(%s)", v3Query.AggregateAttribute.Key)
}
return "countDistinct()"
case v3.AggregateOperatorSum:
return fmt.Sprintf("sum(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorAvg:
return fmt.Sprintf("avg(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorMin:
return fmt.Sprintf("min(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorMax:
return fmt.Sprintf("max(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorP05:
return fmt.Sprintf("p05(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorP10:
return fmt.Sprintf("p10(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorP20:
return fmt.Sprintf("p20(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorP25:
return fmt.Sprintf("p25(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorP50:
return fmt.Sprintf("p50(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorP75:
return fmt.Sprintf("p75(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorP90:
return fmt.Sprintf("p90(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorP95:
return fmt.Sprintf("p95(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorP99:
return fmt.Sprintf("p99(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorRate:
return "rate()"
case v3.AggregateOperatorRateSum:
return fmt.Sprintf("rate_sum(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorRateAvg:
return fmt.Sprintf("rate_avg(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorRateMin:
return fmt.Sprintf("rate_min(%s)", v3Query.AggregateAttribute.Key)
case v3.AggregateOperatorRateMax:
return fmt.Sprintf("rate_max(%s)", v3Query.AggregateAttribute.Key)
default:
return "count()"
}
}
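// For example, a v3 builder query with AggregateOperator = P99 and
// AggregateAttribute.Key = "duration_nano" maps to "p99(duration_nano)";
// AggregateOperatorRate ignores the key and maps to "rate()"; unknown
// operators fall back to "count()".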
func buildLogAggregationExpression(v3Query *v3.BuilderQuery) string {
// Similar to traces
return buildTraceAggregationExpression(v3Query)
}

func convertFilter(v3Filter *v3.FilterSet) *v5.Filter {
if v3Filter == nil || len(v3Filter.Items) == 0 {
return nil
}

expressions := []string{}
for _, item := range v3Filter.Items {
expr := buildFilterExpression(item)
if expr != "" {
expressions = append(expressions, expr)
}
}

if len(expressions) == 0 {
return nil
}

operator := "AND"
if v3Filter.Operator == "OR" {
operator = "OR"
}

return &v5.Filter{
Expression: strings.Join(expressions, fmt.Sprintf(" %s ", operator)),
}
}
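// For example, a filter set with items `service.name` Equal "api" and
// `duration_nano` GreaterThan 100000000, joined with the set operator "AND",
// produces the v5 expression:
//
//   service.name = 'api' AND duration_nano > 100000000
//
// (the exact value quoting is whatever utils.ClickHouseFormattedValue emits).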
func buildFilterExpression(item v3.FilterItem) string {
	key := item.Key.Key
	value := item.Value

	switch item.Operator {
	case v3.FilterOperatorEqual:
		return fmt.Sprintf("%s = %s", key, formatValue(value))
	case v3.FilterOperatorNotEqual:
		return fmt.Sprintf("%s != %s", key, formatValue(value))
	case v3.FilterOperatorGreaterThan:
		return fmt.Sprintf("%s > %s", key, formatValue(value))
	case v3.FilterOperatorGreaterThanOrEq:
		return fmt.Sprintf("%s >= %s", key, formatValue(value))
	case v3.FilterOperatorLessThan:
		return fmt.Sprintf("%s < %s", key, formatValue(value))
	case v3.FilterOperatorLessThanOrEq:
		return fmt.Sprintf("%s <= %s", key, formatValue(value))
	case v3.FilterOperatorIn:
		return fmt.Sprintf("%s IN %s", key, formatValue(value))
	case v3.FilterOperatorNotIn:
		return fmt.Sprintf("%s NOT IN %s", key, formatValue(value))
	case v3.FilterOperatorContains:
		return fmt.Sprintf("%s LIKE '%%%v%%'", key, value)
	case v3.FilterOperatorNotContains:
		return fmt.Sprintf("%s NOT LIKE '%%%v%%'", key, value)
	case v3.FilterOperatorRegex:
		return fmt.Sprintf("%s REGEXP %s", key, formatValue(value))
	case v3.FilterOperatorNotRegex:
		return fmt.Sprintf("%s NOT REGEXP %s", key, formatValue(value))
	case v3.FilterOperatorExists:
		return fmt.Sprintf("%s EXISTS", key)
	case v3.FilterOperatorNotExists:
		return fmt.Sprintf("%s NOT EXISTS", key)
	default:
		return ""
	}
}

func formatValue(value interface{}) string {
	return utils.ClickHouseFormattedValue(value)
}
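formatValue delegates to utils.ClickHouseFormattedValue; roughly (a hedged sketch of that helper's typical behaviour, not a spec):

	formatValue("checkout")              // → 'checkout'  (strings single-quoted)
	formatValue(42)                      // → 42          (numbers unquoted)
	formatValue([]interface{}{"a", "b"}) // → ['a','b']   (slices bracketed, for IN clauses)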

func convertGroupBy(v3GroupBy []v3.AttributeKey) []v5.GroupByKey {
	v5GroupBy := []v5.GroupByKey{}
	for _, key := range v3GroupBy {
		v5GroupBy = append(v5GroupBy, v5.GroupByKey{
			TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
				Name:          key.Key,
				FieldDataType: convertDataType(key.DataType),
				FieldContext:  convertAttributeType(key.Type),
				Materialized:  key.IsColumn,
			},
		})
	}
	return v5GroupBy
}

func convertOrderBy(v3OrderBy []v3.OrderBy, v3Query *v3.BuilderQuery) []v5.OrderBy {
	v5OrderBy := []v5.OrderBy{}
	for _, order := range v3OrderBy {
		direction := v5.OrderDirectionAsc
		if order.Order == v3.DirectionDesc {
			direction = v5.OrderDirectionDesc
		}

		var orderByName string
		if order.ColumnName == "#SIGNOZ_VALUE" {
			if v3Query.DataSource == v3.DataSourceLogs || v3Query.DataSource == v3.DataSourceTraces {
				orderByName = buildTraceAggregationExpression(v3Query)
			} else {
				if v3Query.Expression != v3Query.QueryName {
					orderByName = v3Query.Expression
				} else {
					orderByName = fmt.Sprintf("%s(%s)", v3Query.SpaceAggregation, v3Query.AggregateAttribute.Key)
				}
			}
		} else {
			orderByName = order.ColumnName
		}

		v5OrderBy = append(v5OrderBy, v5.OrderBy{
			Key: v5.OrderByKey{
				TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
					Name:         orderByName,
					Materialized: order.IsColumn,
				},
			},
			Direction: direction,
		})
	}
	return v5OrderBy
}
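The #SIGNOZ_VALUE sentinel means "order by the aggregation itself". A sketch of how it resolves for a traces query (assumed value shapes from the v3 model above):

	ob := []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: v3.DirectionDesc}}
	q := &v3.BuilderQuery{
		DataSource:        v3.DataSourceTraces,
		AggregateOperator: v3.AggregateOperatorCount,
	}
	out := convertOrderBy(ob, q)
	// expected: out[0].Key.Name == "count()" and out[0].Direction == v5.OrderDirectionDesc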
func convertHaving(v3Having []v3.Having, v3Query *v3.BuilderQuery) *v5.Having {
	if len(v3Having) == 0 {
		return nil
	}

	expressions := []string{}
	for _, h := range v3Having {
		// The v5 having expression must reference the aggregation expression,
		// not the v3 column name, so rewrite the column before rendering.
		if v3Query.DataSource == v3.DataSourceLogs || v3Query.DataSource == v3.DataSourceTraces {
			h.ColumnName = buildTraceAggregationExpression(v3Query)
		} else {
			if v3Query.Expression != v3Query.QueryName {
				h.ColumnName = v3Query.Expression
			} else {
				h.ColumnName = fmt.Sprintf("%s(%s)", v3Query.SpaceAggregation, v3Query.AggregateAttribute.Key)
			}
		}
		expr := buildHavingExpression(h)
		if expr != "" {
			expressions = append(expressions, expr)
		}
	}

	if len(expressions) == 0 {
		return nil
	}

	return &v5.Having{
		Expression: strings.Join(expressions, " AND "),
	}
}

func buildHavingExpression(having v3.Having) string {
	switch having.Operator {
	case v3.HavingOperatorEqual:
		return fmt.Sprintf("%s = %s", having.ColumnName, formatValue(having.Value))
	case v3.HavingOperatorNotEqual:
		return fmt.Sprintf("%s != %s", having.ColumnName, formatValue(having.Value))
	case v3.HavingOperatorGreaterThan:
		return fmt.Sprintf("%s > %s", having.ColumnName, formatValue(having.Value))
	case v3.HavingOperatorGreaterThanOrEq:
		return fmt.Sprintf("%s >= %s", having.ColumnName, formatValue(having.Value))
	case v3.HavingOperatorLessThan:
		return fmt.Sprintf("%s < %s", having.ColumnName, formatValue(having.Value))
	case v3.HavingOperatorLessThanOrEq:
		return fmt.Sprintf("%s <= %s", having.ColumnName, formatValue(having.Value))
	case v3.HavingOperatorIn:
		return fmt.Sprintf("%s IN %s", having.ColumnName, formatValue(having.Value))
	case v3.HavingOperatorNotIn:
		return fmt.Sprintf("%s NOT IN %s", having.ColumnName, formatValue(having.Value))
	default:
		return ""
	}
}
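A hedged sketch of a having conversion: for a traces count query the v3 column name is replaced by the aggregation expression before rendering.

	hv := convertHaving(
		[]v3.Having{{Operator: v3.HavingOperatorGreaterThan, Value: 100}},
		&v3.BuilderQuery{DataSource: v3.DataSourceTraces, AggregateOperator: v3.AggregateOperatorCount},
	)
	// expected: hv.Expression == "count() > 100"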

func convertFunctions(v3Functions []v3.Function) []v5.Function {
	v5Functions := []v5.Function{}
	for _, fn := range v3Functions {
		v5Fn := v5.Function{
			Name: convertFunctionName(fn.Name),
			Args: []v5.FunctionArg{},
		}

		for _, arg := range fn.Args {
			v5Fn.Args = append(v5Fn.Args, v5.FunctionArg{
				Value: arg,
			})
		}

		for name, value := range fn.NamedArgs {
			v5Fn.Args = append(v5Fn.Args, v5.FunctionArg{
				Name:  name,
				Value: value,
			})
		}

		v5Functions = append(v5Functions, v5Fn)
	}
	return v5Functions
}

func convertFunctionName(v3Name v3.FunctionName) v5.FunctionName {
	switch v3Name {
	case v3.FunctionNameCutOffMin:
		return v5.FunctionNameCutOffMin
	case v3.FunctionNameCutOffMax:
		return v5.FunctionNameCutOffMax
	case v3.FunctionNameClampMin:
		return v5.FunctionNameClampMin
	case v3.FunctionNameClampMax:
		return v5.FunctionNameClampMax
	case v3.FunctionNameAbsolute:
		return v5.FunctionNameAbsolute
	case v3.FunctionNameRunningDiff:
		return v5.FunctionNameRunningDiff
	case v3.FunctionNameLog2:
		return v5.FunctionNameLog2
	case v3.FunctionNameLog10:
		return v5.FunctionNameLog10
	case v3.FunctionNameCumSum:
		return v5.FunctionNameCumulativeSum
	case v3.FunctionNameEWMA3:
		return v5.FunctionNameEWMA3
	case v3.FunctionNameEWMA5:
		return v5.FunctionNameEWMA5
	case v3.FunctionNameEWMA7:
		return v5.FunctionNameEWMA7
	case v3.FunctionNameMedian3:
		return v5.FunctionNameMedian3
	case v3.FunctionNameMedian5:
		return v5.FunctionNameMedian5
	case v3.FunctionNameMedian7:
		return v5.FunctionNameMedian7
	case v3.FunctionNameTimeShift:
		return v5.FunctionNameTimeShift
	case v3.FunctionNameAnomaly:
		return v5.FunctionNameAnomaly
	default:
		return v5.FunctionName{}
	}
}

func convertSelectColumns(cols []v3.AttributeKey) []telemetrytypes.TelemetryFieldKey {
	fields := []telemetrytypes.TelemetryFieldKey{}

	for _, key := range cols {
		newKey := telemetrytypes.TelemetryFieldKey{
			Name: key.Key,
		}

		if _, exists := constants.NewStaticFieldsTraces[key.Key]; exists {
			fields = append(fields, newKey)
			continue
		}

		if _, exists := constants.DeprecatedStaticFieldsTraces[key.Key]; exists {
			fields = append(fields, newKey)
			continue
		}

		if _, exists := constants.StaticFieldsLogsV3[key.Key]; exists {
			fields = append(fields, newKey)
			continue
		}

		newKey.FieldDataType = convertDataType(key.DataType)
		newKey.FieldContext = convertAttributeType(key.Type)
		newKey.Materialized = key.IsColumn
		// Non-static columns must also be collected; without this append
		// they would be silently dropped.
		fields = append(fields, newKey)
	}
	return fields
}

func convertDataType(v3Type v3.AttributeKeyDataType) telemetrytypes.FieldDataType {
	switch v3Type {
	case v3.AttributeKeyDataTypeString:
		return telemetrytypes.FieldDataTypeString
	case v3.AttributeKeyDataTypeInt64:
		return telemetrytypes.FieldDataTypeInt64
	case v3.AttributeKeyDataTypeFloat64:
		return telemetrytypes.FieldDataTypeFloat64
	case v3.AttributeKeyDataTypeBool:
		return telemetrytypes.FieldDataTypeBool
	case v3.AttributeKeyDataTypeArrayString:
		return telemetrytypes.FieldDataTypeArrayString
	case v3.AttributeKeyDataTypeArrayInt64:
		return telemetrytypes.FieldDataTypeArrayInt64
	case v3.AttributeKeyDataTypeArrayFloat64:
		return telemetrytypes.FieldDataTypeArrayFloat64
	case v3.AttributeKeyDataTypeArrayBool:
		return telemetrytypes.FieldDataTypeArrayBool
	default:
		return telemetrytypes.FieldDataTypeUnspecified
	}
}

func convertAttributeType(v3Type v3.AttributeKeyType) telemetrytypes.FieldContext {
	switch v3Type {
	case v3.AttributeKeyTypeTag:
		return telemetrytypes.FieldContextAttribute
	case v3.AttributeKeyTypeResource:
		return telemetrytypes.FieldContextResource
	case v3.AttributeKeyTypeInstrumentationScope:
		return telemetrytypes.FieldContextScope
	default:
		return telemetrytypes.FieldContextUnspecified
	}
}

func convertTemporality(v3Temp v3.Temporality) metrictypes.Temporality {
	switch v3Temp {
	case v3.Delta:
		return metrictypes.Delta
	case v3.Cumulative:
		return metrictypes.Cumulative
	default:
		return metrictypes.Unspecified
	}
}
func convertTimeAggregation(v3TimeAgg v3.TimeAggregation) metrictypes.TimeAggregation {
	switch v3TimeAgg {
	case v3.TimeAggregationAnyLast:
		return metrictypes.TimeAggregationLatest
	case v3.TimeAggregationSum:
		return metrictypes.TimeAggregationSum
	case v3.TimeAggregationAvg:
		return metrictypes.TimeAggregationAvg
	case v3.TimeAggregationMin:
		return metrictypes.TimeAggregationMin
	case v3.TimeAggregationMax:
		return metrictypes.TimeAggregationMax
	case v3.TimeAggregationCount:
		return metrictypes.TimeAggregationCount
	case v3.TimeAggregationCountDistinct:
		return metrictypes.TimeAggregationCountDistinct
	case v3.TimeAggregationRate:
		return metrictypes.TimeAggregationRate
	case v3.TimeAggregationIncrease:
		return metrictypes.TimeAggregationIncrease
	default:
		return metrictypes.TimeAggregationUnspecified
	}
}

func convertSpaceAggregation(v3SpaceAgg v3.SpaceAggregation) metrictypes.SpaceAggregation {
	switch v3SpaceAgg {
	case v3.SpaceAggregationSum:
		return metrictypes.SpaceAggregationSum
	case v3.SpaceAggregationAvg:
		return metrictypes.SpaceAggregationAvg
	case v3.SpaceAggregationMin:
		return metrictypes.SpaceAggregationMin
	case v3.SpaceAggregationMax:
		return metrictypes.SpaceAggregationMax
	case v3.SpaceAggregationCount:
		return metrictypes.SpaceAggregationCount
	case v3.SpaceAggregationPercentile50:
		return metrictypes.SpaceAggregationPercentile50
	case v3.SpaceAggregationPercentile75:
		return metrictypes.SpaceAggregationPercentile75
	case v3.SpaceAggregationPercentile90:
		return metrictypes.SpaceAggregationPercentile90
	case v3.SpaceAggregationPercentile95:
		return metrictypes.SpaceAggregationPercentile95
	case v3.SpaceAggregationPercentile99:
		return metrictypes.SpaceAggregationPercentile99
	default:
		return metrictypes.SpaceAggregationUnspecified
	}
}
func convertClickHouseQueries(v3Queries map[string]*v3.ClickHouseQuery, v5Composite *v5.CompositeQuery) error {
	for name, query := range v3Queries {
		if query == nil {
			continue
		}

		v5Envelope := v5.QueryEnvelope{
			Type: v5.QueryTypeClickHouseSQL,
			Spec: v5.ClickHouseQuery{
				Name:     name,
				Query:    query.Query,
				Disabled: query.Disabled,
			},
		}
		v5Composite.Queries = append(v5Composite.Queries, v5Envelope)
	}
	return nil
}

func convertPromQueries(v3Queries map[string]*v3.PromQuery, step int64, v5Composite *v5.CompositeQuery) error {
	for name, query := range v3Queries {
		if query == nil {
			continue
		}

		v5Envelope := v5.QueryEnvelope{
			Type: v5.QueryTypePromQL,
			Spec: v5.PromQuery{
				Name:     name,
				Query:    query.Query,
				Disabled: query.Disabled,
				Step:     v5.Step{Duration: time.Duration(step) * time.Second},
				Stats:    query.Stats != "",
			},
		}
		v5Composite.Queries = append(v5Composite.Queries, v5Envelope)
	}
	return nil
}
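Note the unit change for PromQL steps: v3 carries the step as seconds in an int64, while the v5 spec wants a time.Duration. A one-line sketch of the conversion above:

	stepSeconds := int64(60)
	step := v5.Step{Duration: time.Duration(stepSeconds) * time.Second} // 60 → 1m0s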
pkg/query-service/transition/v3_to_v5_resp.go (new file, 442 lines)
@@ -0,0 +1,442 @@
package transition

import (
	"encoding/json"
	"fmt"
	"sort"
	"strings"

	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
	v5 "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

func ConvertV3ResponseToV5(v3Response *v3.QueryRangeResponse, requestType v5.RequestType) (*v5.QueryRangeResponse, error) {
	if v3Response == nil {
		return nil, fmt.Errorf("v3 response is nil")
	}

	v5Response := &v5.QueryRangeResponse{
		Type: requestType,
	}

	switch requestType {
	case v5.RequestTypeTimeSeries:
		data, err := convertToTimeSeriesData(v3Response.Result)
		if err != nil {
			return nil, err
		}
		v5Response.Data = data

	case v5.RequestTypeScalar:
		data, err := convertToScalarData(v3Response.Result)
		if err != nil {
			return nil, err
		}
		v5Response.Data = data

	case v5.RequestTypeRaw:
		data, err := convertToRawData(v3Response.Result)
		if err != nil {
			return nil, err
		}
		v5Response.Data = data

	default:
		return nil, fmt.Errorf("unsupported request type: %v", requestType)
	}

	return v5Response, nil
}
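Typical call site (sketch; v3Resp stands in for a *v3.QueryRangeResponse already in hand):

	v5Resp, err := ConvertV3ResponseToV5(v3Resp, v5.RequestTypeTimeSeries)
	if err != nil {
		// only the time series, scalar, and raw request types are supported
		return err
	}
	_ = v5Resp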

func convertToTimeSeriesData(v3Results []*v3.Result) ([]*v5.TimeSeriesData, error) {
	v5Data := []*v5.TimeSeriesData{}

	for _, result := range v3Results {
		if result == nil {
			continue
		}

		tsData := &v5.TimeSeriesData{
			QueryName:    result.QueryName,
			Aggregations: []*v5.AggregationBucket{},
		}

		if len(result.Series) > 0 {
			bucket := &v5.AggregationBucket{
				Index:  0,
				Alias:  "",
				Series: convertSeries(result.Series),
			}
			tsData.Aggregations = append(tsData.Aggregations, bucket)
		}

		v5Data = append(v5Data, tsData)
	}

	return v5Data, nil
}

func convertSeries(v3Series []*v3.Series) []*v5.TimeSeries {
	v5Series := []*v5.TimeSeries{}

	for _, series := range v3Series {
		if series == nil {
			continue
		}

		v5TimeSeries := &v5.TimeSeries{
			Labels: convertLabels(series.Labels),
			Values: convertPoints(series.Points),
		}

		v5Series = append(v5Series, v5TimeSeries)
	}

	return v5Series
}
func convertLabels(v3Labels map[string]string) []*v5.Label {
	v5Labels := []*v5.Label{}

	keys := make([]string, 0, len(v3Labels))
	for k := range v3Labels {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, key := range keys {
		v5Labels = append(v5Labels, &v5.Label{
			Key: telemetrytypes.TelemetryFieldKey{
				Name: key,
			},
			Value: v3Labels[key],
		})
	}

	return v5Labels
}
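Sorting the map keys first makes the label order deterministic, since Go deliberately randomizes map iteration order. For example:

	labels := convertLabels(map[string]string{"env": "prod", "az": "us-east-1a"})
	// always: labels[0].Key.Name == "az", labels[1].Key.Name == "env"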

func convertPoints(v3Points []v3.Point) []*v5.TimeSeriesValue {
	v5Values := []*v5.TimeSeriesValue{}

	for _, point := range v3Points {
		v5Values = append(v5Values, &v5.TimeSeriesValue{
			Timestamp: point.Timestamp,
			Value:     point.Value,
		})
	}

	return v5Values
}

func convertToScalarData(v3Results []*v3.Result) (*v5.ScalarData, error) {
	scalarData := &v5.ScalarData{
		Columns: []*v5.ColumnDescriptor{},
		Data:    [][]any{},
	}

	for _, result := range v3Results {
		if result == nil || result.Table == nil {
			continue
		}

		for _, col := range result.Table.Columns {
			columnType := v5.ColumnTypeGroup
			if col.IsValueColumn {
				columnType = v5.ColumnTypeAggregation
			}

			scalarData.Columns = append(scalarData.Columns, &v5.ColumnDescriptor{
				TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
					Name: col.Name,
				},
				QueryName:        col.QueryName,
				AggregationIndex: 0,
				Type:             columnType,
			})
		}

		for _, row := range result.Table.Rows {
			rowData := []any{}
			for _, col := range result.Table.Columns {
				if val, ok := row.Data[col.Name]; ok {
					rowData = append(rowData, val)
				} else {
					rowData = append(rowData, nil)
				}
			}
			scalarData.Data = append(scalarData.Data, rowData)
		}
	}

	return scalarData, nil
}
func convertToRawData(v3Results []*v3.Result) ([]*v5.RawData, error) {
	v5Data := []*v5.RawData{}

	for _, result := range v3Results {
		if result == nil {
			continue
		}

		rawData := &v5.RawData{
			QueryName:  result.QueryName,
			NextCursor: "",
			Rows:       []*v5.RawRow{},
		}

		for _, row := range result.List {
			if row == nil {
				continue
			}

			dataMap := make(map[string]*any)
			for k, v := range row.Data {
				// Copy into a fresh variable before taking its address;
				// taking &v directly would alias every entry to the loop
				// variable under pre-Go-1.22 loop semantics.
				val := v
				dataMap[k] = &val
			}

			rawData.Rows = append(rawData.Rows, &v5.RawRow{
				Timestamp: row.Timestamp,
				Data:      dataMap,
			})
		}

		v5Data = append(v5Data, rawData)
	}

	return v5Data, nil
}
func LogV5Response(response *v5.QueryRangeResponse, logger func(string)) {
	if response == nil {
		logger("Response: nil")
		return
	}

	logger(fmt.Sprintf("[%s] Meta{rows:%d bytes:%d ms:%d}",
		response.Type, response.Meta.RowsScanned, response.Meta.BytesScanned, response.Meta.DurationMS))

	switch response.Type {
	case v5.RequestTypeTimeSeries:
		logTimeSeriesDataCompact(response.Data, logger)
	case v5.RequestTypeScalar:
		logScalarDataCompact(response.Data, logger)
	case v5.RequestTypeRaw:
		logRawDataCompact(response.Data, logger)
	default:
		logger(fmt.Sprintf("Unknown response type: %v", response.Type))
	}
}
func logTimeSeriesDataCompact(data any, logger func(string)) {
	tsData, ok := data.([]*v5.TimeSeriesData)
	if !ok {
		logger("ERROR: Failed to cast data to TimeSeriesData")
		return
	}

	sort.Slice(tsData, func(i, j int) bool {
		return tsData[i].QueryName < tsData[j].QueryName
	})

	for _, ts := range tsData {
		allSeries := flattenSeries(ts.Aggregations)

		sort.Slice(allSeries, func(i, j int) bool {
			return createLabelSignature(allSeries[i].Labels) < createLabelSignature(allSeries[j].Labels)
		})

		for _, series := range allSeries {
			labels := []string{}
			for _, label := range series.Labels {
				labels = append(labels, fmt.Sprintf("%s:%v", label.Key.Name, label.Value))
			}
			labelStr := strings.Join(labels, ",")

			values := make([]*v5.TimeSeriesValue, len(series.Values))
			copy(values, series.Values)
			sort.Slice(values, func(i, j int) bool {
				return values[i].Timestamp < values[j].Timestamp
			})

			valueStrs := []string{}
			for _, val := range values {
				relTime := val.Timestamp
				if len(values) > 0 && values[0].Timestamp > 0 {
					relTime = (val.Timestamp - values[0].Timestamp) / 1000 // Convert to seconds
				}
				valueStrs = append(valueStrs, fmt.Sprintf("%d:%.2f", relTime, val.Value))
			}

			logger(fmt.Sprintf("%s {%s} [%s]", ts.QueryName, labelStr, strings.Join(valueStrs, " ")))
		}
	}
}

func createLabelSignature(labels []*v5.Label) string {
	parts := []string{}
	for _, label := range labels {
		parts = append(parts, fmt.Sprintf("%s=%v", label.Key.Name, label.Value))
	}
	sort.Strings(parts)
	return strings.Join(parts, ",")
}
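A sketch of the compact line format this produces (hypothetical values; timestamps assumed to be in milliseconds, hence the /1000 above):

	ts := []*v5.TimeSeriesData{{
		QueryName: "A",
		Aggregations: []*v5.AggregationBucket{{
			Series: []*v5.TimeSeries{{
				Labels: []*v5.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "checkout"}},
				Values: []*v5.TimeSeriesValue{{Timestamp: 1700000000000, Value: 1}, {Timestamp: 1700000060000, Value: 2}},
			}},
		}},
	}}
	logTimeSeriesDataCompact(ts, func(s string) { fmt.Println(s) })
	// expected line: A {service:checkout} [0:1.00 60:2.00]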
func logScalarDataCompact(data any, logger func(string)) {
	scalar, ok := data.(*v5.ScalarData)
	if !ok {
		logger("ERROR: Failed to cast data to ScalarData")
		return
	}

	colNames := []string{}
	for _, col := range scalar.Columns {
		colNames = append(colNames, col.Name)
	}

	logger(fmt.Sprintf("SCALAR [%s]", strings.Join(colNames, "|")))

	for i, row := range scalar.Data {
		rowVals := []string{}
		for _, val := range row {
			rowVals = append(rowVals, fmt.Sprintf("%v", val))
		}
		logger(fmt.Sprintf("  %d: [%s]", i, strings.Join(rowVals, "|")))
	}
}

func flattenSeries(buckets []*v5.AggregationBucket) []*v5.TimeSeries {
	var allSeries []*v5.TimeSeries
	for _, bucket := range buckets {
		allSeries = append(allSeries, bucket.Series...)
	}
	return allSeries
}
func logRawDataCompact(data any, logger func(string)) {
	rawData, ok := data.([]*v5.RawData)
	if !ok {
		logger("ERROR: Failed to cast data to RawData")
		return
	}

	sort.Slice(rawData, func(i, j int) bool {
		return rawData[i].QueryName < rawData[j].QueryName
	})

	for _, rd := range rawData {
		logger(fmt.Sprintf("RAW %s (rows:%d cursor:%s)", rd.QueryName, len(rd.Rows), rd.NextCursor))

		rows := make([]*v5.RawRow, len(rd.Rows))
		copy(rows, rd.Rows)
		sort.Slice(rows, func(i, j int) bool {
			return rows[i].Timestamp.Before(rows[j].Timestamp)
		})

		allFields := make(map[string]bool)
		for _, row := range rows {
			for k := range row.Data {
				allFields[k] = true
			}
		}

		fieldNames := []string{}
		for k := range allFields {
			fieldNames = append(fieldNames, k)
		}
		sort.Strings(fieldNames)

		logger(fmt.Sprintf("  Fields: [%s]", strings.Join(fieldNames, "|")))

		for i, row := range rows {
			vals := []string{}
			for _, field := range fieldNames {
				if val, exists := row.Data[field]; exists && val != nil {
					vals = append(vals, fmt.Sprintf("%v", *val))
				} else {
					vals = append(vals, "-")
				}
			}
			tsStr := row.Timestamp.Format("15:04:05")
			logger(fmt.Sprintf("  %d@%s: [%s]", i, tsStr, strings.Join(vals, "|")))
		}
	}
}
func LogV5ResponseJSON(response *v5.QueryRangeResponse, logger func(string)) {
	sortedResponse := sortV5ResponseForLogging(response)

	jsonBytes, err := json.MarshalIndent(sortedResponse, "", "  ")
	if err != nil {
		logger(fmt.Sprintf("ERROR: Failed to marshal response: %v", err))
		return
	}

	logger(string(jsonBytes))
}
func sortV5ResponseForLogging(response *v5.QueryRangeResponse) *v5.QueryRangeResponse {
	if response == nil {
		return nil
	}

	responseCopy := &v5.QueryRangeResponse{
		Type: response.Type,
		Meta: response.Meta,
	}

	switch response.Type {
	case v5.RequestTypeTimeSeries:
		if tsData, ok := response.Data.([]*v5.TimeSeriesData); ok {
			sortedData := make([]*v5.TimeSeriesData, len(tsData))
			for i, ts := range tsData {
				sortedData[i] = &v5.TimeSeriesData{
					QueryName:    ts.QueryName,
					Aggregations: make([]*v5.AggregationBucket, len(ts.Aggregations)),
				}

				for j, bucket := range ts.Aggregations {
					sortedBucket := &v5.AggregationBucket{
						Index:  bucket.Index,
						Alias:  bucket.Alias,
						Series: make([]*v5.TimeSeries, len(bucket.Series)),
					}

					for k, series := range bucket.Series {
						sortedSeries := &v5.TimeSeries{
							Labels: series.Labels,
							Values: make([]*v5.TimeSeriesValue, len(series.Values)),
						}
						copy(sortedSeries.Values, series.Values)

						sort.Slice(sortedSeries.Values, func(i, j int) bool {
							return sortedSeries.Values[i].Timestamp < sortedSeries.Values[j].Timestamp
						})

						sortedBucket.Series[k] = sortedSeries
					}

					sort.Slice(sortedBucket.Series, func(i, j int) bool {
						return createLabelSignature(sortedBucket.Series[i].Labels) <
							createLabelSignature(sortedBucket.Series[j].Labels)
					})

					sortedData[i].Aggregations[j] = sortedBucket
				}
			}

			sort.Slice(sortedData, func(i, j int) bool {
				return sortedData[i].QueryName < sortedData[j].QueryName
			})

			responseCopy.Data = sortedData
		}
	default:
		responseCopy.Data = response.Data
	}

	return responseCopy
}
@@ -417,14 +417,17 @@ func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelecto
	} else {
		fieldConds = append(fieldConds, sb.Like("attr_name", "%"+fieldKeySelector.Name+"%"))
	}
	fieldConds = append(fieldConds, sb.NotLike("attr_name", "\\_\\_%"))

	if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified {
		fieldConds = append(fieldConds, sb.E("attr_type", fieldKeySelector.FieldContext.TagType()))
	}
	// note: type and datatype do not have much significance in metrics

	if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
		fieldConds = append(fieldConds, sb.E("attr_datatype", fieldKeySelector.FieldDataType.TagDataType()))
	}
	// if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified {
	// 	fieldConds = append(fieldConds, sb.E("attr_type", fieldKeySelector.FieldContext.TagType()))
	// }

	// if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
	// 	fieldConds = append(fieldConds, sb.E("attr_datatype", fieldKeySelector.FieldDataType.TagDataType()))
	// }

	if fieldKeySelector.MetricContext != nil {
		fieldConds = append(fieldConds, sb.E("metric_name", fieldKeySelector.MetricContext.MetricName))
@@ -966,18 +969,15 @@ func (t *telemetryMetaStore) FetchTemporalityMulti(ctx context.Context, metricNa
	// Note: The columns are mixed in the current data - temporality column contains metric_name
	// and metric_name column contains temporality value, so we use the correct mapping
	sb := sqlbuilder.Select(
		"temporality as metric_name",
		"argMax(attr_string_value, last_reported_unix_milli) as temporality_value",
		"metric_name",
		"argMax(temporality, last_reported_unix_milli) as temporality",
	).From(t.metricsDBName + "." + t.metricsFieldsTblName)

	// Filter by metric names (in the temporality column due to data mix-up)
	sb.Where(sb.In("temporality", metricNames))

	// Only fetch temporality metadata rows (where attr_name = '__temporality__')
	sb.Where(sb.E("attr_name", "__temporality__"))
	sb.Where(sb.In("metric_name", metricNames))

	// Group by metric name to get one temporality per metric
	sb.GroupBy("temporality")
	sb.GroupBy("metric_name")

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
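For reference, the corrected builder emits a query shaped roughly like this (a sketch; database and table identifiers are illustrative, since the real names come from t.metricsDBName and t.metricsFieldsTblName):

	// Shape of the corrected query:
	const wantShape = `
	SELECT metric_name,
	       argMax(temporality, last_reported_unix_milli) AS temporality
	FROM signoz_metrics.metadata
	WHERE attr_name = '__temporality__' AND metric_name IN (...)
	GROUP BY metric_name`
	// argMax(x, ts) returns the x from the row with the greatest ts, i.e. the
	// most recently reported temporality for each metric.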
@@ -1,5 +1,7 @@
package telemetrytraces

import "github.com/SigNoz/signoz/pkg/types/telemetrytypes"

var (
	IntrinsicFields = []string{
		"trace_id",
@@ -53,4 +55,37 @@ var (
	}
	SpanSearchScopeRoot       = "isroot"
	SpanSearchScopeEntryPoint = "isentrypoint"

	DefaultFields = []telemetrytypes.TelemetryFieldKey{
		{
			Name:         "timestamp",
			FieldContext: telemetrytypes.FieldContextSpan,
		},
		{
			Name:         "span_id",
			FieldContext: telemetrytypes.FieldContextSpan,
		},
		{
			Name:         "trace_id",
			FieldContext: telemetrytypes.FieldContextSpan,
		},
		{
			Name:         "name",
			FieldContext: telemetrytypes.FieldContextSpan,
		},
		{
			Name:          "service.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:         "duration_nano",
			FieldContext: telemetrytypes.FieldContextSpan,
		},
		{
			Name:         "response_status_code",
			FieldContext: telemetrytypes.FieldContextSpan,
		},
	}
)
@@ -171,19 +171,14 @@ func (b *traceQueryStatementBuilder) buildListQuery(
		cteArgs = append(cteArgs, args)
	}

	// Select default columns
	sb.Select(
		"timestamp",
		"trace_id",
		"span_id",
		"name",
		sqlbuilder.Escape("resource_string_service$$name"),
		"duration_nano",
		"response_status_code",
	)
	selectedFields := query.SelectFields

	if len(selectedFields) == 0 {
		selectedFields = DefaultFields
	}

	// TODO: should we deprecate `SelectFields` and return everything from a span like we do for logs?
	for _, field := range query.SelectFields {
	for _, field := range selectedFields {
		colExpr, err := b.fm.ColumnExpressionFor(ctx, &field, keys)
		if err != nil {
			return nil, err
@@ -14,7 +14,6 @@ import (
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/prometheus/alertmanager/config"
	commoncfg "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/uptrace/bun"
)

@@ -398,5 +397,4 @@ type ConfigStore interface {
func init() {
	commoncfg.MarshalSecretValue = true
	config.MarshalSecretValue = true
	model.NameValidationScheme = model.UTF8Validation
}
@@ -20,6 +20,8 @@ type QueryBuilderFormula struct {
	// expression to apply to the query
	Expression string `json:"expression"`

	Disabled bool `json:"disabled,omitempty"`

	// order by keys and directions
	Order []OrderBy `json:"order,omitempty"`
@@ -27,7 +27,7 @@ func NewUUID(value string) (UUID, error) {
}

func NewUUIDFromBytes(value []byte) (UUID, error) {
	val, err := uuid.ParseBytes(value)
	val, err := uuid.FromBytes(value)
	if err != nil {
		return UUID{}, err
	}
@@ -69,6 +69,10 @@ func (enum UUID) String() string {
	return enum.val.String()
}

func (enum UUID) MarshalBinary() ([]byte, error) {
	return enum.val.MarshalBinary()
}

func (enum UUID) MarshalJSON() ([]byte, error) {
	return json.Marshal(enum.StringValue())
}
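The swap matters because google/uuid's two constructors accept different inputs: ParseBytes expects the textual form of a UUID, while FromBytes expects exactly 16 raw bytes. A sketch:

	import "github.com/google/uuid"

	text := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	u1, _ := uuid.ParseBytes(text) // ok: textual form

	raw, _ := u1.MarshalBinary()   // 16 raw bytes
	u2, _ := uuid.FromBytes(raw)   // ok: binary form
	// uuid.ParseBytes(raw) would fail here, which is why NewUUIDFromBytes
	// needs FromBytes for values produced by MarshalBinary (added above).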