Compare commits

...

5 Commits

Author SHA1 Message Date
Srikanth Chekuri
201d5c24a5 test(integration): add dtype=object to preserve types and insert resource keys (#9897) 2025-12-30 00:28:34 +05:30
Aditya Singh
ab8b42fbbe fix: update query suggestion hook to accept options (#9894) 2025-12-29 11:23:34 +00:00
Abhi kumar
f99821bc40 perf: optimize uplot chart data processing (#9881) 2025-12-29 14:40:51 +05:30
Niladri Adhikary
7c051601f2 fix: normalize context-prefixed field keys (#9089)
* feat: normalize context-prefixed field keys

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* test: added tests validation for context-prefixed field

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* refactor: moved logic to parse.go

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: attribute key edge case

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: corrupt field context

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: corrupt field context

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* refactor: parse and signal

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* refactor: mismatch for unknown signal

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

---------

Signed-off-by: “niladrix719” <niladrix719@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-12-28 23:17:44 +05:30
Niladri Adhikary
b9f9c00da5 feat: implement case-insensitive query name handling in formula evaluation (#9302)
* feat: implement case-insensitive query name handling in formula evaluation

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* feat: optimized lookups

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* feat: updated naming

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: normalize keys in canDefaultZero for case insensitivity

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: lookup

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

* fix: canDefaultZero lookup

Signed-off-by: “niladrix719” <niladrix719@gmail.com>

---------

Signed-off-by: “niladrix719” <niladrix719@gmail.com>
Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-12-28 22:29:37 +05:30
9 changed files with 450 additions and 63 deletions

View File

@@ -132,11 +132,20 @@ function UplotPanelWrapper({
[selectedGraph, widget?.panelTypes, widget?.stackedBarChart],
);
-const chartData = getUPlotChartData(
-queryResponse?.data?.payload,
-widget.fillSpans,
-stackedBarChart,
-hiddenGraph,
+const chartData = useMemo(
+() =>
+getUPlotChartData(
+queryResponse?.data?.payload,
+widget.fillSpans,
+stackedBarChart,
+hiddenGraph,
+),
+[
+queryResponse?.data?.payload,
+widget.fillSpans,
+stackedBarChart,
+hiddenGraph,
+],
);
useEffect(() => {
@@ -293,7 +302,7 @@ function UplotPanelWrapper({
)}
{isFullViewMode && setGraphVisibility && !stackedBarChart && (
<GraphManager
-data={getUPlotChartData(queryResponse?.data?.payload, widget.fillSpans)}
+data={chartData}
name={widget.id}
options={options}
yAxisUnit={widget.yAxisUnit}
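
The change above wraps the chart-data derivation in useMemo, so getUPlotChartData runs only when one of its inputs changes, and the full-view GraphManager now reuses the same memoized array instead of recomputing it. A minimal sketch of the pattern, assuming the four-argument signature shown in the diff (the declare stub stands in for the real import):

import { useMemo } from 'react';

// Illustrative stub; the real function lives in the uPlot chart utils.
declare function getUPlotChartData(
  payload: unknown,
  fillSpans?: boolean,
  stackedBarChart?: boolean,
  hiddenGraph?: Record<number, boolean>,
): (number | null)[][];

function useChartData(
  payload: unknown,
  fillSpans: boolean | undefined,
  stackedBarChart: boolean,
  hiddenGraph: Record<number, boolean> | undefined,
): (number | null)[][] {
  // Recompute only when an input changes; the stable array identity also
  // lets consumers such as GraphManager skip redundant work.
  return useMemo(
    () => getUPlotChartData(payload, fillSpans, stackedBarChart, hiddenGraph),
    [payload, fillSpans, stackedBarChart, hiddenGraph],
  );
}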

View File

@@ -1,8 +1,6 @@
import { getValueSuggestions } from 'api/querySuggestions/getValueSuggestion';
import { AxiosError, AxiosResponse } from 'axios';
import { useQuery, UseQueryOptions, UseQueryResult } from 'react-query';
-import { ErrorResponse } from 'react-router-dom-v5-compat';
-import { SuccessResponse } from 'types/api';
import { QueryKeyValueSuggestionsResponseProps } from 'types/api/querySuggestions/types';
export const useGetQueryKeyValueSuggestions = ({
@@ -11,13 +9,15 @@ export const useGetQueryKeyValueSuggestions = ({
searchText,
signalSource,
metricName,
+options,
}: {
key: string;
signal: 'traces' | 'logs' | 'metrics';
searchText?: string;
signalSource?: 'meter' | '';
options?: UseQueryOptions<
-SuccessResponse<QueryKeyValueSuggestionsResponseProps> | ErrorResponse
+AxiosResponse<QueryKeyValueSuggestionsResponseProps>,
+AxiosError
>;
metricName?: string;
}): UseQueryResult<
@@ -41,4 +41,5 @@ export const useGetQueryKeyValueSuggestions = ({
signalSource: signalSource as 'meter' | '',
metricName: metricName || '',
}),
+...options,
});
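
Because the hook now spreads ...options into useQuery, callers can tune fetching behaviour without modifying the hook itself. A hedged usage sketch; the component, its state, and the import path are illustrative, not part of the diff:

import { useState } from 'react';
// assumed path, for illustration only
import { useGetQueryKeyValueSuggestions } from 'hooks/querySuggestions/useGetQueryKeyValueSuggestions';

function ValueSuggestions(): JSX.Element {
  const [open, setOpen] = useState(false);
  const { data, isLoading } = useGetQueryKeyValueSuggestions({
    key: 'service.name',
    signal: 'traces',
    options: {
      enabled: open, // fetch lazily, only after the dropdown opens
      staleTime: 30_000, // serve cached suggestions for 30 seconds
    },
  });
  return (
    <div>
      <button type="button" onClick={(): void => setOpen(true)}>
        Show suggestions
      </button>
      {open && !isLoading && <pre>{JSON.stringify(data?.data, null, 2)}</pre>}
    </div>
  );
}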

View File

@@ -1,6 +1,6 @@
import { themeColors } from 'constants/theme';
import getLabelName from 'lib/getLabelName';
-import { cloneDeep, isUndefined } from 'lodash-es';
+import { isUndefined } from 'lodash-es';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { QueryData } from 'types/api/widgets/getQuery';
@@ -8,7 +8,7 @@ import { normalizePlotValue } from './dataUtils';
import { generateColor } from './generateColor';
function getXAxisTimestamps(seriesList: QueryData[]): number[] {
-const timestamps = new Set();
+const timestamps = new Set<number>();
seriesList.forEach((series: { values?: [number, string][] }) => {
if (series?.values) {
@@ -18,54 +18,71 @@ function getXAxisTimestamps(seriesList: QueryData[]): number[] {
}
});
-const timestampsArr: number[] | unknown[] = Array.from(timestamps) || [];
-// eslint-disable-next-line @typescript-eslint/ban-ts-comment
-// @ts-ignore
-return timestampsArr.sort((a, b) => a - b);
+const timestampsArr = Array.from(timestamps);
+timestampsArr.sort((a, b) => a - b);
+return timestampsArr;
}
-function fillMissingXAxisTimestamps(timestampArr: number[], data: any[]): any {
+// eslint-disable-next-line sonarjs/cognitive-complexity
+function fillMissingXAxisTimestamps(
+timestampArr: number[],
+data: Array<{ values?: [number, string][] }>,
+): (number | null)[][] {
// Generate a set of all timestamps in the range
const allTimestampsSet = new Set(timestampArr);
-const processedData = cloneDeep(data);
+const result: (number | null)[][] = [];
-// Fill missing timestamps with null values
-processedData.forEach((entry: { values: (number | null)[][] }) => {
-const existingTimestamps = new Set(
-(entry?.values ?? []).map((value) => value[0]),
-);
+// Process each series entry
+for (let i = 0; i < data.length; i++) {
+const entry = data[i];
+if (!entry?.values) {
+result.push([]);
+} else {
+// Build Set of existing timestamps directly (avoid intermediate array)
+const existingTimestamps = new Set<number>();
+const valuesMap = new Map<number, number | null>();
-const missingTimestamps = Array.from(allTimestampsSet).filter(
-(timestamp) => !existingTimestamps.has(timestamp),
-);
+for (let j = 0; j < entry.values.length; j++) {
+const [timestamp, value] = entry.values[j];
+existingTimestamps.add(timestamp);
+valuesMap.set(timestamp, normalizePlotValue(value));
+}
-missingTimestamps.forEach((timestamp) => {
-const value = null;
+// Find missing timestamps by iterating Set directly (avoid Array.from + filter)
+const missingTimestamps: number[] = [];
+const allTimestampsArray = Array.from(allTimestampsSet);
+for (let k = 0; k < allTimestampsArray.length; k++) {
+const timestamp = allTimestampsArray[k];
+if (!existingTimestamps.has(timestamp)) {
+missingTimestamps.push(timestamp);
+}
+}
-entry?.values?.push([timestamp, value]);
-});
+// Add missing timestamps to map
+for (let j = 0; j < missingTimestamps.length; j++) {
+valuesMap.set(missingTimestamps[j], null);
+}
-entry?.values?.forEach((v) => {
-// eslint-disable-next-line no-param-reassign
-v[1] = normalizePlotValue(v[1]);
-});
+// Build sorted array of values
+const sortedTimestamps = Array.from(valuesMap.keys()).sort((a, b) => a - b);
+const yValues = sortedTimestamps.map((timestamp) => {
+const value = valuesMap.get(timestamp);
+return value !== undefined ? value : null;
+});
+result.push(yValues);
+}
+}
-// eslint-disable-next-line @typescript-eslint/ban-ts-comment
-// @ts-ignore
-entry?.values?.sort((a, b) => a[0] - b[0]);
-});
-return processedData.map((entry: { values: [number, string][] }) =>
-entry?.values?.map((value) => value[1]),
-);
+return result;
}
-function getStackedSeries(val: any): any {
-const series = cloneDeep(val) || [];
+function getStackedSeries(val: (number | null)[][]): (number | null)[][] {
+const series = val ? val.map((row: (number | null)[]) => [...row]) : [];
for (let i = series.length - 2; i >= 0; i--) {
for (let j = 0; j < series[i].length; j++) {
-series[i][j] += series[i + 1][j];
+series[i][j] = (series[i][j] || 0) + (series[i + 1][j] || 0);
}
}
@@ -110,6 +127,7 @@ const processAnomalyDetectionData = (
queryIndex < anomalyDetectionData.length;
queryIndex++
) {
+const queryData = anomalyDetectionData[queryIndex];
const {
series,
predictedSeries,
@@ -117,7 +135,7 @@ const processAnomalyDetectionData = (
lowerBoundSeries,
queryName,
legend,
-} = anomalyDetectionData[queryIndex];
+} = queryData;
for (let index = 0; index < series?.length; index++) {
const label = getLabelName(
@@ -129,14 +147,30 @@ const processAnomalyDetectionData = (
const objKey =
anomalyDetectionData.length > 1 ? `${queryName}-${label}` : label;
+// Single iteration instead of 5 separate map operations
+const { values: seriesValues } = series[index];
+const { values: predictedValues } = predictedSeries[index];
+const { values: upperBoundValues } = upperBoundSeries[index];
+const { values: lowerBoundValues } = lowerBoundSeries[index];
+// eslint-disable-next-line prefer-destructuring
+const length = seriesValues.length;
+const timestamps: number[] = new Array(length);
+const values: number[] = new Array(length);
+const predicted: number[] = new Array(length);
+const upperBound: number[] = new Array(length);
+const lowerBound: number[] = new Array(length);
+for (let i = 0; i < length; i++) {
+timestamps[i] = seriesValues[i].timestamp / 1000;
+values[i] = seriesValues[i].value;
+predicted[i] = predictedValues[i].value;
+upperBound[i] = upperBoundValues[i].value;
+lowerBound[i] = lowerBoundValues[i].value;
+}
processedData[objKey] = {
-data: [
-series[index].values.map((v: { timestamp: number }) => v.timestamp / 1000),
-series[index].values.map((v: { value: number }) => v.value),
-predictedSeries[index].values.map((v: { value: number }) => v.value),
-upperBoundSeries[index].values.map((v: { value: number }) => v.value),
-lowerBoundSeries[index].values.map((v: { value: number }) => v.value),
-],
+data: [timestamps, values, predicted, upperBound, lowerBound],
color: generateColor(
objKey,
isDarkMode ? themeColors.chartcolors : themeColors.lightModeColor,
@@ -152,14 +186,7 @@ const processAnomalyDetectionData = (
export const getUplotChartDataForAnomalyDetection = (
apiResponse: MetricRangePayloadProps,
isDarkMode: boolean,
-): Record<
-string,
-{
-[x: string]: any;
-data: number[][];
-color: string;
-}
-> => {
+): Record<string, { [x: string]: any; data: number[][]; color: string }> => {
const anomalyDetectionData = apiResponse?.data?.newResult?.data?.result;
return processAnomalyDetectionData(anomalyDetectionData, isDarkMode);
};
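
The rewrite replaces cloneDeep and repeated whole-array scans with a single pass per series into a Map keyed by timestamp, backfilling timestamps the series lacks as explicit nulls so every series aligns with the shared, sorted x-axis. A condensed sketch of the idea, with Number standing in for the shipped normalizePlotValue:

function fillSeries(
  allTimestamps: Set<number>,
  values: [number, string][],
): (number | null)[] {
  // One pass: record each timestamp's value in a Map.
  const byTs = new Map<number, number | null>();
  for (const [ts, v] of values) byTs.set(ts, Number(v));
  // Timestamps absent from this series become explicit nulls.
  return Array.from(allTimestamps)
    .sort((a, b) => a - b)
    .map((ts) => byTs.get(ts) ?? null);
}

// fillSeries(new Set([1, 2, 3]), [[1, '10'], [3, '30']]) -> [10, null, 30]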

View File

@@ -80,6 +80,17 @@ func parseFieldKeyRequest(r *http.Request) (*telemetrytypes.FieldKeySelector, er
name := r.URL.Query().Get("searchText")
+if name != "" && fieldContext == telemetrytypes.FieldContextUnspecified {
+parsedFieldKey := telemetrytypes.GetFieldKeyFromKeyText(name)
+if parsedFieldKey.FieldContext != telemetrytypes.FieldContextUnspecified {
+// Only apply inferred context if it is valid for the current signal
+if isContextValidForSignal(parsedFieldKey.FieldContext, signal) {
+name = parsedFieldKey.Name
+fieldContext = parsedFieldKey.FieldContext
+}
+}
+}
req = telemetrytypes.FieldKeySelector{
StartUnixMilli: startUnixMilli,
EndUnixMilli: endUnixMilli,
@@ -102,6 +113,16 @@ func parseFieldValueRequest(r *http.Request) (*telemetrytypes.FieldValueSelector
}
name := r.URL.Query().Get("name")
+if name != "" && keySelector.FieldContext == telemetrytypes.FieldContextUnspecified {
+parsedFieldKey := telemetrytypes.GetFieldKeyFromKeyText(name)
+if parsedFieldKey.FieldContext != telemetrytypes.FieldContextUnspecified {
+// Only apply inferred context if it is valid for the current signal
+if isContextValidForSignal(parsedFieldKey.FieldContext, keySelector.Signal) {
+name = parsedFieldKey.Name
+keySelector.FieldContext = parsedFieldKey.FieldContext
+}
+}
+}
keySelector.Name = name
existingQuery := r.URL.Query().Get("existingQuery")
value := r.URL.Query().Get("searchText")
@@ -121,3 +142,21 @@ func parseFieldValueRequest(r *http.Request) (*telemetrytypes.FieldValueSelector
return &req, nil
}
func isContextValidForSignal(ctx telemetrytypes.FieldContext, signal telemetrytypes.Signal) bool {
if ctx == telemetrytypes.FieldContextResource ||
ctx == telemetrytypes.FieldContextAttribute ||
ctx == telemetrytypes.FieldContextScope {
return true
}
switch signal.StringValue() {
case telemetrytypes.SignalLogs.StringValue():
return ctx == telemetrytypes.FieldContextLog || ctx == telemetrytypes.FieldContextBody
case telemetrytypes.SignalTraces.StringValue():
return ctx == telemetrytypes.FieldContextSpan || ctx == telemetrytypes.FieldContextEvent || ctx == telemetrytypes.FieldContextTrace
case telemetrytypes.SignalMetrics.StringValue():
return ctx == telemetrytypes.FieldContextMetric
}
return true
}
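
The validity gate above means a prefix such as resource. is honoured for any signal, while metric. counts as a context prefix only when the signal is metrics; otherwise the key is kept verbatim. A self-contained Go sketch of that decision, where splitContext is a simplified stand-in for telemetrytypes.GetFieldKeyFromKeyText:

package main

import (
	"fmt"
	"strings"
)

// splitContext peels a known context prefix off the key text, if any.
func splitContext(key string) (ctx, name string) {
	for _, c := range []string{"resource", "attribute", "scope", "log", "body", "span", "event", "trace", "metric"} {
		if strings.HasPrefix(key, c+".") {
			return c, strings.TrimPrefix(key, c+".")
		}
	}
	return "", key
}

// contextValid mirrors isContextValidForSignal: resource, attribute and
// scope apply to every signal; the rest only to their own signal.
func contextValid(ctx, signal string) bool {
	switch ctx {
	case "resource", "attribute", "scope":
		return true
	case "log", "body":
		return signal == "logs"
	case "span", "event", "trace":
		return signal == "traces"
	case "metric":
		return signal == "metrics"
	}
	return true
}

func main() {
	for _, key := range []string{"resource.service.name", "metric.domain_id"} {
		if ctx, name := splitContext(key); ctx != "" && contextValid(ctx, "logs") {
			fmt.Printf("%s -> context=%s name=%s\n", key, ctx, name)
		} else {
			fmt.Printf("%s -> kept verbatim for logs\n", key)
		}
	}
	// resource.service.name -> context=resource name=service.name
	// metric.domain_id -> kept verbatim for logs
}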

View File

@@ -153,10 +153,28 @@ func NewFormulaEvaluator(expressionStr string, canDefaultZero map[string]bool) (
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to parse expression")
}
+// Normalize canDefaultZero keys to match variable casing from expression
+normalizedCanDefaultZero := make(map[string]bool)
+vars := expression.Vars()
+for _, variable := range vars {
+// If exact match exists, use it
+if val, ok := canDefaultZero[variable]; ok {
+normalizedCanDefaultZero[variable] = val
+continue
+}
+// Otherwise try case-insensitive lookup
+for k, v := range canDefaultZero {
+if strings.EqualFold(k, variable) {
+normalizedCanDefaultZero[variable] = v
+break
+}
+}
+}
evaluator := &FormulaEvaluator{
expression: expression,
-variables: expression.Vars(),
-canDefaultZero: canDefaultZero,
+variables: vars,
+canDefaultZero: normalizedCanDefaultZero,
aggRefs: make(map[string]aggregationRef),
}
@@ -281,6 +299,16 @@ func (fe *FormulaEvaluator) buildSeriesLookup(timeSeriesData map[string]*TimeSer
// We are only interested in the time series data for the queries that are
// involved in the formula expression.
data, exists := timeSeriesData[aggRef.QueryName]
+if !exists {
+// try case-insensitive lookup
+for k, v := range timeSeriesData {
+if strings.EqualFold(k, aggRef.QueryName) {
+data = v
+exists = true
+break
+}
+}
+}
if !exists {
continue
}
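
NewFormulaEvaluator re-keys canDefaultZero by the variable casing the parsed expression actually reports, preferring an exact match and falling back to a case-insensitive scan, so later lookups never miss on casing alone. A minimal sketch of that normalization step:

package main

import (
	"fmt"
	"strings"
)

// normalizeKeys re-keys lookup by the variable names the expression uses.
func normalizeKeys(vars []string, lookup map[string]bool) map[string]bool {
	normalized := make(map[string]bool, len(vars))
	for _, v := range vars {
		// Exact match wins; otherwise scan case-insensitively.
		if val, ok := lookup[v]; ok {
			normalized[v] = val
			continue
		}
		for k, kv := range lookup {
			if strings.EqualFold(k, v) {
				normalized[v] = kv
				break
			}
		}
	}
	return normalized
}

func main() {
	// The expression "a / b" reports variables "a" and "b", while the
	// queries were registered as "A" and "B".
	fmt.Println(normalizeKeys([]string{"a", "b"}, map[string]bool{"A": true, "B": false}))
	// map[a:true b:false]
}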

View File

@@ -864,6 +864,158 @@ func TestComplexExpression(t *testing.T) {
}
}
func TestCaseInsensitiveQueryNames(t *testing.T) {
tests := []struct {
name string
expression string
tsData map[string]*TimeSeriesData
expectedValues []float64
}{
{
name: "lowercase query names",
expression: "a / b",
tsData: map[string]*TimeSeriesData{
"A": createFormulaTestTimeSeriesData("A", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 10}),
},
}),
"B": createFormulaTestTimeSeriesData("B", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 2}),
},
}),
},
expectedValues: []float64{5.0},
},
{
name: "mixed case query names",
expression: "A / b",
tsData: map[string]*TimeSeriesData{
"A": createFormulaTestTimeSeriesData("A", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 10}),
},
}),
"B": createFormulaTestTimeSeriesData("B", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 2}),
},
}),
},
expectedValues: []float64{5.0},
},
{
name: "uppercase query names with lowercase data keys",
expression: "A / B",
tsData: map[string]*TimeSeriesData{
"a": createFormulaTestTimeSeriesData("a", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 10}),
},
}),
"b": createFormulaTestTimeSeriesData("b", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 2}),
},
}),
},
expectedValues: []float64{5.0},
},
{
name: "all lowercase",
expression: "a/b",
tsData: map[string]*TimeSeriesData{
"a": createFormulaTestTimeSeriesData("a", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 100}),
},
}),
"b": createFormulaTestTimeSeriesData("b", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 10}),
},
}),
},
expectedValues: []float64{10.0},
},
{
name: "complex expression with mixed case",
expression: "a + B * c",
tsData: map[string]*TimeSeriesData{
"A": createFormulaTestTimeSeriesData("A", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 5}),
},
}),
"b": createFormulaTestTimeSeriesData("b", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 3}),
},
}),
"C": createFormulaTestTimeSeriesData("C", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{1: 2}),
},
}),
},
expectedValues: []float64{11.0}, // 5 + 3 * 2 = 11
},
{
name: "lowercase variables with default zero missing point",
expression: "a + b",
tsData: map[string]*TimeSeriesData{
"A": createFormulaTestTimeSeriesData("A", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{
1: 10,
2: 20,
}),
},
}),
"B": createFormulaTestTimeSeriesData("B", []*TimeSeries{
{
Labels: createLabels(map[string]string{}),
Values: createValues(map[int64]float64{
1: 5,
}),
},
}),
},
expectedValues: []float64{15.0, 20.0}, // t1: 10+5, t2: 20+0
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
evaluator, err := NewFormulaEvaluator(tt.expression, map[string]bool{"a": true, "A": true, "b": true, "B": true, "c": true, "C": true})
require.NoError(t, err)
result, err := evaluator.EvaluateFormula(tt.tsData)
require.NoError(t, err)
require.NotNil(t, result)
assert.Equal(t, 1, len(result), "should have exactly one result series")
assert.Equal(t, len(tt.expectedValues), len(result[0].Values), "should match expected number of values")
for i, v := range tt.expectedValues {
assert.InDelta(t, v, result[0].Values[i].Value, 0.0001, "value at index %d should match", i)
}
})
}
}
func TestAbsValueExpression(t *testing.T) {
tsData := map[string]*TimeSeriesData{
"A": createFormulaTestTimeSeriesData("A", []*TimeSeries{

View File

@@ -81,7 +81,7 @@ class TracesResourceOrAttributeKeys(ABC):
self.is_column = is_column
def np_arr(self) -> np.array:
-return np.array([self.name, self.tag_type, self.datatype, self.is_column])
+return np.array([self.name, self.tag_type, self.datatype, self.is_column], dtype=object)
class TracesTagAttributes(ABC):
@@ -636,8 +636,10 @@ def insert_traces(
)
attribute_keys: List[TracesResourceOrAttributeKeys] = []
+resource_keys: List[TracesResourceOrAttributeKeys] = []
for trace in traces:
attribute_keys.extend(trace.attribute_keys)
+resource_keys.extend(trace.resource_keys)
if len(attribute_keys) > 0:
clickhouse.conn.insert(
@@ -646,6 +648,13 @@ def insert_traces(
data=[attribute_key.np_arr() for attribute_key in attribute_keys],
)
+if len(resource_keys) > 0:
+clickhouse.conn.insert(
+database="signoz_traces",
+table="distributed_span_attributes_keys",
+data=[resource_key.np_arr() for resource_key in resource_keys],
+)
# Insert main traces
clickhouse.conn.insert(
database="signoz_traces",
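
The dtype=object change matters because numpy promotes a mixed-type list to a single common dtype, so without it every element of the row, including the boolean is_column flag, is stringified before insertion into ClickHouse. A short illustration:

import numpy as np

# Without dtype=object, numpy picks one dtype for the mixed row:
# everything becomes a unicode string, including the boolean.
coerced = np.array(["service.name", "tag", "string", True])
print(coerced.dtype)  # <U12 -- True was stringified to 'True'

# dtype=object keeps each element's original Python type.
preserved = np.array(["service.name", "tag", "string", True], dtype=object)
print(preserved.dtype)  # object
print(type(preserved[-1]))  # <class 'bool'>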

View File

@@ -67,6 +67,7 @@ def test_logs_list(
"code.file": "/opt/integration.go",
"code.function": "com.example.Integration.process",
"code.line": 120,
+"metric.domain_id": "d-001",
"telemetry.sdk.language": "go",
},
body="This is a log message, coming from a go application",
@@ -141,6 +142,7 @@
"code.function": "com.example.Integration.process",
"log.iostream": "stdout",
"logtag": "F",
+"metric.domain_id": "d-001",
"telemetry.sdk.language": "go",
}
assert rows[0]["data"]["attributes_number"] == {"code.line": 120}
@@ -308,6 +310,86 @@ def test_logs_list(
assert len(values) == 1
assert 120 in values
# Query keys from the fields API with context specified in the key
response = requests.get(
signoz.self.host_configs["8080"].get("/api/v1/fields/keys"),
timeout=2,
headers={
"authorization": f"Bearer {token}",
},
params={
"signal": "logs",
"searchText": "resource.servic",
},
)
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
keys = response.json()["data"]["keys"]
assert "service.name" in keys
assert any(k["fieldContext"] == "resource" for k in keys["service.name"])
# Do not treat `metric.` as a context prefix for logs
response = requests.get(
signoz.self.host_configs["8080"].get("/api/v1/fields/keys"),
timeout=2,
headers={
"authorization": f"Bearer {token}",
},
params={
"signal": "logs",
"searchText": "metric.do",
},
)
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
keys = response.json()["data"]["keys"]
assert "metric.domain_id" in keys
# Query values of service.name resource attribute using context-prefixed key
response = requests.get(
signoz.self.host_configs["8080"].get("/api/v1/fields/values"),
timeout=2,
headers={
"authorization": f"Bearer {token}",
},
params={
"signal": "logs",
"name": "resource.service.name",
"searchText": "",
},
)
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
values = response.json()["data"]["values"]["stringValues"]
assert "go" in values
assert "java" in values
# Query values of metric.domain_id (string attribute) and ensure context collision doesn't break it
response = requests.get(
signoz.self.host_configs["8080"].get("/api/v1/fields/values"),
timeout=2,
headers={
"authorization": f"Bearer {token}",
},
params={
"signal": "logs",
"name": "metric.domain_id",
"searchText": "",
},
)
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
values = response.json()["data"]["values"]["stringValues"]
assert "d-001" in values
def test_logs_time_series_count(
signoz: types.SigNoz,

View File

@@ -373,3 +373,43 @@ def test_traces_list(
assert len(values) == 2
assert set(values) == set(["POST", "PATCH"])
# Query keys from the fields API with context specified in the key
response = requests.get(
signoz.self.host_configs["8080"].get("/api/v1/fields/keys"),
timeout=2,
headers={
"authorization": f"Bearer {token}",
},
params={
"signal": "traces",
"searchText": "resource.servic",
},
)
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
keys = response.json()["data"]["keys"]
assert "service.name" in keys
assert any(k["fieldContext"] == "resource" for k in keys["service.name"])
# Query values of service.name resource attribute using context-prefixed key
response = requests.get(
signoz.self.host_configs["8080"].get("/api/v1/fields/values"),
timeout=2,
headers={
"authorization": f"Bearer {token}",
},
params={
"signal": "traces",
"name": "resource.service.name",
"searchText": "",
},
)
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
values = response.json()["data"]["values"]["stringValues"]
assert set(values) == set(["topic-service", "http-service"])