Mirror of https://github.com/SigNoz/signoz.git (synced 2025-12-29 08:00:59 +00:00)

Compare commits (4 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | aa004c9da9 |  |
|  | 7c051601f2 |  |
|  | b9f9c00da5 |  |
|  | 49ff86e65a |  |
@@ -421,11 +421,16 @@ export default function CheckboxFilter(props: ICheckboxProps): JSX.Element {
				...currentFilter,
				value: currentFilter.value.filter((val) => val !== value),
			};

			if (newFilter.value.length === 0) {
				query.filters.items = query.filters.items.filter(
					(item) => !isEqual(item.key?.key, filter.attributeKey.key),
				);
				if (query.filter?.expression) {
					query.filter.expression = removeKeysFromExpression(
						query.filter.expression,
						[filter.attributeKey.key],
					);
				}
			} else {
				query.filters.items = query.filters.items.map((item) => {
					if (isEqual(item.key?.key, filter.attributeKey.key)) {
@@ -435,6 +440,16 @@ export default function CheckboxFilter(props: ICheckboxProps): JSX.Element {
				});
			}
		} else {
			const newFilter = {
				...currentFilter,
				value: currentFilter.value === value ? null : currentFilter.value,
			};
			if (newFilter.value === null && query.filter?.expression) {
				query.filter.expression = removeKeysFromExpression(
					query.filter.expression,
					[filter.attributeKey.key],
				);
			}
			query.filters.items = query.filters.items.filter(
				(item) => !isEqual(item.key?.key, filter.attributeKey.key),
			);
@@ -80,6 +80,17 @@ func parseFieldKeyRequest(r *http.Request) (*telemetrytypes.FieldKeySelector, er

	name := r.URL.Query().Get("searchText")

	if name != "" && fieldContext == telemetrytypes.FieldContextUnspecified {
		parsedFieldKey := telemetrytypes.GetFieldKeyFromKeyText(name)
		if parsedFieldKey.FieldContext != telemetrytypes.FieldContextUnspecified {
			// Only apply inferred context if it is valid for the current signal
			if isContextValidForSignal(parsedFieldKey.FieldContext, signal) {
				name = parsedFieldKey.Name
				fieldContext = parsedFieldKey.FieldContext
			}
		}
	}

	req = telemetrytypes.FieldKeySelector{
		StartUnixMilli: startUnixMilli,
		EndUnixMilli:   endUnixMilli,
@@ -102,6 +113,16 @@ func parseFieldValueRequest(r *http.Request) (*telemetrytypes.FieldValueSelector
	}

	name := r.URL.Query().Get("name")
	if name != "" && keySelector.FieldContext == telemetrytypes.FieldContextUnspecified {
		parsedFieldKey := telemetrytypes.GetFieldKeyFromKeyText(name)
		if parsedFieldKey.FieldContext != telemetrytypes.FieldContextUnspecified {
			// Only apply inferred context if it is valid for the current signal
			if isContextValidForSignal(parsedFieldKey.FieldContext, keySelector.Signal) {
				name = parsedFieldKey.Name
				keySelector.FieldContext = parsedFieldKey.FieldContext
			}
		}
	}
	keySelector.Name = name
	existingQuery := r.URL.Query().Get("existingQuery")
	value := r.URL.Query().Get("searchText")
@@ -121,3 +142,21 @@ func parseFieldValueRequest(r *http.Request) (*telemetrytypes.FieldValueSelector

	return &req, nil
}

func isContextValidForSignal(ctx telemetrytypes.FieldContext, signal telemetrytypes.Signal) bool {
	if ctx == telemetrytypes.FieldContextResource ||
		ctx == telemetrytypes.FieldContextAttribute ||
		ctx == telemetrytypes.FieldContextScope {
		return true
	}

	switch signal.StringValue() {
	case telemetrytypes.SignalLogs.StringValue():
		return ctx == telemetrytypes.FieldContextLog || ctx == telemetrytypes.FieldContextBody
	case telemetrytypes.SignalTraces.StringValue():
		return ctx == telemetrytypes.FieldContextSpan || ctx == telemetrytypes.FieldContextEvent || ctx == telemetrytypes.FieldContextTrace
	case telemetrytypes.SignalMetrics.StringValue():
		return ctx == telemetrytypes.FieldContextMetric
	}
	return true
}
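The hunks above teach the fields API to understand context-prefixed keys: a search text such as `resource.service.name` is split into a field context (`resource`) and a name (`service.name`), but the inferred context is only applied when it is valid for the requested signal, so `metric.domain_id` on the logs signal stays a literal attribute name. Below is a minimal, self-contained sketch of that routing; `contextsBySignal` and `splitFieldKey` are illustrative stand-ins, not the real telemetrytypes API, which parses the prefix first and validates it in a separate step.

```go
package main

import (
	"fmt"
	"strings"
)

// Illustrative context model: which prefixes count as a field context per signal.
// resource/attribute/scope are valid for every signal, mirroring isContextValidForSignal.
var contextsBySignal = map[string]map[string]bool{
	"logs":    {"resource": true, "attribute": true, "scope": true, "log": true, "body": true},
	"traces":  {"resource": true, "attribute": true, "scope": true, "span": true, "event": true, "trace": true},
	"metrics": {"resource": true, "attribute": true, "scope": true, "metric": true},
}

// splitFieldKey peels off a leading "<context>." prefix only when that context
// is valid for the signal; otherwise the whole text is treated as the key name.
func splitFieldKey(searchText, signal string) (fieldContext, name string) {
	if prefix, rest, ok := strings.Cut(searchText, "."); ok && contextsBySignal[signal][prefix] {
		return prefix, rest
	}
	return "", searchText
}

func main() {
	fmt.Println(splitFieldKey("resource.service.name", "logs")) // -> resource, service.name
	fmt.Println(splitFieldKey("metric.domain_id", "logs"))      // -> "", metric.domain_id (kept literal)
	fmt.Println(splitFieldKey("span.http.method", "traces"))    // -> span, http.method
}
```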
@@ -520,7 +520,7 @@ func (h *HostsRepo) GetHostList(ctx context.Context, orgID valuer.UUID, req mode
 	if _, ok := hostAttrs[record.HostName]; ok {
 		record.Meta = hostAttrs[record.HostName]
 	}
-	if osType, ok := record.Meta["os_type"]; ok {
+	if osType, ok := record.Meta[GetDotMetrics("os_type")]; ok {
 		record.OS = osType
 	}
 	record.Active = activeHosts[record.HostName]
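The only change in this hunk is the map key: the hard-coded `"os_type"` lookup now goes through `GetDotMetrics`, so the host's OS attribute is resolved under whichever attribute-naming convention is in effect. The helper's implementation is not part of this diff; the sketch below is a hedged guess at the idea, with `dotMetricsEnabled` as a made-up stand-in for the real configuration.

```go
// Hypothetical sketch only; the real GetDotMetrics in the SigNoz codebase may differ.
// The assumption here is that it returns the dot-notation form of an underscore-style
// attribute name when dot-separated metric attributes are enabled.
var dotMetricsEnabled = true // illustrative flag, not the actual setting

func GetDotMetrics(key string) string {
	if dotMetricsEnabled {
		return strings.ReplaceAll(key, "_", ".") // e.g. "os_type" -> "os.type"
	}
	return key
}
```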
@@ -153,10 +153,28 @@ func NewFormulaEvaluator(expressionStr string, canDefaultZero map[string]bool) (
		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to parse expression")
	}

	// Normalize canDefaultZero keys to match variable casing from expression
	normalizedCanDefaultZero := make(map[string]bool)
	vars := expression.Vars()
	for _, variable := range vars {
		// If exact match exists, use it
		if val, ok := canDefaultZero[variable]; ok {
			normalizedCanDefaultZero[variable] = val
			continue
		}
		// Otherwise try case-insensitive lookup
		for k, v := range canDefaultZero {
			if strings.EqualFold(k, variable) {
				normalizedCanDefaultZero[variable] = v
				break
			}
		}
	}

	evaluator := &FormulaEvaluator{
		expression:     expression,
-		variables:      expression.Vars(),
-		canDefaultZero: canDefaultZero,
+		variables:      vars,
+		canDefaultZero: normalizedCanDefaultZero,
		aggRefs:        make(map[string]aggregationRef),
	}
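Query names in the builder are conventionally uppercase (A, B, C) while a formula may be written in lowercase, so the constructor now re-keys `canDefaultZero` to the exact variable casing the parsed expression reports. A standalone sketch of that step; the function name `normalizeDefaults` is illustrative, not from the codebase.

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeDefaults re-keys the defaults map to the variable names that appear
// in the parsed expression: an exact match wins, otherwise the first
// case-insensitive match is used.
func normalizeDefaults(exprVars []string, canDefaultZero map[string]bool) map[string]bool {
	normalized := make(map[string]bool, len(exprVars))
	for _, variable := range exprVars {
		if val, ok := canDefaultZero[variable]; ok {
			normalized[variable] = val
			continue
		}
		for k, v := range canDefaultZero {
			if strings.EqualFold(k, variable) {
				normalized[variable] = v
				break
			}
		}
	}
	return normalized
}

func main() {
	// A formula written as "a / b" still picks up defaults registered under
	// the uppercase query names "A" and "B".
	fmt.Println(normalizeDefaults([]string{"a", "b"}, map[string]bool{"A": true, "B": false}))
	// map[a:true b:false]
}
```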
@@ -281,6 +299,16 @@ func (fe *FormulaEvaluator) buildSeriesLookup(timeSeriesData map[string]*TimeSer
		// We are only interested in the time series data for the queries that are
		// involved in the formula expression.
		data, exists := timeSeriesData[aggRef.QueryName]
		if !exists {
			// try case-insensitive lookup
			for k, v := range timeSeriesData {
				if strings.EqualFold(k, aggRef.QueryName) {
					data = v
					exists = true
					break
				}
			}
		}
		if !exists {
			continue
		}
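The same tolerance is applied when the evaluator resolves result data: an exact key lookup first, then a case-insensitive scan over the map. The generic helper below captures that fallback in one place; `lookupFold` is an illustrative name, not part of the codebase.

```go
package main

import (
	"fmt"
	"strings"
)

// lookupFold returns the value for key, trying an exact match first and then a
// case-insensitive scan, mirroring the fallback in buildSeriesLookup.
func lookupFold[V any](m map[string]V, key string) (V, bool) {
	if v, ok := m[key]; ok {
		return v, true
	}
	for k, v := range m {
		if strings.EqualFold(k, key) {
			return v, true
		}
	}
	var zero V
	return zero, false
}

func main() {
	series := map[string]int{"A": 3} // stand-in for map[string]*TimeSeriesData
	v, ok := lookupFold(series, "a") // the formula referenced "a", data is keyed "A"
	fmt.Println(v, ok)               // 3 true
}
```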
@@ -864,6 +864,158 @@ func TestComplexExpression(t *testing.T) {
	}
}

func TestCaseInsensitiveQueryNames(t *testing.T) {
	tests := []struct {
		name           string
		expression     string
		tsData         map[string]*TimeSeriesData
		expectedValues []float64
	}{
		{
			name:       "lowercase query names",
			expression: "a / b",
			tsData: map[string]*TimeSeriesData{
				"A": createFormulaTestTimeSeriesData("A", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 10}),
					},
				}),
				"B": createFormulaTestTimeSeriesData("B", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 2}),
					},
				}),
			},
			expectedValues: []float64{5.0},
		},
		{
			name:       "mixed case query names",
			expression: "A / b",
			tsData: map[string]*TimeSeriesData{
				"A": createFormulaTestTimeSeriesData("A", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 10}),
					},
				}),
				"B": createFormulaTestTimeSeriesData("B", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 2}),
					},
				}),
			},
			expectedValues: []float64{5.0},
		},
		{
			name:       "uppercase query names with lowercase data keys",
			expression: "A / B",
			tsData: map[string]*TimeSeriesData{
				"a": createFormulaTestTimeSeriesData("a", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 10}),
					},
				}),
				"b": createFormulaTestTimeSeriesData("b", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 2}),
					},
				}),
			},
			expectedValues: []float64{5.0},
		},
		{
			name:       "all lowercase",
			expression: "a/b",
			tsData: map[string]*TimeSeriesData{
				"a": createFormulaTestTimeSeriesData("a", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 100}),
					},
				}),
				"b": createFormulaTestTimeSeriesData("b", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 10}),
					},
				}),
			},
			expectedValues: []float64{10.0},
		},
		{
			name:       "complex expression with mixed case",
			expression: "a + B * c",
			tsData: map[string]*TimeSeriesData{
				"A": createFormulaTestTimeSeriesData("A", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 5}),
					},
				}),
				"b": createFormulaTestTimeSeriesData("b", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 3}),
					},
				}),
				"C": createFormulaTestTimeSeriesData("C", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{1: 2}),
					},
				}),
			},
			expectedValues: []float64{11.0}, // 5 + 3 * 2 = 11
		},
		{
			name:       "lowercase variables with default zero missing point",
			expression: "a + b",
			tsData: map[string]*TimeSeriesData{
				"A": createFormulaTestTimeSeriesData("A", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{
							1: 10,
							2: 20,
						}),
					},
				}),
				"B": createFormulaTestTimeSeriesData("B", []*TimeSeries{
					{
						Labels: createLabels(map[string]string{}),
						Values: createValues(map[int64]float64{
							1: 5,
						}),
					},
				}),
			},
			expectedValues: []float64{15.0, 20.0}, // t1: 10+5, t2: 20+0
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			evaluator, err := NewFormulaEvaluator(tt.expression, map[string]bool{"a": true, "A": true, "b": true, "B": true, "c": true, "C": true})
			require.NoError(t, err)

			result, err := evaluator.EvaluateFormula(tt.tsData)
			require.NoError(t, err)
			require.NotNil(t, result)

			assert.Equal(t, 1, len(result), "should have exactly one result series")
			assert.Equal(t, len(tt.expectedValues), len(result[0].Values), "should match expected number of values")
			for i, v := range tt.expectedValues {
				assert.InDelta(t, v, result[0].Values[i].Value, 0.0001, "value at index %d should match", i)
			}
		})
	}
}

func TestAbsValueExpression(t *testing.T) {
	tsData := map[string]*TimeSeriesData{
		"A": createFormulaTestTimeSeriesData("A", []*TimeSeries{
@@ -67,6 +67,7 @@ def test_logs_list(
            "code.file": "/opt/integration.go",
            "code.function": "com.example.Integration.process",
            "code.line": 120,
            "metric.domain_id": "d-001",
            "telemetry.sdk.language": "go",
        },
        body="This is a log message, coming from a go application",
@@ -141,6 +142,7 @@ def test_logs_list(
        "code.function": "com.example.Integration.process",
        "log.iostream": "stdout",
        "logtag": "F",
        "metric.domain_id": "d-001",
        "telemetry.sdk.language": "go",
    }
    assert rows[0]["data"]["attributes_number"] == {"code.line": 120}
@@ -308,6 +310,86 @@ def test_logs_list(
    assert len(values) == 1
    assert 120 in values

    # Query keys from the fields API with context specified in the key
    response = requests.get(
        signoz.self.host_configs["8080"].get("/api/v1/fields/keys"),
        timeout=2,
        headers={
            "authorization": f"Bearer {token}",
        },
        params={
            "signal": "logs",
            "searchText": "resource.servic",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"

    keys = response.json()["data"]["keys"]
    assert "service.name" in keys
    assert any(k["fieldContext"] == "resource" for k in keys["service.name"])

    # Do not treat `metric.` as a context prefix for logs
    response = requests.get(
        signoz.self.host_configs["8080"].get("/api/v1/fields/keys"),
        timeout=2,
        headers={
            "authorization": f"Bearer {token}",
        },
        params={
            "signal": "logs",
            "searchText": "metric.do",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"

    keys = response.json()["data"]["keys"]
    assert "metric.domain_id" in keys

    # Query values of service.name resource attribute using context-prefixed key
    response = requests.get(
        signoz.self.host_configs["8080"].get("/api/v1/fields/values"),
        timeout=2,
        headers={
            "authorization": f"Bearer {token}",
        },
        params={
            "signal": "logs",
            "name": "resource.service.name",
            "searchText": "",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"

    values = response.json()["data"]["values"]["stringValues"]
    assert "go" in values
    assert "java" in values

    # Query values of metric.domain_id (string attribute) and ensure context collision doesn't break it
    response = requests.get(
        signoz.self.host_configs["8080"].get("/api/v1/fields/values"),
        timeout=2,
        headers={
            "authorization": f"Bearer {token}",
        },
        params={
            "signal": "logs",
            "name": "metric.domain_id",
            "searchText": "",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"

    values = response.json()["data"]["values"]["stringValues"]
    assert "d-001" in values


def test_logs_time_series_count(
    signoz: types.SigNoz,
@@ -373,3 +373,43 @@ def test_traces_list(
    assert len(values) == 2

    assert set(values) == set(["POST", "PATCH"])

    # Query keys from the fields API with context specified in the key
    response = requests.get(
        signoz.self.host_configs["8080"].get("/api/v1/fields/keys"),
        timeout=2,
        headers={
            "authorization": f"Bearer {token}",
        },
        params={
            "signal": "traces",
            "searchText": "resource.servic",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"

    keys = response.json()["data"]["keys"]
    assert "service.name" in keys
    assert any(k["fieldContext"] == "resource" for k in keys["service.name"])

    # Query values of service.name resource attribute using context-prefixed key
    response = requests.get(
        signoz.self.host_configs["8080"].get("/api/v1/fields/values"),
        timeout=2,
        headers={
            "authorization": f"Bearer {token}",
        },
        params={
            "signal": "traces",
            "name": "resource.service.name",
            "searchText": "",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"

    values = response.json()["data"]["values"]["stringValues"]
    assert set(values) == set(["topic-service", "http-service"])