Compare commits
3 commits: main...v0.87.0-bd

| SHA1 |
|---|
| fecf6667a3 |
| bda2316377 |
| 6404e7388e |
@@ -44,6 +44,7 @@ func Success(rw http.ResponseWriter, httpCode int, data interface{}) {
	}

	rw.WriteHeader(httpCode)
	rw.Header().Set("Content-Type", "application/json")
	_, _ = rw.Write(body)
}

@@ -33,6 +33,12 @@ func (a *API) QueryRange(rw http.ResponseWriter, req *http.Request) {
		return
	}
+
+	// Validate the query request
+	if err := queryRangeRequest.Validate(); err != nil {
+		render.Error(rw, err)
+		return
+	}

	orgID, err := valuer.NewUUID(claims.OrgID)
	if err != nil {
		render.Error(rw, err)

@@ -70,7 +70,12 @@ func (bc *bucketCache) GetMissRanges(
	// Get query window
	startMs, endMs := q.Window()

-	bc.logger.DebugContext(ctx, "getting miss ranges", "fingerprint", q.Fingerprint(), "start", startMs, "end", endMs)
+	bc.logger.DebugContext(ctx, "getting miss ranges",
+		"fingerprint", q.Fingerprint(),
+		"start", startMs,
+		"end", endMs,
+		"start_time", time.UnixMilli(int64(startMs)).Format(time.RFC3339),
+		"end_time", time.UnixMilli(int64(endMs)).Format(time.RFC3339))

	// Generate cache key
	cacheKey := bc.generateCacheKey(q)
@@ -117,7 +122,7 @@ func (bc *bucketCache) GetMissRanges(
}

// Put stores fresh query results in the cache
-func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, fresh *qbtypes.Result) {
+func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, step qbtypes.Step, fresh *qbtypes.Result) {
	// Get query window
	startMs, endMs := q.Window()

@@ -159,8 +164,51 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
		return
	}

-	// Convert trimmed result to buckets
-	freshBuckets := bc.resultToBuckets(ctx, trimmedResult, startMs, cachableEndMs)
+	// Adjust start and end times to only cache complete intervals
+	cachableStartMs := startMs
+	stepMs := uint64(step.Duration.Milliseconds())
+
+	// If we have a step interval, adjust boundaries to only cache complete intervals
+	if stepMs > 0 {
+		// If start is not aligned, round up to next step boundary (first complete interval)
+		if startMs%stepMs != 0 {
+			cachableStartMs = ((startMs / stepMs) + 1) * stepMs
+		}
+
+		// If end is not aligned, round down to previous step boundary (last complete interval)
+		if cachableEndMs%stepMs != 0 {
+			cachableEndMs = (cachableEndMs / stepMs) * stepMs
+		}
+
+		// If after adjustment we have no complete intervals, don't cache
+		if cachableStartMs >= cachableEndMs {
+			bc.logger.DebugContext(ctx, "no complete intervals to cache",
+				"original_start", startMs,
+				"original_end", endMs,
+				"adjusted_start", cachableStartMs,
+				"adjusted_end", cachableEndMs,
+				"step", stepMs)
+			return
+		}
+	}
+
+	// Convert trimmed result to buckets with adjusted boundaries
+	freshBuckets := bc.resultToBuckets(ctx, trimmedResult, cachableStartMs, cachableEndMs)
+
+	// Debug: Log what we're about to cache
+	if tsData, ok := trimmedResult.Value.(*qbtypes.TimeSeriesData); ok {
+		totalSeries := 0
+		for _, agg := range tsData.Aggregations {
+			totalSeries += len(agg.Series)
+		}
+		bc.logger.DebugContext(ctx, "converting result to buckets",
+			"total_series", totalSeries,
+			"original_start", startMs,
+			"original_end", endMs,
+			"cachable_start", cachableStartMs,
+			"cachable_end", cachableEndMs,
+			"step", stepMs)
+	}

	// If no fresh buckets and no existing data, don't cache
	if len(freshBuckets) == 0 && len(existingData.Buckets) == 0 {
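The boundary math introduced in the Put hunk above can be read on its own: round the window start up and the end down to the nearest step multiple, and skip caching when no whole interval remains. Below is a minimal, self-contained sketch of that arithmetic; the helper name alignToCompleteIntervals and the example timestamps (taken from the new step-alignment test) are illustrative, not part of the diff.

```go
package main

import "fmt"

// alignToCompleteIntervals mirrors the adjustment in Put above: round the start
// up and the end down to the nearest step boundary so only complete intervals
// are cached. ok is false when no complete interval remains.
func alignToCompleteIntervals(startMs, endMs, stepMs uint64) (alignedStart, alignedEnd uint64, ok bool) {
	if stepMs == 0 {
		return startMs, endMs, startMs < endMs
	}
	alignedStart = startMs
	if startMs%stepMs != 0 {
		alignedStart = ((startMs / stepMs) + 1) * stepMs
	}
	alignedEnd = endMs
	if endMs%stepMs != 0 {
		alignedEnd = (endMs / stepMs) * stepMs
	}
	return alignedStart, alignedEnd, alignedStart < alignedEnd
}

func main() {
	// A 12:02 to 12:58 window with a 5-minute step aligns to 12:05 to 12:55.
	start, end, ok := alignToCompleteIntervals(1672563720000, 1672567080000, 5*60*1000)
	fmt.Println(start, end, ok) // 1672563900000 1672566900000 true
}
```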
@@ -485,6 +533,12 @@ func (bc *bucketCache) mergeTimeSeriesValues(ctx context.Context, buckets []*cac
			}

			if existingSeries, ok := seriesMap[key]; ok {
+				// Merge values, avoiding duplicate timestamps
+				timestampMap := make(map[int64]bool)
+				for _, v := range existingSeries.Values {
+					timestampMap[v.Timestamp] = true
+				}
+
				// Pre-allocate capacity for merged values
				newCap := len(existingSeries.Values) + len(series.Values)
				if cap(existingSeries.Values) < newCap {
@@ -492,7 +546,13 @@ func (bc *bucketCache) mergeTimeSeriesValues(ctx context.Context, buckets []*cac
					copy(newValues, existingSeries.Values)
					existingSeries.Values = newValues
				}
-				existingSeries.Values = append(existingSeries.Values, series.Values...)
+
+				// Only add values with new timestamps
+				for _, v := range series.Values {
+					if !timestampMap[v.Timestamp] {
+						existingSeries.Values = append(existingSeries.Values, v)
+					}
+				}
			} else {
				// New series
				seriesMap[key] = series
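The two merge hunks above replace the unconditional append with a timestamp-keyed merge so overlapping cached and fresh buckets do not produce duplicate points. A small standalone sketch of the same idea follows; the value struct and the mergeByTimestamp name are illustrative, not the SigNoz types.

```go
package main

import "fmt"

type value struct {
	Timestamp int64
	Value     float64
}

// mergeByTimestamp keeps existing points and appends fresh points only when
// their timestamp has not been seen, matching the dedup logic in the diff.
func mergeByTimestamp(existing, fresh []value) []value {
	seen := make(map[int64]bool, len(existing))
	merged := make([]value, 0, len(existing)+len(fresh))
	for _, v := range existing {
		seen[v.Timestamp] = true
		merged = append(merged, v)
	}
	for _, v := range fresh {
		if !seen[v.Timestamp] {
			merged = append(merged, v)
		}
	}
	return merged
}

func main() {
	cached := []value{{Timestamp: 1000, Value: 1}, {Timestamp: 2000, Value: 2}}
	fresh := []value{{Timestamp: 2000, Value: 9}, {Timestamp: 3000, Value: 3}}
	fmt.Println(mergeByTimestamp(cached, fresh)) // [{1000 1} {2000 2} {3000 3}]
}
```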
@@ -697,7 +757,7 @@ func (bc *bucketCache) trimResultToFluxBoundary(result *qbtypes.Result, fluxBoun
	switch result.Type {
	case qbtypes.RequestTypeTimeSeries:
		// Trim time series data
-		if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok {
+		if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok && tsData != nil {
			trimmedData := &qbtypes.TimeSeriesData{
				QueryName: tsData.QueryName,
			}

@@ -30,7 +30,7 @@ func BenchmarkBucketCache_GetMissRanges(b *testing.B) {
			endMs:       uint64((i + 1) * 10000),
		}
		result := createBenchmarkResult(query.startMs, query.endMs, 1000)
-		bc.Put(ctx, orgID, query, result)
+		bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
	}

	// Create test queries with varying cache hit patterns
@@ -121,7 +121,7 @@ func BenchmarkBucketCache_Put(b *testing.B) {

			for i := 0; i < b.N; i++ {
				for j := 0; j < tc.numQueries; j++ {
-					bc.Put(ctx, orgID, queries[j], results[j])
+					bc.Put(ctx, orgID, queries[j], qbtypes.Step{}, results[j])
				}
			}
		})
@@ -259,7 +259,7 @@ func BenchmarkBucketCache_ConcurrentOperations(b *testing.B) {
			endMs:       uint64((i + 1) * 10000),
		}
		result := createBenchmarkResult(query.startMs, query.endMs, 1000)
-		bc.Put(ctx, orgID, query, result)
+		bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
	}

	b.ResetTimer()
@@ -284,7 +284,7 @@ func BenchmarkBucketCache_ConcurrentOperations(b *testing.B) {
				endMs:       uint64((i + 1) * 10000),
			}
			result := createBenchmarkResult(query.startMs, query.endMs, 1000)
-			bc.Put(ctx, orgID, query, result)
+			bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
		case 2: // Partial read
			query := &mockQuery{
				fingerprint: fmt.Sprintf("concurrent-query-%d", i%100),

pkg/querier/bucket_cache_step_test.go (new file, 117 lines)
@@ -0,0 +1,117 @@
package querier

import (
	"context"
	"testing"
	"time"

	"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestBucketCacheStepAlignment(t *testing.T) {
	ctx := context.Background()
	orgID := valuer.UUID{}
	cache := createTestCache(t)
	bc := NewBucketCache(instrumentationtest.New().ToProviderSettings(), cache, time.Hour, 5*time.Minute)

	// Test with 5-minute step
	step := qbtypes.Step{Duration: 5 * time.Minute}

	// Query from 12:02 to 12:58 (both unaligned)
	// Complete intervals: 12:05 to 12:55
	query := &mockQuery{
		fingerprint: "test-step-alignment",
		startMs:     1672563720000, // 12:02
		endMs:       1672567080000, // 12:58
	}

	result := &qbtypes.Result{
		Type: qbtypes.RequestTypeTimeSeries,
		Value: &qbtypes.TimeSeriesData{
			QueryName: "test",
			Aggregations: []*qbtypes.AggregationBucket{
				{
					Index: 0,
					Series: []*qbtypes.TimeSeries{
						{
							Labels: []*qbtypes.Label{
								{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "test"},
							},
							Values: []*qbtypes.TimeSeriesValue{
								{Timestamp: 1672563720000, Value: 1, Partial: true}, // 12:02
								{Timestamp: 1672563900000, Value: 2},                // 12:05
								{Timestamp: 1672564200000, Value: 2.5},              // 12:10
								{Timestamp: 1672564500000, Value: 2.6},              // 12:15
								{Timestamp: 1672566600000, Value: 2.9},              // 12:50
								{Timestamp: 1672566900000, Value: 3},                // 12:55
								{Timestamp: 1672567080000, Value: 4, Partial: true}, // 12:58
							},
						},
					},
				},
			},
		},
	}

	// Put result in cache
	bc.Put(ctx, orgID, query, step, result)

	// Get cached data
	cached, missing := bc.GetMissRanges(ctx, orgID, query, step)

	// Should have cached data
	require.NotNil(t, cached)

	// Log the missing ranges to debug
	t.Logf("Missing ranges: %v", missing)
	for i, r := range missing {
		t.Logf("Missing range %d: From=%d, To=%d", i, r.From, r.To)
	}

	// Should have 2 missing ranges for partial intervals
	require.Len(t, missing, 2)

	// First partial: 12:02 to 12:05
	assert.Equal(t, uint64(1672563720000), missing[0].From)
	assert.Equal(t, uint64(1672563900000), missing[0].To)

	// Second partial: 12:55 to 12:58
	assert.Equal(t, uint64(1672566900000), missing[1].From, "Second missing range From")
	assert.Equal(t, uint64(1672567080000), missing[1].To, "Second missing range To")
}

func TestBucketCacheNoStepInterval(t *testing.T) {
	ctx := context.Background()
	orgID := valuer.UUID{}
	cache := createTestCache(t)
	bc := NewBucketCache(instrumentationtest.New().ToProviderSettings(), cache, time.Hour, 5*time.Minute)

	// Test with no step (stepMs = 0)
	step := qbtypes.Step{Duration: 0}

	query := &mockQuery{
		fingerprint: "test-no-step",
		startMs:     1672563720000,
		endMs:       1672567080000,
	}

	result := &qbtypes.Result{
		Type: qbtypes.RequestTypeTimeSeries,
		Value: &qbtypes.TimeSeriesData{
			QueryName:    "test",
			Aggregations: []*qbtypes.AggregationBucket{{Index: 0, Series: []*qbtypes.TimeSeries{}}},
		},
	}

	// Should cache the entire range when step is 0
	bc.Put(ctx, orgID, query, step, result)

	cached, missing := bc.GetMissRanges(ctx, orgID, query, step)
	assert.NotNil(t, cached)
	assert.Len(t, missing, 0)
}
@@ -159,7 +159,7 @@ func TestBucketCache_Put_And_Get(t *testing.T) {
	}

	// Store in cache
-	bc.Put(context.Background(), valuer.UUID{}, query, result)
+	bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)

	// Wait a bit for cache to be written
	time.Sleep(10 * time.Millisecond)
@@ -193,7 +193,7 @@ func TestBucketCache_PartialHit(t *testing.T) {
		Type:  qbtypes.RequestTypeTimeSeries,
		Value: createTestTimeSeries("A", 1000, 3000, 1000),
	}
-	bc.Put(context.Background(), valuer.UUID{}, query1, result1)
+	bc.Put(context.Background(), valuer.UUID{}, query1, qbtypes.Step{}, result1)

	// Wait for cache write
	time.Sleep(10 * time.Millisecond)
@@ -226,7 +226,7 @@ func TestBucketCache_MultipleBuckets(t *testing.T) {
		startMs:     1000,
		endMs:       2000,
	}
-	bc.Put(context.Background(), valuer.UUID{}, query1, &qbtypes.Result{
+	bc.Put(context.Background(), valuer.UUID{}, query1, qbtypes.Step{}, &qbtypes.Result{
		Type:  qbtypes.RequestTypeTimeSeries,
		Value: createTestTimeSeries("A", 1000, 2000, 100),
	})
@@ -236,7 +236,7 @@ func TestBucketCache_MultipleBuckets(t *testing.T) {
		startMs:     3000,
		endMs:       4000,
	}
-	bc.Put(context.Background(), valuer.UUID{}, query2, &qbtypes.Result{
+	bc.Put(context.Background(), valuer.UUID{}, query2, qbtypes.Step{}, &qbtypes.Result{
		Type:  qbtypes.RequestTypeTimeSeries,
		Value: createTestTimeSeries("A", 3000, 4000, 100),
	})
@@ -284,7 +284,7 @@ func TestBucketCache_FluxInterval(t *testing.T) {
	}

	// This should not be cached due to flux interval
-	bc.Put(context.Background(), valuer.UUID{}, query, result)
+	bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)

	// Wait a bit
	time.Sleep(10 * time.Millisecond)
@@ -354,7 +354,7 @@ func TestBucketCache_MergeTimeSeriesResults(t *testing.T) {
		startMs:     1000,
		endMs:       3000,
	}
-	bc.Put(context.Background(), valuer.UUID{}, query1, &qbtypes.Result{
+	bc.Put(context.Background(), valuer.UUID{}, query1, qbtypes.Step{}, &qbtypes.Result{
		Type: qbtypes.RequestTypeTimeSeries,
		Value: &qbtypes.TimeSeriesData{
			QueryName: "A",
@@ -370,7 +370,7 @@ func TestBucketCache_MergeTimeSeriesResults(t *testing.T) {
		startMs:     3000,
		endMs:       5000,
	}
-	bc.Put(context.Background(), valuer.UUID{}, query2, &qbtypes.Result{
+	bc.Put(context.Background(), valuer.UUID{}, query2, qbtypes.Step{}, &qbtypes.Result{
		Type: qbtypes.RequestTypeTimeSeries,
		Value: &qbtypes.TimeSeriesData{
			QueryName: "A",
@@ -445,7 +445,7 @@ func TestBucketCache_RawData(t *testing.T) {
		Value: rawData,
	}

-	bc.Put(context.Background(), valuer.UUID{}, query, result)
+	bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)
	time.Sleep(10 * time.Millisecond)

	cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
@@ -485,7 +485,7 @@ func TestBucketCache_ScalarData(t *testing.T) {
		Value: scalarData,
	}

-	bc.Put(context.Background(), valuer.UUID{}, query, result)
+	bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)
	time.Sleep(10 * time.Millisecond)

	cached, missing := bc.GetMissRanges(context.Background(), valuer.UUID{}, query, qbtypes.Step{Duration: 1000})
@@ -513,7 +513,7 @@ func TestBucketCache_EmptyFingerprint(t *testing.T) {
		Value: createTestTimeSeries("A", 1000, 5000, 1000),
	}

-	bc.Put(context.Background(), valuer.UUID{}, query, result)
+	bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)
	time.Sleep(10 * time.Millisecond)

	// Should still be able to retrieve
@@ -568,7 +568,7 @@ func TestBucketCache_ConcurrentAccess(t *testing.T) {
				Type:  qbtypes.RequestTypeTimeSeries,
				Value: createTestTimeSeries(fmt.Sprintf("Q%d", id), query.startMs, query.endMs, 100),
			}
-			bc.Put(context.Background(), valuer.UUID{}, query, result)
+			bc.Put(context.Background(), valuer.UUID{}, query, qbtypes.Step{}, result)
			done <- true
		}(i)
	}
@@ -628,7 +628,7 @@ func TestBucketCache_GetMissRanges_FluxInterval(t *testing.T) {
		},
	}

-	bc.Put(ctx, orgID, query, cachedResult)
+	bc.Put(ctx, orgID, query, qbtypes.Step{}, cachedResult)

	// Get miss ranges
	cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
@@ -690,7 +690,7 @@ func TestBucketCache_Put_FluxIntervalTrimming(t *testing.T) {
	}

	// Put the result
-	bc.Put(ctx, orgID, query, result)
+	bc.Put(ctx, orgID, query, qbtypes.Step{}, result)

	// Retrieve cached data
	cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
@@ -760,7 +760,7 @@ func TestBucketCache_Put_EntireRangeInFluxInterval(t *testing.T) {
	}

	// Put the result - should not cache anything
-	bc.Put(ctx, orgID, query, result)
+	bc.Put(ctx, orgID, query, qbtypes.Step{}, result)

	// Try to get cached data - should have no cached data
	cached, missing := bc.GetMissRanges(ctx, orgID, query, qbtypes.Step{Duration: 1000})
@@ -878,7 +878,7 @@ func TestBucketCache_EmptyDataHandling(t *testing.T) {
			}

			// Put the result
-			bc.Put(ctx, orgID, query, tt.result)
+			bc.Put(ctx, orgID, query, qbtypes.Step{}, tt.result)

			// Wait a bit for cache to be written
			time.Sleep(10 * time.Millisecond)
@@ -944,7 +944,7 @@ func TestBucketCache_PartialValues(t *testing.T) {
	}

	// Put the result
-	bc.Put(ctx, orgID, query, result)
+	bc.Put(ctx, orgID, query, qbtypes.Step{}, result)

	// Wait for cache to be written
	time.Sleep(10 * time.Millisecond)
@@ -1014,7 +1014,7 @@ func TestBucketCache_AllPartialValues(t *testing.T) {
	}

	// Put the result
-	bc.Put(ctx, orgID, query, result)
+	bc.Put(ctx, orgID, query, qbtypes.Step{}, result)

	// Wait for cache to be written
	time.Sleep(10 * time.Millisecond)
@@ -1075,7 +1075,7 @@ func TestBucketCache_FilteredCachedResults(t *testing.T) {
	}

	// Cache the wide range
-	bc.Put(ctx, orgID, query1, result1)
+	bc.Put(ctx, orgID, query1, qbtypes.Step{}, result1)
	time.Sleep(10 * time.Millisecond)

	// Now query for a smaller range (2000-3500ms)
@@ -1246,7 +1246,7 @@ func TestBucketCache_PartialValueDetection(t *testing.T) {
	}

	// Put the result
-	bc.Put(ctx, orgID, query, result)
+	bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
	time.Sleep(10 * time.Millisecond)

	// Get cached data
@@ -1300,7 +1300,7 @@ func TestBucketCache_PartialValueDetection(t *testing.T) {
	}

	// Put the result
-	bc.Put(ctx, orgID, query, result)
+	bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
	time.Sleep(10 * time.Millisecond)

	// Get cached data
@@ -1352,7 +1352,7 @@ func TestBucketCache_PartialValueDetection(t *testing.T) {
	}

	// Put the result
-	bc.Put(ctx, orgID, query, result)
+	bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
	time.Sleep(10 * time.Millisecond)

	// Get cached data
@@ -1409,7 +1409,7 @@ func TestBucketCache_NoCache(t *testing.T) {
	}

	// Put the result in cache
-	bc.Put(ctx, orgID, query, result)
+	bc.Put(ctx, orgID, query, qbtypes.Step{}, result)
	time.Sleep(10 * time.Millisecond)

	// Verify data is cached

@@ -176,7 +176,7 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
			lblVals = append(lblVals, *val)
			lblObjs = append(lblObjs, &qbtypes.Label{
				Key:   telemetrytypes.TelemetryFieldKey{Name: name},
-				Value: val,
+				Value: *val, // Dereference to get the actual string value
			})

		default:

@@ -17,5 +17,5 @@ type BucketCache interface {
	// cached portion + list of gaps to fetch
	GetMissRanges(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, step qbtypes.Step) (cached *qbtypes.Result, missing []*qbtypes.TimeRange)
	// store fresh buckets for future hits
-	Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, fresh *qbtypes.Result)
+	Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Query, step qbtypes.Step, fresh *qbtypes.Result)
}
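The interface change above means every caller now threads the query step through both methods. The sketch below shows that call pattern with toy stand-in types; none of the type or variable names are the SigNoz ones, and the in-memory cache is deliberately naive.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Toy stand-ins that only mirror the updated BucketCache method shapes.
type Step struct{ Duration time.Duration }
type TimeRange struct{ From, To uint64 }
type Result struct{ From, To uint64 }

type BucketCache interface {
	GetMissRanges(ctx context.Context, q string, step Step) (cached []Result, missing []TimeRange)
	Put(ctx context.Context, q string, step Step, fresh Result)
}

// memCache remembers whole ranges per query and reports the full window as
// missing until something has been stored for that query.
type memCache struct{ store map[string][]Result }

func (m *memCache) GetMissRanges(_ context.Context, q string, _ Step) ([]Result, []TimeRange) {
	if rs, ok := m.store[q]; ok {
		return rs, nil
	}
	return nil, []TimeRange{{From: 0, To: 10000}}
}

func (m *memCache) Put(_ context.Context, q string, _ Step, fresh Result) {
	m.store[q] = append(m.store[q], fresh)
}

func main() {
	ctx := context.Background()
	bc := &memCache{store: map[string][]Result{}}
	step := Step{Duration: time.Minute} // the same step travels with both calls

	_, missing := bc.GetMissRanges(ctx, "query-A", step)
	for _, r := range missing {
		bc.Put(ctx, "query-A", step, Result{From: r.From, To: r.To})
	}
	cached, missing := bc.GetMissRanges(ctx, "query-A", step)
	fmt.Println(cached, missing) // [{0 10000}] []
}
```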
pkg/querier/merge_metadata_test.go (new file, 223 lines)
@@ -0,0 +1,223 @@
package querier
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMergeTimeSeriesResults_PreservesMetadata(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Create cached data with metadata
|
||||
cachedValue := &qbtypes.TimeSeriesData{
|
||||
QueryName: "testQuery",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "count_result",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "requests",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
{Timestamp: 2000, Value: 20},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Index: 1,
|
||||
Alias: "sum_result",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "bytes",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "backend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 100},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create fresh results with some overlapping and new data
|
||||
freshResults := []*qbtypes.Result{
|
||||
{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "testQuery",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "count_result", // Same alias
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "requests", // Same unit
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 3000, Value: 30},
|
||||
{Timestamp: 4000, Value: 40},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Index: 2, // New aggregation
|
||||
Alias: "avg_result",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "milliseconds",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "api",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 50},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Merge the results
|
||||
result := q.mergeTimeSeriesResults(cachedValue, freshResults)
|
||||
|
||||
// Verify the result
|
||||
require.NotNil(t, result)
|
||||
assert.Equal(t, "testQuery", result.QueryName)
|
||||
assert.Len(t, result.Aggregations, 3) // Should have 3 aggregations
|
||||
|
||||
// Check each aggregation
|
||||
for _, agg := range result.Aggregations {
|
||||
switch agg.Index {
|
||||
case 0:
|
||||
assert.Equal(t, "count_result", agg.Alias)
|
||||
assert.Equal(t, "requests", agg.Meta.Unit)
|
||||
assert.Len(t, agg.Series, 1)
|
||||
// Should have merged values
|
||||
assert.Len(t, agg.Series[0].Values, 4)
|
||||
case 1:
|
||||
assert.Equal(t, "sum_result", agg.Alias)
|
||||
assert.Equal(t, "bytes", agg.Meta.Unit)
|
||||
assert.Len(t, agg.Series, 1)
|
||||
assert.Len(t, agg.Series[0].Values, 1)
|
||||
case 2:
|
||||
assert.Equal(t, "avg_result", agg.Alias)
|
||||
assert.Equal(t, "milliseconds", agg.Meta.Unit)
|
||||
assert.Len(t, agg.Series, 1)
|
||||
assert.Len(t, agg.Series[0].Values, 1)
|
||||
default:
|
||||
t.Fatalf("Unexpected aggregation index: %d", agg.Index)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeTimeSeriesResults_HandlesEmptyMetadata(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Create cached data without metadata
|
||||
cachedValue := &qbtypes.TimeSeriesData{
|
||||
QueryName: "testQuery",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create fresh results with metadata
|
||||
freshResults := []*qbtypes.Result{
|
||||
{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "testQuery",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "new_alias",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "items",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 2000, Value: 20},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Merge the results
|
||||
result := q.mergeTimeSeriesResults(cachedValue, freshResults)
|
||||
|
||||
// Verify the metadata from fresh results is preserved
|
||||
require.NotNil(t, result)
|
||||
assert.Len(t, result.Aggregations, 1)
|
||||
assert.Equal(t, "new_alias", result.Aggregations[0].Alias)
|
||||
assert.Equal(t, "items", result.Aggregations[0].Meta.Unit)
|
||||
}
|
||||
pkg/querier/postprocess.go (new file, 333 lines)
@@ -0,0 +1,333 @@
package querier
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
)
|
||||
|
||||
// PostProcessResults applies postprocessing to query results
|
||||
func (q *querier) PostProcessResults(results map[string]any, req *qbtypes.QueryRangeRequest) (map[string]any, error) {
|
||||
// Convert results to typed format for processing
|
||||
typedResults := make(map[string]*qbtypes.Result)
|
||||
for name, result := range results {
|
||||
typedResults[name] = &qbtypes.Result{
|
||||
Value: result,
|
||||
}
|
||||
}
|
||||
|
||||
// Apply postprocessing based on query types
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
if result, ok := typedResults[spec.Name]; ok {
|
||||
result = postProcessBuilderQuery(q, result, spec, req)
|
||||
typedResults[spec.Name] = result
|
||||
}
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
if result, ok := typedResults[spec.Name]; ok {
|
||||
result = postProcessBuilderQuery(q, result, spec, req)
|
||||
typedResults[spec.Name] = result
|
||||
}
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
if result, ok := typedResults[spec.Name]; ok {
|
||||
result = postProcessMetricQuery(q, result, spec, req)
|
||||
typedResults[spec.Name] = result
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply formula calculations
|
||||
typedResults = q.applyFormulas(typedResults, req)
|
||||
|
||||
// Filter out disabled queries
|
||||
typedResults = q.filterDisabledQueries(typedResults, req)
|
||||
|
||||
// Apply fill gaps if requested
|
||||
if req.FormatOptions != nil && req.FormatOptions.FillGaps {
|
||||
typedResults = q.fillGaps(typedResults, req)
|
||||
}
|
||||
|
||||
// Apply table formatting for UI if requested
|
||||
if req.FormatOptions != nil && req.FormatOptions.FormatTableResultForUI && req.RequestType == qbtypes.RequestTypeScalar {
|
||||
// Format results as a table - this merges all queries into a single table
|
||||
tableResult := q.formatScalarResultsAsTable(typedResults, req)
|
||||
|
||||
// Return the table under the first query's name so it gets included in results
|
||||
if len(req.CompositeQuery.Queries) > 0 {
|
||||
var firstQueryName string
|
||||
switch spec := req.CompositeQuery.Queries[0].Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
firstQueryName = spec.Name
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
firstQueryName = spec.Name
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
firstQueryName = spec.Name
|
||||
}
|
||||
|
||||
if firstQueryName != "" && tableResult["table"] != nil {
|
||||
// Return table under first query name
|
||||
return map[string]any{firstQueryName: tableResult["table"]}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return tableResult, nil
|
||||
}
|
||||
|
||||
// Convert back to map[string]any
|
||||
finalResults := make(map[string]any)
|
||||
for name, result := range typedResults {
|
||||
finalResults[name] = result.Value
|
||||
}
|
||||
|
||||
return finalResults, nil
|
||||
}
|
||||
|
||||
// postProcessBuilderQuery applies postprocessing to a single builder query result
|
||||
func postProcessBuilderQuery[T any](
|
||||
q *querier,
|
||||
result *qbtypes.Result,
|
||||
query qbtypes.QueryBuilderQuery[T],
|
||||
req *qbtypes.QueryRangeRequest,
|
||||
) *qbtypes.Result {
|
||||
|
||||
// Apply functions
|
||||
if len(query.Functions) > 0 {
|
||||
result = q.applyFunctions(result, query.Functions)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// postProcessMetricQuery applies postprocessing to a metric query result
|
||||
func postProcessMetricQuery(
|
||||
q *querier,
|
||||
result *qbtypes.Result,
|
||||
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
|
||||
req *qbtypes.QueryRangeRequest,
|
||||
) *qbtypes.Result {
|
||||
// Apply having clause
|
||||
result = q.applyHavingClause(result, query.Having)
|
||||
|
||||
// Apply series limit
|
||||
if query.Limit > 0 {
|
||||
result = q.applySeriesLimit(result, query.Limit, query.Order)
|
||||
}
|
||||
|
||||
// Apply functions
|
||||
if len(query.Functions) > 0 {
|
||||
result = q.applyFunctions(result, query.Functions)
|
||||
}
|
||||
|
||||
// Apply reduce to for scalar request type
|
||||
if req.RequestType == qbtypes.RequestTypeScalar {
|
||||
// For metrics, prefer the ReduceTo field from first aggregation if set
|
||||
if len(query.Aggregations) > 0 && query.Aggregations[0].ReduceTo != qbtypes.ReduceToUnknown {
|
||||
result = q.applyMetricReduceTo(result, query.Aggregations[0].ReduceTo)
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// applyMetricReduceTo applies reduce to operation using the metric's ReduceTo field
|
||||
func (q *querier) applyMetricReduceTo(result *qbtypes.Result, reduceOp qbtypes.ReduceTo) *qbtypes.Result {
|
||||
tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
return result
|
||||
}
|
||||
|
||||
if tsData != nil {
|
||||
for _, agg := range tsData.Aggregations {
|
||||
for i, series := range agg.Series {
|
||||
// Use the FunctionReduceTo helper
|
||||
reducedSeries := qbtypes.FunctionReduceTo(series, reduceOp)
|
||||
agg.Series[i] = reducedSeries
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// applyHavingClause filters results based on having conditions
|
||||
func (q *querier) applyHavingClause(result *qbtypes.Result, having *qbtypes.Having) *qbtypes.Result {
|
||||
// TODO: Implement having clause evaluation once expression parser is available
|
||||
// For now, we skip having clause processing
|
||||
return result
|
||||
}
|
||||
|
||||
// evaluateHavingExpression evaluates a having expression
|
||||
// TODO: Implement this once we have an expression parser for having clauses
|
||||
func evaluateHavingExpression(value float64, expression string) bool {
|
||||
// For now, always return true (no filtering)
|
||||
return true
|
||||
}
|
||||
|
||||
// applySeriesLimit limits the number of series in the result
|
||||
func (q *querier) applySeriesLimit(result *qbtypes.Result, limit int, orderBy []qbtypes.OrderBy) *qbtypes.Result {
|
||||
tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
return result
|
||||
}
|
||||
|
||||
if tsData != nil {
|
||||
for _, agg := range tsData.Aggregations {
|
||||
if len(agg.Series) <= limit {
|
||||
continue
|
||||
}
|
||||
|
||||
// Sort series based on orderBy
|
||||
q.sortSeries(agg.Series, orderBy)
|
||||
|
||||
// Keep only the top 'limit' series
|
||||
agg.Series = agg.Series[:limit]
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// sortSeries sorts time series based on orderBy criteria
|
||||
func (q *querier) sortSeries(series []*qbtypes.TimeSeries, orderBy []qbtypes.OrderBy) {
|
||||
if len(orderBy) == 0 {
|
||||
// Default: sort by value (average) in descending order
|
||||
sort.SliceStable(series, func(i, j int) bool {
|
||||
avgI := calculateAverage(series[i].Values)
|
||||
avgJ := calculateAverage(series[j].Values)
|
||||
return avgI > avgJ
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Sort by specified criteria
|
||||
sort.SliceStable(series, func(i, j int) bool {
|
||||
for _, order := range orderBy {
|
||||
cmp := 0
|
||||
|
||||
if order.Key.Name == "#value" {
|
||||
// Sort by value
|
||||
avgI := calculateAverage(series[i].Values)
|
||||
avgJ := calculateAverage(series[j].Values)
|
||||
if avgI < avgJ {
|
||||
cmp = -1
|
||||
} else if avgI > avgJ {
|
||||
cmp = 1
|
||||
}
|
||||
} else {
|
||||
// Sort by label
|
||||
valI := getLabelValue(series[i].Labels, order.Key.Name)
|
||||
valJ := getLabelValue(series[j].Labels, order.Key.Name)
|
||||
cmp = strings.Compare(valI, valJ)
|
||||
}
|
||||
|
||||
if cmp != 0 {
|
||||
if order.Direction == qbtypes.OrderDirectionAsc {
|
||||
return cmp < 0
|
||||
}
|
||||
return cmp > 0
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
// calculateAverage calculates the average of time series values
|
||||
func calculateAverage(values []*qbtypes.TimeSeriesValue) float64 {
|
||||
if len(values) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
sum := 0.0
|
||||
count := 0
|
||||
for _, v := range values {
|
||||
if !math.IsNaN(v.Value) && !math.IsInf(v.Value, 0) {
|
||||
sum += v.Value
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
return sum / float64(count)
|
||||
}
|
||||
|
||||
// getLabelValue gets the value of a label by name
|
||||
func getLabelValue(labels []*qbtypes.Label, name string) string {
|
||||
for _, label := range labels {
|
||||
if label.Key.Name == name {
|
||||
return fmt.Sprintf("%v", label.Value)
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// applyFunctions applies functions to time series data
|
||||
func (q *querier) applyFunctions(result *qbtypes.Result, functions []qbtypes.Function) *qbtypes.Result {
|
||||
tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
return result
|
||||
}
|
||||
|
||||
if tsData != nil {
|
||||
for _, agg := range tsData.Aggregations {
|
||||
for i, series := range agg.Series {
|
||||
agg.Series[i] = qbtypes.ApplyFunctions(functions, series)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// applyReduceTo reduces time series to a single value
|
||||
func (q *querier) applyReduceTo(result *qbtypes.Result, secondaryAggs []qbtypes.SecondaryAggregation) *qbtypes.Result {
|
||||
tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok {
|
||||
return result
|
||||
}
|
||||
|
||||
// For now, we'll use the first secondary aggregation's expression
|
||||
// In the future, this might need to handle multiple secondary aggregations
|
||||
expression := ""
|
||||
if len(secondaryAggs) > 0 {
|
||||
expression = secondaryAggs[0].Expression
|
||||
}
|
||||
|
||||
if expression == "" {
|
||||
return result
|
||||
}
|
||||
|
||||
// Map expression to reduce operation
|
||||
var reduceOp qbtypes.ReduceTo
|
||||
switch expression {
|
||||
case "last":
|
||||
reduceOp = qbtypes.ReduceToLast
|
||||
case "sum":
|
||||
reduceOp = qbtypes.ReduceToSum
|
||||
case "avg":
|
||||
reduceOp = qbtypes.ReduceToAvg
|
||||
case "min":
|
||||
reduceOp = qbtypes.ReduceToMin
|
||||
case "max":
|
||||
reduceOp = qbtypes.ReduceToMax
|
||||
default:
|
||||
// Unknown reduce operation, return as-is
|
||||
return result
|
||||
}
|
||||
|
||||
for _, agg := range tsData.Aggregations {
|
||||
for i, series := range agg.Series {
|
||||
// Use the FunctionReduceTo helper
|
||||
reducedSeries := qbtypes.FunctionReduceTo(series, reduceOp)
|
||||
agg.Series[i] = reducedSeries
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
pkg/querier/postprocess_formula.go (new file, 125 lines)
@@ -0,0 +1,125 @@
package querier
|
||||
|
||||
import (
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
)
|
||||
|
||||
// applyFormulas processes formula queries in the composite query
|
||||
func (q *querier) applyFormulas(results map[string]*qbtypes.Result, req *qbtypes.QueryRangeRequest) map[string]*qbtypes.Result {
|
||||
// Collect formula queries
|
||||
formulaQueries := make(map[string]qbtypes.QueryBuilderFormula)
|
||||
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
if query.Type == qbtypes.QueryTypeFormula {
|
||||
if formula, ok := query.Spec.(qbtypes.QueryBuilderFormula); ok {
|
||||
formulaQueries[formula.Name] = formula
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process each formula
|
||||
for name, formula := range formulaQueries {
|
||||
// Prepare time series data for formula evaluation
|
||||
timeSeriesData := make(map[string]*qbtypes.TimeSeriesData)
|
||||
|
||||
// Extract time series data from results
|
||||
for queryName, result := range results {
|
||||
if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok {
|
||||
timeSeriesData[queryName] = tsData
|
||||
}
|
||||
}
|
||||
|
||||
// Create formula evaluator
|
||||
canDefaultZero := make(map[string]bool)
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
// Metrics can default to zero for rate/increase operations
|
||||
canDefaultZero[spec.Name] = true
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
canDefaultZero[spec.Name] = false
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
canDefaultZero[spec.Name] = false
|
||||
}
|
||||
}
|
||||
|
||||
evaluator, err := qbtypes.NewFormulaEvaluator(formula.Expression, canDefaultZero)
|
||||
if err != nil {
|
||||
q.logger.Error("failed to create formula evaluator", "error", err, "formula", name)
|
||||
continue
|
||||
}
|
||||
|
||||
// Evaluate the formula
|
||||
formulaSeries, err := evaluator.EvaluateFormula(timeSeriesData)
|
||||
if err != nil {
|
||||
q.logger.Error("failed to evaluate formula", "error", err, "formula", name)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create result for formula
|
||||
formulaResult := &qbtypes.TimeSeriesData{
|
||||
QueryName: name,
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Series: formulaSeries,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Apply functions if any
|
||||
if len(formula.Functions) > 0 {
|
||||
for _, agg := range formulaResult.Aggregations {
|
||||
for i, series := range agg.Series {
|
||||
agg.Series[i] = qbtypes.ApplyFunctions(formula.Functions, series)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
results[name] = &qbtypes.Result{
|
||||
Value: formulaResult,
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// filterDisabledQueries removes results for disabled queries
|
||||
func (q *querier) filterDisabledQueries(results map[string]*qbtypes.Result, req *qbtypes.QueryRangeRequest) map[string]*qbtypes.Result {
|
||||
filtered := make(map[string]*qbtypes.Result)
|
||||
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
var queryName string
|
||||
var disabled bool
|
||||
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
queryName = spec.Name
|
||||
disabled = spec.Disabled
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
queryName = spec.Name
|
||||
disabled = spec.Disabled
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
queryName = spec.Name
|
||||
disabled = spec.Disabled
|
||||
case qbtypes.QueryBuilderFormula:
|
||||
queryName = spec.Name
|
||||
// Formulas don't have a disabled flag, include them
|
||||
disabled = false
|
||||
case qbtypes.PromQuery:
|
||||
queryName = spec.Name
|
||||
disabled = spec.Disabled
|
||||
case qbtypes.ClickHouseQuery:
|
||||
queryName = spec.Name
|
||||
disabled = spec.Disabled
|
||||
}
|
||||
|
||||
if !disabled {
|
||||
if result, ok := results[queryName]; ok {
|
||||
filtered[queryName] = result
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
pkg/querier/postprocess_gaps.go (new file, 702 lines)
@@ -0,0 +1,702 @@
package querier
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
// fillGaps fills missing data points with zeros in time series data
|
||||
func (q *querier) fillGaps(results map[string]*qbtypes.Result, req *qbtypes.QueryRangeRequest) map[string]*qbtypes.Result {
|
||||
// Only fill gaps for time series data
|
||||
if req.RequestType != qbtypes.RequestTypeTimeSeries {
|
||||
return results
|
||||
}
|
||||
|
||||
// Get the step interval from the first query
|
||||
var step int64 = 60000 // Default to 1 minute in milliseconds
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
if spec.StepInterval.Duration > 0 {
|
||||
step = int64(spec.StepInterval.Duration) / int64(time.Millisecond)
|
||||
break
|
||||
}
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
if spec.StepInterval.Duration > 0 {
|
||||
step = int64(spec.StepInterval.Duration) / int64(time.Millisecond)
|
||||
break
|
||||
}
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
if spec.StepInterval.Duration > 0 {
|
||||
step = int64(spec.StepInterval.Duration) / int64(time.Millisecond)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
startMs := int64(req.Start)
|
||||
endMs := int64(req.End)
|
||||
|
||||
for name, result := range results {
|
||||
tsData, ok := result.Value.(*qbtypes.TimeSeriesData)
|
||||
if !ok || tsData == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// If no aggregations, create an empty one
|
||||
if len(tsData.Aggregations) == 0 {
|
||||
tsData.Aggregations = []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{},
|
||||
Values: fillGapForSeries(nil, startMs, endMs, step),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Fill gaps for each series
|
||||
for _, agg := range tsData.Aggregations {
|
||||
if len(agg.Series) == 0 {
|
||||
// Create empty series if none exist
|
||||
agg.Series = []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{},
|
||||
Values: fillGapForSeries(nil, startMs, endMs, step),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
// Fill gaps for existing series
|
||||
for _, series := range agg.Series {
|
||||
series.Values = fillGapForSeries(series.Values, startMs, endMs, step)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
results[name] = result
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// fillGapForSeries fills gaps in a single time series
|
||||
func fillGapForSeries(values []*qbtypes.TimeSeriesValue, startMs, endMs, step int64) []*qbtypes.TimeSeriesValue {
|
||||
// Safeguard against invalid step
|
||||
if step <= 0 {
|
||||
step = 60000 // Default to 1 minute
|
||||
}
|
||||
|
||||
// Create a map of existing values
|
||||
valueMap := make(map[int64]float64)
|
||||
for _, v := range values {
|
||||
if v != nil && !v.Partial {
|
||||
valueMap[v.Timestamp] = v.Value
|
||||
}
|
||||
}
|
||||
|
||||
// Generate all expected timestamps
|
||||
var filledValues []*qbtypes.TimeSeriesValue
|
||||
for ts := startMs; ts <= endMs; ts += step {
|
||||
value := 0.0
|
||||
if v, ok := valueMap[ts]; ok {
|
||||
value = v
|
||||
}
|
||||
|
||||
filledValues = append(filledValues, &qbtypes.TimeSeriesValue{
|
||||
Timestamp: ts,
|
||||
Value: value,
|
||||
})
|
||||
}
|
||||
|
||||
return filledValues
|
||||
}
|
||||
|
||||
// formatScalarResultsAsTable formats scalar results as a table for UI display
|
||||
func (q *querier) formatScalarResultsAsTable(results map[string]*qbtypes.Result, req *qbtypes.QueryRangeRequest) map[string]any {
|
||||
if len(results) == 0 {
|
||||
return map[string]any{"table": &qbtypes.ScalarData{}}
|
||||
}
|
||||
|
||||
// Convert all results to ScalarData first
|
||||
for name, result := range results {
|
||||
if tsData, ok := result.Value.(*qbtypes.TimeSeriesData); ok {
|
||||
// Convert TimeSeriesData to ScalarData
|
||||
columns := []*qbtypes.ColumnDescriptor{}
|
||||
data := [][]any{}
|
||||
|
||||
// Extract group columns from labels
|
||||
if len(tsData.Aggregations) > 0 && len(tsData.Aggregations[0].Series) > 0 {
|
||||
// Get group columns from the first series
|
||||
for _, label := range tsData.Aggregations[0].Series[0].Labels {
|
||||
col := &qbtypes.ColumnDescriptor{
|
||||
TelemetryFieldKey: label.Key,
|
||||
QueryName: name,
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
}
|
||||
// Ensure Name is set
|
||||
if col.Name == "" {
|
||||
col.Name = label.Key.Name
|
||||
}
|
||||
columns = append(columns, col)
|
||||
}
|
||||
}
|
||||
|
||||
// Add aggregation columns
|
||||
for _, agg := range tsData.Aggregations {
|
||||
col := &qbtypes.ColumnDescriptor{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: agg.Alias,
|
||||
},
|
||||
QueryName: name,
|
||||
AggregationIndex: int64(agg.Index),
|
||||
Meta: agg.Meta,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
}
|
||||
if col.Name == "" {
|
||||
col.Name = fmt.Sprintf("__result_%d", agg.Index)
|
||||
}
|
||||
columns = append(columns, col)
|
||||
}
|
||||
|
||||
// Convert series to rows
|
||||
for seriesIdx, series := range tsData.Aggregations[0].Series {
|
||||
row := make([]any, len(columns))
|
||||
colIdx := 0
|
||||
|
||||
// Add group values
|
||||
for _, label := range series.Labels {
|
||||
row[colIdx] = label.Value
|
||||
colIdx++
|
||||
}
|
||||
|
||||
// Add aggregation values (last value from each aggregation)
|
||||
for _, agg := range tsData.Aggregations {
|
||||
if seriesIdx < len(agg.Series) && len(agg.Series[seriesIdx].Values) > 0 {
|
||||
value := agg.Series[seriesIdx].Values[len(agg.Series[seriesIdx].Values)-1].Value
|
||||
row[colIdx] = roundToTwoDecimal(value)
|
||||
} else {
|
||||
row[colIdx] = 0.0
|
||||
}
|
||||
colIdx++
|
||||
}
|
||||
|
||||
data = append(data, row)
|
||||
}
|
||||
|
||||
results[name] = &qbtypes.Result{
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: columns,
|
||||
Data: data,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we have a single result that already contains all columns from multiple queries
|
||||
// This happens when the SQL query already joins multiple queries
|
||||
if len(results) == 1 {
|
||||
for queryName, result := range results {
|
||||
if scalarData, ok := result.Value.(*qbtypes.ScalarData); ok {
|
||||
// Check if this result already has columns from multiple queries
|
||||
queryNamesInColumns := make(map[string]bool)
|
||||
for _, col := range scalarData.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeAggregation && col.QueryName != "" {
|
||||
queryNamesInColumns[col.QueryName] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Debug: log what we found
|
||||
if q.logger != nil {
|
||||
q.logger.Debug("Single result analysis",
|
||||
"queryNamesInColumns", queryNamesInColumns,
|
||||
"num_columns", len(scalarData.Columns),
|
||||
"num_rows", len(scalarData.Data))
|
||||
}
|
||||
|
||||
// If we have columns from multiple queries, we need to deduplicate rows
|
||||
if len(queryNamesInColumns) > 1 {
|
||||
if q.logger != nil {
|
||||
q.logger.Debug("Deduplicating scalar rows")
|
||||
}
|
||||
deduplicatedResult := q.deduplicateScalarRows(scalarData)
|
||||
// Return the deduplicated result under the original query name
|
||||
return map[string]any{queryName: deduplicatedResult["table"]}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now merge all ScalarData results
|
||||
// First, collect all unique group columns
|
||||
groupColumnMap := make(map[string]*qbtypes.ColumnDescriptor)
|
||||
groupColumnOrder := []string{}
|
||||
|
||||
for _, result := range results {
|
||||
if scalarData, ok := result.Value.(*qbtypes.ScalarData); ok {
|
||||
for _, col := range scalarData.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeGroup {
|
||||
if _, exists := groupColumnMap[col.Name]; !exists {
|
||||
groupColumnMap[col.Name] = col
|
||||
groupColumnOrder = append(groupColumnOrder, col.Name)
|
||||
if q.logger != nil {
|
||||
q.logger.Debug("Found group column", "name", col.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Debug: log the group columns we found
|
||||
if q.logger != nil {
|
||||
q.logger.Debug("Group columns collected",
|
||||
"groupColumnOrder", groupColumnOrder,
|
||||
"num_group_columns", len(groupColumnOrder))
|
||||
}
|
||||
|
||||
// Build final columns
|
||||
mergedColumns := []*qbtypes.ColumnDescriptor{}
|
||||
|
||||
// Add group columns
|
||||
for _, colName := range groupColumnOrder {
|
||||
mergedColumns = append(mergedColumns, groupColumnMap[colName])
|
||||
}
|
||||
|
||||
// Add aggregation columns from each query
|
||||
queryNames := []string{}
|
||||
for name := range results {
|
||||
queryNames = append(queryNames, name)
|
||||
}
|
||||
sort.Strings(queryNames)
|
||||
|
||||
for _, queryName := range queryNames {
|
||||
result := results[queryName]
|
||||
if scalarData, ok := result.Value.(*qbtypes.ScalarData); ok {
|
||||
for _, col := range scalarData.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeAggregation {
|
||||
newCol := &qbtypes.ColumnDescriptor{
|
||||
TelemetryFieldKey: col.TelemetryFieldKey,
|
||||
QueryName: queryName,
|
||||
AggregationIndex: col.AggregationIndex,
|
||||
Meta: col.Meta,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
}
|
||||
mergedColumns = append(mergedColumns, newCol)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Build a map of unique rows by group values
|
||||
type rowKey struct {
|
||||
values []string
|
||||
}
|
||||
rowMap := make(map[string][]any)
|
||||
|
||||
// Debug: log the input data
|
||||
if q.logger != nil {
|
||||
for _, queryName := range queryNames {
|
||||
if scalarData, ok := results[queryName].Value.(*qbtypes.ScalarData); ok {
|
||||
q.logger.Debug("Processing query result",
|
||||
"query", queryName,
|
||||
"num_columns", len(scalarData.Columns),
|
||||
"num_rows", len(scalarData.Data),
|
||||
"columns", func() []string {
|
||||
names := []string{}
|
||||
for _, col := range scalarData.Columns {
|
||||
names = append(names, fmt.Sprintf("%s(%s)", col.Name, col.Type))
|
||||
}
|
||||
return names
|
||||
}())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process each query's results
|
||||
for _, queryName := range queryNames {
|
||||
result := results[queryName]
|
||||
if scalarData, ok := result.Value.(*qbtypes.ScalarData); ok {
|
||||
// Map column indices
|
||||
groupIndices := make(map[string]int)
|
||||
aggIndices := []int{}
|
||||
|
||||
for i, col := range scalarData.Columns {
|
||||
if col.Type == qbtypes.ColumnTypeGroup {
|
||||
groupIndices[col.Name] = i
|
||||
} else if col.Type == qbtypes.ColumnTypeAggregation {
|
||||
aggIndices = append(aggIndices, i)
|
||||
}
|
||||
}
|
||||
|
||||
// Process each row
|
||||
for rowIdx, row := range scalarData.Data {
|
||||
// Build key from group values in consistent order
|
||||
keyParts := make([]string, len(groupColumnOrder))
|
||||
for i, colName := range groupColumnOrder {
|
||||
if idx, ok := groupIndices[colName]; ok && idx < len(row) {
|
||||
// Convert the value to string properly
|
||||
switch v := row[idx].(type) {
|
||||
case string:
|
||||
keyParts[i] = v
|
||||
case *string:
|
||||
if v != nil {
|
||||
keyParts[i] = *v
|
||||
} else {
|
||||
keyParts[i] = "n/a"
|
							}
						default:
							keyParts[i] = fmt.Sprintf("%v", v)
						}
					} else {
						keyParts[i] = "n/a"
					}
				}

				// Debug first few rows
				if q.logger != nil && rowIdx < 3 {
					q.logger.Debug("Building key",
						"query", queryName,
						"rowIdx", rowIdx,
						"groupColumnOrder", groupColumnOrder,
						"groupIndices", groupIndices,
						"row", row,
						"keyParts", keyParts)
				}

				// Create a unique key by joining parts with a delimiter
				key := strings.Join(keyParts, "|")

				// Debug: log the key generation
				if q.logger != nil {
					q.logger.Debug("Generated row key",
						"query", queryName,
						"key", key,
						"keyParts", strings.Join(keyParts, ","),
						"numKeyParts", len(keyParts),
						"firstRowValue", func() string {
							if len(row) > 0 {
								return fmt.Sprintf("%v", row[0])
							}
							return "empty"
						}())
				}

				// Initialize row if needed
				if _, exists := rowMap[key]; !exists {
					rowMap[key] = make([]any, len(mergedColumns))
					// Set group values
					for i, colName := range groupColumnOrder {
						if idx, ok := groupIndices[colName]; ok && idx < len(row) {
							// Store the actual value, not a pointer
							switch v := row[idx].(type) {
							case *string:
								if v != nil {
									rowMap[key][i] = *v
								} else {
									rowMap[key][i] = "n/a"
								}
							default:
								rowMap[key][i] = v
							}
						} else {
							rowMap[key][i] = "n/a"
						}
					}
					// Initialize all aggregation values to "n/a"
					for i := len(groupColumnOrder); i < len(mergedColumns); i++ {
						rowMap[key][i] = "n/a"
					}
				}

				// Set aggregation values for this query
				aggStartIdx := len(groupColumnOrder)
				for _, queryName2 := range queryNames {
					if queryName2 == queryName {
						// Copy aggregation values
						for i, aggIdx := range aggIndices {
							if aggIdx < len(row) {
								rowMap[key][aggStartIdx+i] = row[aggIdx]
							}
						}
						break
					}
					// Skip columns for other queries
					result2 := results[queryName2]
					if scalarData2, ok := result2.Value.(*qbtypes.ScalarData); ok {
						aggCount := 0
						for _, col := range scalarData2.Columns {
							if col.Type == qbtypes.ColumnTypeAggregation {
								aggCount++
							}
						}
						aggStartIdx += aggCount
					}
				}
			}
		}
	}

	// Convert map to slice
	mergedData := [][]any{}
	for _, row := range rowMap {
		mergedData = append(mergedData, row)
	}

	// Sort rows by first aggregation column (descending)
	if len(mergedColumns) > len(groupColumnOrder) {
		sort.SliceStable(mergedData, func(i, j int) bool {
			valI := mergedData[i][len(groupColumnOrder)]
			valJ := mergedData[j][len(groupColumnOrder)]

			// Handle n/a values
			if valI == "n/a" {
				return false
			}
			if valJ == "n/a" {
				return true
			}

			// Compare numeric values
			switch vI := valI.(type) {
			case float64:
				if vJ, ok := valJ.(float64); ok {
					return vI > vJ
				}
			case int64:
				if vJ, ok := valJ.(int64); ok {
					return vI > vJ
				}
			case int:
				if vJ, ok := valJ.(int); ok {
					return vI > vJ
				}
			}

			return false
		})
	}

	return map[string]any{
		"table": &qbtypes.ScalarData{
			Columns: mergedColumns,
			Data:    mergedData,
		},
	}
}
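
For orientation, with two queries A and B grouped by service.name the returned table has roughly this shape (illustrative values, not taken from a real response):

// Columns: [service.name (group), A.__result_0, B.__result_0]
// Data:
//   ["svc1", 100.0, 50.0]   // svc1 present in both A and B
//   ["svc2", 200.0, "n/a"]  // svc2 only present in A, so B's column stays "n/a"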

// sortTableRows sorts the table rows based on the query order
func sortTableRows(rows [][]any, columns []*qbtypes.ColumnDescriptor, req *qbtypes.QueryRangeRequest) {
	// Get query names in order
	var queryNames []string
	for _, query := range req.CompositeQuery.Queries {
		switch spec := query.Spec.(type) {
		case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
			queryNames = append(queryNames, spec.Name)
		case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
			queryNames = append(queryNames, spec.Name)
		case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
			queryNames = append(queryNames, spec.Name)
		}
	}

	// Create a map of column indices by query name
	columnIndices := make(map[string][]int)
	for i, col := range columns {
		if col.Type == qbtypes.ColumnTypeAggregation && col.QueryName != "" {
			columnIndices[col.QueryName] = append(columnIndices[col.QueryName], i)
		}
	}

	// Sort in reverse order of query names (stable sort)
	for i := len(queryNames) - 1; i >= 0; i-- {
		queryName := queryNames[i]
		indices, ok := columnIndices[queryName]
		if !ok || len(indices) == 0 {
			continue
		}

		// Use the first aggregation column for this query
		colIdx := indices[0]

		sort.SliceStable(rows, func(i, j int) bool {
			valI := rows[i][colIdx]
			valJ := rows[j][colIdx]

			// Handle n/a values
			if valI == "n/a" && valJ == "n/a" {
				return false
			}
			if valI == "n/a" {
				return false
			}
			if valJ == "n/a" {
				return true
			}

			// Compare numeric values (default descending)
			if numI, ok := valI.(float64); ok {
				if numJ, ok := valJ.(float64); ok {
					return numI > numJ
				}
			}

			// Compare int64 values
			if numI, ok := valI.(int64); ok {
				if numJ, ok := valJ.(int64); ok {
					return numI > numJ
				}
			}

			// Compare int values
			if numI, ok := valI.(int); ok {
				if numJ, ok := valJ.(int); ok {
					return numI > numJ
				}
			}

			return false
		})
	}
}
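
sortTableRows relies on stable sorting: it sorts by the last query's column first and the first query's column last, so the first query ends up as the primary sort key. A minimal standalone sketch of that idiom with made-up rows:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// rows: [group, A value, B value]
	rows := [][]any{
		{"svc1", 10.0, 300.0},
		{"svc2", 10.0, 100.0},
		{"svc3", 20.0, 200.0},
	}
	// Sort by B first (secondary key), then by A (primary key); both descending.
	for _, col := range []int{2, 1} {
		c := col
		sort.SliceStable(rows, func(i, j int) bool {
			return rows[i][c].(float64) > rows[j][c].(float64)
		})
	}
	fmt.Println(rows) // [[svc3 20 200] [svc1 10 300] [svc2 10 100]]
}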

// deduplicateScalarRows deduplicates rows in a ScalarData that already contains columns from multiple queries
func (q *querier) deduplicateScalarRows(data *qbtypes.ScalarData) map[string]any {
	// First, identify group columns
	groupColumnIndices := []int{}
	for i, col := range data.Columns {
		if col.Type == qbtypes.ColumnTypeGroup {
			groupColumnIndices = append(groupColumnIndices, i)
		}
	}

	// Build a map to merge rows by group key
	rowMap := make(map[string][]any)

	for _, row := range data.Data {
		// Build key from group values
		keyParts := make([]string, len(groupColumnIndices))
		for i, colIdx := range groupColumnIndices {
			if colIdx < len(row) {
				// Convert the value to string properly
				switch v := row[colIdx].(type) {
				case string:
					keyParts[i] = v
				case *string:
					if v != nil {
						keyParts[i] = *v
					} else {
						keyParts[i] = "n/a"
					}
				default:
					keyParts[i] = fmt.Sprintf("%v", v)
				}
			} else {
				keyParts[i] = "n/a"
			}
		}
		key := strings.Join(keyParts, "|")

		if existingRow, exists := rowMap[key]; exists {
			// Merge this row with existing row
			// Replace "n/a" values with actual values
			for i, val := range row {
				if existingRow[i] == "n/a" && val != "n/a" {
					existingRow[i] = val
				}
			}
		} else {
			// First time seeing this key, store the row
			rowCopy := make([]any, len(row))
			copy(rowCopy, row)
			rowMap[key] = rowCopy
		}
	}

	// Convert map back to slice
	mergedData := make([][]any, 0, len(rowMap))
	for _, row := range rowMap {
		mergedData = append(mergedData, row)
	}

	// Sort by first aggregation column if available
	firstAggCol := -1
	for i, col := range data.Columns {
		if col.Type == qbtypes.ColumnTypeAggregation {
			firstAggCol = i
			break
		}
	}

	if firstAggCol >= 0 {
		sort.SliceStable(mergedData, func(i, j int) bool {
			valI := mergedData[i][firstAggCol]
			valJ := mergedData[j][firstAggCol]

			// Handle n/a values
			if valI == "n/a" {
				return false
			}
			if valJ == "n/a" {
				return true
			}

			// Compare numeric values
			switch vI := valI.(type) {
			case float64:
				if vJ, ok := valJ.(float64); ok {
					return vI > vJ
				}
			case int64:
				if vJ, ok := valJ.(int64); ok {
					return vI > vJ
				}
			case int:
				if vJ, ok := valJ.(int); ok {
					return vI > vJ
				}
			}

			return false
		})
	}

	return map[string]any{
		"table": &qbtypes.ScalarData{
			Columns: data.Columns,
			Data:    mergedData,
		},
	}
}
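
For reference, the merge performed here on rows sharing a group key, with illustrative values:

// Input rows (same group key "svc1", split across queries):
//   ["svc1", 100.0, "n/a"]
//   ["svc1", "n/a", 50.0]
// Output after deduplication:
//   ["svc1", 100.0, 50.0]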

// roundToTwoDecimal rounds a number to two decimal places. Very small values
// (below 1e-6) collapse to 0; values below 0.01 keep enough decimal places to
// preserve their leading significant digits.
func roundToTwoDecimal(number float64) float64 {
	// Handle very small numbers
	if math.Abs(number) < 0.000001 {
		return 0
	}

	// Determine the number of decimal places to round to
	decimalPlaces := 2
	if math.Abs(number) < 0.01 {
		decimalPlaces = int(math.Ceil(-math.Log10(math.Abs(number)))) + 1
	}

	// Round to the determined number of decimal places
	scale := math.Pow(10, float64(decimalPlaces))
	return math.Round(number*scale) / scale
}
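
A quick standalone check of the rounding behaviour above (the helper is repeated verbatim so the snippet runs on its own; the printed values are what the math works out to):

package main

import (
	"fmt"
	"math"
)

// roundToTwoDecimal mirrors the helper above: two decimal places normally,
// extra significant digits for values below 0.01, zero below 1e-6.
func roundToTwoDecimal(number float64) float64 {
	if math.Abs(number) < 0.000001 {
		return 0
	}
	decimalPlaces := 2
	if math.Abs(number) < 0.01 {
		decimalPlaces = int(math.Ceil(-math.Log10(math.Abs(number)))) + 1
	}
	scale := math.Pow(10, float64(decimalPlaces))
	return math.Round(number*scale) / scale
}

func main() {
	fmt.Println(roundToTwoDecimal(50.5678))   // 50.57
	fmt.Println(roundToTwoDecimal(0.004567))  // 0.0046
	fmt.Println(roundToTwoDecimal(0.0000005)) // 0
}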
pkg/querier/postprocess_table_duplicate_test.go (new file, 242 lines)
@@ -0,0 +1,242 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestFormatScalarResultsAsTableDuplicateIssue reproduces the exact issue from the user's JSON
|
||||
func TestFormatScalarResultsAsTableDuplicateIssue(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Create results that exactly match the user's problematic case
|
||||
// Query A has data for all services
|
||||
// Query B also has data for all services
|
||||
// But they're coming as separate ScalarData results
|
||||
results := map[string]*qbtypes.Result{
|
||||
"A": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
QueryName: "B", // Note: This says "B" in the user's JSON!
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_1"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 1,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "B",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
// These rows have values for A but "n/a" for B
|
||||
{"currencyservice", 3380.0, 1.0, "n/a"},
|
||||
{"producer-svc-3", 25.0, 1.0, "n/a"},
|
||||
{"producer-svc-5", 45.0, 1.0, "n/a"},
|
||||
{"mongodb", 5713.0, 1.0, "n/a"},
|
||||
{"recommendationservice", 1724.0, 1.0, "n/a"},
|
||||
{"producer-svc-1", 180.0, 1.0, "n/a"},
|
||||
{"consumer-svc-4", 210.0, 1.0, "n/a"},
|
||||
{"frauddetectionservice", 101.0, 1.0, "n/a"},
|
||||
{"kafka", 1376.0, 1.0, "n/a"},
|
||||
{"consumer-svc-3", 122.0, 1.0, "n/a"},
|
||||
{"producer-svc-6", 60.0, 1.0, "n/a"},
|
||||
{"cartservice", 3322.0, 1.0, "n/a"},
|
||||
{"consumer-svc-2", 1080.0, 1.0, "n/a"},
|
||||
{"adservice", 133.0, 1.0, "n/a"},
|
||||
{"demo-app", 1449.0, 1.0, "n/a"},
|
||||
{"quoteservice", 101.0, 1.0, "n/a"},
|
||||
{"producer-svc-2", 360.0, 1.0, "n/a"},
|
||||
{"producer-svc-4", 36.0, 1.0, "n/a"},
|
||||
// These rows have "n/a" for A but values for B
|
||||
{"consumer-svc-4", "n/a", "n/a", 1.0},
|
||||
{"currencyservice", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-4", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-2", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-3", "n/a", "n/a", 1.0},
|
||||
{"adservice", "n/a", "n/a", 1.0},
|
||||
{"kafka", "n/a", "n/a", 1.0},
|
||||
{"frauddetectionservice", "n/a", "n/a", 1.0},
|
||||
{"recommendationservice", "n/a", "n/a", 1.0},
|
||||
{"consumer-svc-3", "n/a", "n/a", 1.0},
|
||||
{"consumer-svc-2", "n/a", "n/a", 1.0},
|
||||
{"cartservice", "n/a", "n/a", 1.0},
|
||||
{"quoteservice", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-5", "n/a", "n/a", 1.0},
|
||||
{"demo-app", "n/a", "n/a", 1.0},
|
||||
{"mongodb", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-6", "n/a", "n/a", 1.0},
|
||||
{"producer-svc-1", "n/a", "n/a", 1.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "A",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "B",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Format as table
|
||||
result := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Get the table - it should be under "A" key now
|
||||
var table *qbtypes.ScalarData
|
||||
var ok bool
|
||||
if tableResult, exists := result["A"]; exists {
|
||||
table, ok = tableResult.(*qbtypes.ScalarData)
|
||||
} else if tableResult, exists := result["table"]; exists {
|
||||
table, ok = tableResult.(*qbtypes.ScalarData)
|
||||
}
|
||||
require.True(t, ok, "Expected table result, got: %+v", result)
|
||||
|
||||
// The problem: we should have 18 unique services, not 36 rows
|
||||
assert.Len(t, table.Data, 18, "Should have 18 unique services, not duplicate rows")
|
||||
|
||||
// Create a map to check row values by service name
|
||||
rowMap := make(map[string][]any)
|
||||
for _, row := range table.Data {
|
||||
serviceName := row[0].(string)
|
||||
assert.NotContains(t, rowMap, serviceName, "Service %s should not appear twice", serviceName)
|
||||
rowMap[serviceName] = row
|
||||
}
|
||||
|
||||
// Check some specific services that appear in both lists
|
||||
// currencyservice should have values from both A and B
|
||||
currencyRow := rowMap["currencyservice"]
|
||||
assert.Equal(t, "currencyservice", currencyRow[0])
|
||||
assert.Equal(t, 3380.0, currencyRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, currencyRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, currencyRow[3]) // B result 0
|
||||
}
|
||||
|
||||
// TestFormatScalarResultsAsTableSingleResultAlreadyMerged tests the case where
|
||||
// a single result already contains all columns from multiple queries
|
||||
func TestFormatScalarResultsAsTableSingleResultAlreadyMerged(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// This is what we're actually getting - a single result that already has columns from both queries
|
||||
results := map[string]*qbtypes.Result{
|
||||
"merged": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
QueryName: "B",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_1"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 1,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "B",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"currencyservice", 3380.0, 1.0, "n/a"},
|
||||
{"mongodb", 5713.0, 1.0, "n/a"},
|
||||
{"currencyservice", "n/a", "n/a", 1.0},
|
||||
{"mongodb", "n/a", "n/a", 1.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "A",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "B",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Format as table
|
||||
result := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Get the table - it should be under "merged" key now
|
||||
var table *qbtypes.ScalarData
|
||||
var ok bool
|
||||
if tableResult, exists := result["merged"]; exists {
|
||||
table, ok = tableResult.(*qbtypes.ScalarData)
|
||||
} else if tableResult, exists := result["table"]; exists {
|
||||
table, ok = tableResult.(*qbtypes.ScalarData)
|
||||
}
|
||||
require.True(t, ok, "Expected table result, got: %+v", result)
|
||||
|
||||
// Should have 2 unique services, not 4 rows
|
||||
assert.Len(t, table.Data, 2, "Should have 2 unique services after merging duplicates")
|
||||
|
||||
// Create a map to check row values by service name
|
||||
rowMap := make(map[string][]any)
|
||||
for _, row := range table.Data {
|
||||
serviceName := row[0].(string)
|
||||
rowMap[serviceName] = row
|
||||
}
|
||||
|
||||
// Check that values are properly merged
|
||||
currencyRow := rowMap["currencyservice"]
|
||||
assert.Equal(t, "currencyservice", currencyRow[0])
|
||||
assert.Equal(t, 3380.0, currencyRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, currencyRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, currencyRow[3]) // B result 0
|
||||
|
||||
mongoRow := rowMap["mongodb"]
|
||||
assert.Equal(t, "mongodb", mongoRow[0])
|
||||
assert.Equal(t, 5713.0, mongoRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, mongoRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, mongoRow[3]) // B result 0
|
||||
}
|
||||
pkg/querier/postprocess_table_test.go (new file, 290 lines)
@@ -0,0 +1,290 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFormatScalarResultsAsTableMergesCorrectly(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Create results that simulate the problematic case
|
||||
results := map[string]*qbtypes.Result{
|
||||
"A": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
QueryName: "A",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_1"},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 1,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"currencyservice", 3380.0, 1.0},
|
||||
{"mongodb", 5713.0, 1.0},
|
||||
{"cartservice", 3322.0, 1.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
"B": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
QueryName: "B",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "__result_0"},
|
||||
QueryName: "B",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"currencyservice", 1.0},
|
||||
{"mongodb", 1.0},
|
||||
{"cartservice", 1.0},
|
||||
{"kafka", 1.0}, // Service only in B
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "A",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "B",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Format as table
|
||||
result := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Get the table
|
||||
table, ok := result["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have 4 columns: 1 group + 2 from A + 1 from B
|
||||
assert.Len(t, table.Columns, 4)
|
||||
|
||||
// Check column names and query associations
|
||||
assert.Equal(t, "service.name", table.Columns[0].Name)
|
||||
assert.Equal(t, qbtypes.ColumnTypeGroup, table.Columns[0].Type)
|
||||
|
||||
assert.Equal(t, "__result_0", table.Columns[1].Name)
|
||||
assert.Equal(t, "A", table.Columns[1].QueryName)
|
||||
assert.Equal(t, qbtypes.ColumnTypeAggregation, table.Columns[1].Type)
|
||||
|
||||
assert.Equal(t, "__result_1", table.Columns[2].Name)
|
||||
assert.Equal(t, "A", table.Columns[2].QueryName)
|
||||
assert.Equal(t, qbtypes.ColumnTypeAggregation, table.Columns[2].Type)
|
||||
|
||||
assert.Equal(t, "__result_0", table.Columns[3].Name)
|
||||
assert.Equal(t, "B", table.Columns[3].QueryName)
|
||||
assert.Equal(t, qbtypes.ColumnTypeAggregation, table.Columns[3].Type)
|
||||
|
||||
// Should have 4 rows (one for each unique service)
|
||||
assert.Len(t, table.Data, 4)
|
||||
|
||||
// Create a map to check row values by service name
|
||||
rowMap := make(map[string][]any)
|
||||
for _, row := range table.Data {
|
||||
serviceName := row[0].(string)
|
||||
rowMap[serviceName] = row
|
||||
}
|
||||
|
||||
// Check currencyservice row
|
||||
currencyRow := rowMap["currencyservice"]
|
||||
assert.Equal(t, "currencyservice", currencyRow[0])
|
||||
assert.Equal(t, 3380.0, currencyRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, currencyRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, currencyRow[3]) // B result 0
|
||||
|
||||
// Check mongodb row
|
||||
mongoRow := rowMap["mongodb"]
|
||||
assert.Equal(t, "mongodb", mongoRow[0])
|
||||
assert.Equal(t, 5713.0, mongoRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, mongoRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, mongoRow[3]) // B result 0
|
||||
|
||||
// Check cartservice row
|
||||
cartRow := rowMap["cartservice"]
|
||||
assert.Equal(t, "cartservice", cartRow[0])
|
||||
assert.Equal(t, 3322.0, cartRow[1]) // A result 0
|
||||
assert.Equal(t, 1.0, cartRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, cartRow[3]) // B result 0
|
||||
|
||||
// Check kafka row (only in B)
|
||||
kafkaRow := rowMap["kafka"]
|
||||
assert.Equal(t, "kafka", kafkaRow[0])
|
||||
assert.Equal(t, "n/a", kafkaRow[1]) // A result 0
|
||||
assert.Equal(t, "n/a", kafkaRow[2]) // A result 1
|
||||
assert.Equal(t, 1.0, kafkaRow[3]) // B result 0
|
||||
}
|
||||
|
||||
func TestFormatScalarResultsAsTableWithTimeSeriesData(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Create time series results that need to be converted to scalar
|
||||
results := map[string]*qbtypes.Result{
|
||||
"A": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "A",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "count",
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 100},
|
||||
{Timestamp: 2000, Value: 200},
|
||||
},
|
||||
},
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
Value: "backend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 300},
|
||||
{Timestamp: 2000, Value: 400},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"B": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "B",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "sum",
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
Value: "frontend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
{Timestamp: 2000, Value: 20},
|
||||
},
|
||||
},
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{
|
||||
Key: telemetrytypes.TelemetryFieldKey{Name: "service.name"},
|
||||
Value: "backend",
|
||||
},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 30},
|
||||
{Timestamp: 2000, Value: 40},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "A",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "B",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Format as table
|
||||
result := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Get the table
|
||||
table, ok := result["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have 3 columns: 1 group + 1 from A + 1 from B
|
||||
assert.Len(t, table.Columns, 3)
|
||||
|
||||
// Check column names
|
||||
assert.Equal(t, "service.name", table.Columns[0].Name)
|
||||
assert.Equal(t, "count", table.Columns[1].Name) // Should use alias
|
||||
assert.Equal(t, "sum", table.Columns[2].Name) // Should use alias
|
||||
|
||||
// Should have 2 rows (frontend and backend)
|
||||
assert.Len(t, table.Data, 2)
|
||||
|
||||
// Create a map to check row values by service name
|
||||
rowMap := make(map[string][]any)
|
||||
for _, row := range table.Data {
|
||||
serviceName := row[0].(string)
|
||||
rowMap[serviceName] = row
|
||||
}
|
||||
|
||||
// Check frontend row (should have last values)
|
||||
frontendRow := rowMap["frontend"]
|
||||
assert.Equal(t, "frontend", frontendRow[0])
|
||||
assert.Equal(t, 200.0, frontendRow[1]) // Last value from A
|
||||
assert.Equal(t, 20.0, frontendRow[2]) // Last value from B
|
||||
|
||||
// Check backend row
|
||||
backendRow := rowMap["backend"]
|
||||
assert.Equal(t, "backend", backendRow[0])
|
||||
assert.Equal(t, 400.0, backendRow[1]) // Last value from A
|
||||
assert.Equal(t, 40.0, backendRow[2]) // Last value from B
|
||||
}
|
||||
pkg/querier/postprocess_test.go (new file, 813 lines)
@@ -0,0 +1,813 @@
|
||||
package querier
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestApplyHavingClause(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
result *qbtypes.Result
|
||||
having *qbtypes.Having
|
||||
expected int // expected number of values after filtering
|
||||
}{
|
||||
{
|
||||
name: "having clause not implemented yet",
|
||||
result: &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 5},
|
||||
{Timestamp: 2000, Value: 15},
|
||||
{Timestamp: 3000, Value: 8},
|
||||
{Timestamp: 4000, Value: 20},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
having: &qbtypes.Having{
|
||||
Expression: "value > 10",
|
||||
},
|
||||
expected: 4, // No filtering for now
|
||||
},
|
||||
{
|
||||
name: "no having clause",
|
||||
result: &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 5},
|
||||
{Timestamp: 2000, Value: 15},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
having: nil,
|
||||
expected: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := q.applyHavingClause(tt.result, tt.having)
|
||||
tsData := result.Value.(*qbtypes.TimeSeriesData)
|
||||
|
||||
totalValues := 0
|
||||
for _, agg := range tsData.Aggregations {
|
||||
for _, series := range agg.Series {
|
||||
totalValues += len(series.Values)
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, tt.expected, totalValues)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplySeriesLimit(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
result := &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "service1"},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
{Timestamp: 2000, Value: 20},
|
||||
},
|
||||
},
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "service2"},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 30},
|
||||
{Timestamp: 2000, Value: 40},
|
||||
},
|
||||
},
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "service3"},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 5},
|
||||
{Timestamp: 2000, Value: 10},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Test limiting to 2 series with default ordering (by value desc)
|
||||
limited := q.applySeriesLimit(result, 2, nil)
|
||||
tsData := limited.Value.(*qbtypes.TimeSeriesData)
|
||||
|
||||
assert.Len(t, tsData.Aggregations[0].Series, 2)
|
||||
|
||||
// Should keep service2 (avg=35) and service1 (avg=15), drop service3 (avg=7.5)
|
||||
assert.Equal(t, "service2", tsData.Aggregations[0].Series[0].Labels[0].Value)
|
||||
assert.Equal(t, "service1", tsData.Aggregations[0].Series[1].Labels[0].Value)
|
||||
}
|
||||
|
||||
func TestApplyReduceTo(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
expression string
|
||||
values []float64
|
||||
expectedValue float64
|
||||
}{
|
||||
{
|
||||
name: "reduce to last",
|
||||
expression: "last",
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 30,
|
||||
},
|
||||
{
|
||||
name: "reduce to sum",
|
||||
expression: "sum",
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 60,
|
||||
},
|
||||
{
|
||||
name: "reduce to avg",
|
||||
expression: "avg",
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 20,
|
||||
},
|
||||
{
|
||||
name: "reduce to min",
|
||||
expression: "min",
|
||||
values: []float64{10, 20, 5, 30},
|
||||
expectedValue: 5,
|
||||
},
|
||||
{
|
||||
name: "reduce to max",
|
||||
expression: "max",
|
||||
values: []float64{10, 20, 50, 30},
|
||||
expectedValue: 50,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create time series values
|
||||
var values []*qbtypes.TimeSeriesValue
|
||||
for i, v := range tt.values {
|
||||
values = append(values, &qbtypes.TimeSeriesValue{
|
||||
Timestamp: int64(i * 1000),
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
|
||||
result := &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: values,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
secondaryAggs := []qbtypes.SecondaryAggregation{
|
||||
{Expression: tt.expression},
|
||||
}
|
||||
|
||||
reduced := q.applyReduceTo(result, secondaryAggs)
|
||||
tsData := reduced.Value.(*qbtypes.TimeSeriesData)
|
||||
|
||||
require.Len(t, tsData.Aggregations[0].Series[0].Values, 1)
|
||||
assert.Equal(t, tt.expectedValue, tsData.Aggregations[0].Series[0].Values[0].Value)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFillGaps(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
Start: 1000,
|
||||
End: 5000,
|
||||
RequestType: qbtypes.RequestTypeTimeSeries,
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "test",
|
||||
StepInterval: qbtypes.Step{Duration: time.Second},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
FormatOptions: &qbtypes.FormatOptions{
|
||||
FillGaps: true,
|
||||
},
|
||||
}
|
||||
|
||||
results := map[string]*qbtypes.Result{
|
||||
"test": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
{Timestamp: 3000, Value: 30},
|
||||
// Missing 2000, 4000, 5000
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
filled := q.fillGaps(results, req)
|
||||
tsData := filled["test"].Value.(*qbtypes.TimeSeriesData)
|
||||
values := tsData.Aggregations[0].Series[0].Values
|
||||
|
||||
// Should have 5 values: 1000, 2000, 3000, 4000, 5000
|
||||
assert.Len(t, values, 5)
|
||||
|
||||
// Check filled values
|
||||
assert.Equal(t, int64(1000), values[0].Timestamp)
|
||||
assert.Equal(t, 10.0, values[0].Value)
|
||||
|
||||
assert.Equal(t, int64(2000), values[1].Timestamp)
|
||||
assert.Equal(t, 0.0, values[1].Value) // Filled with 0
|
||||
|
||||
assert.Equal(t, int64(3000), values[2].Timestamp)
|
||||
assert.Equal(t, 30.0, values[2].Value)
|
||||
|
||||
assert.Equal(t, int64(4000), values[3].Timestamp)
|
||||
assert.Equal(t, 0.0, values[3].Value) // Filled with 0
|
||||
|
||||
assert.Equal(t, int64(5000), values[4].Timestamp)
|
||||
assert.Equal(t, 0.0, values[4].Value) // Filled with 0
|
||||
}
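
A standalone sketch of the zero-filling behaviour TestFillGaps expects, using a hypothetical helper rather than the querier's fillGaps:

package main

import "fmt"

// fillZeros returns one value per step between start and end (inclusive),
// keeping known points and inserting 0 for missing timestamps.
func fillZeros(known map[int64]float64, start, end, step int64) []float64 {
	var out []float64
	for ts := start; ts <= end; ts += step {
		out = append(out, known[ts]) // missing keys yield the zero value
	}
	return out
}

func main() {
	known := map[int64]float64{1000: 10, 3000: 30}
	fmt.Println(fillZeros(known, 1000, 5000, 1000)) // [10 0 30 0 0]
}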
|
||||
|
||||
func TestApplyMetricReduceTo(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
reduceOp qbtypes.ReduceTo
|
||||
values []float64
|
||||
expectedValue float64
|
||||
}{
|
||||
{
|
||||
name: "reduce to last",
|
||||
reduceOp: qbtypes.ReduceToLast,
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 30,
|
||||
},
|
||||
{
|
||||
name: "reduce to sum",
|
||||
reduceOp: qbtypes.ReduceToSum,
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 60,
|
||||
},
|
||||
{
|
||||
name: "reduce to avg",
|
||||
reduceOp: qbtypes.ReduceToAvg,
|
||||
values: []float64{10, 20, 30},
|
||||
expectedValue: 20,
|
||||
},
|
||||
{
|
||||
name: "reduce to min",
|
||||
reduceOp: qbtypes.ReduceToMin,
|
||||
values: []float64{10, 20, 5, 30},
|
||||
expectedValue: 5,
|
||||
},
|
||||
{
|
||||
name: "reduce to max",
|
||||
reduceOp: qbtypes.ReduceToMax,
|
||||
values: []float64{10, 20, 50, 30},
|
||||
expectedValue: 50,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create time series values
|
||||
var values []*qbtypes.TimeSeriesValue
|
||||
for i, v := range tt.values {
|
||||
values = append(values, &qbtypes.TimeSeriesValue{
|
||||
Timestamp: int64(i * 1000),
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
|
||||
result := &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: values,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
reduced := q.applyMetricReduceTo(result, tt.reduceOp)
|
||||
tsData := reduced.Value.(*qbtypes.TimeSeriesData)
|
||||
|
||||
require.Len(t, tsData.Aggregations[0].Series[0].Values, 1)
|
||||
assert.Equal(t, tt.expectedValue, tsData.Aggregations[0].Series[0].Values[0].Value)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPostProcessResultsWithMetricReduceTo(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test complete PostProcessResults flow with metric ReduceTo
|
||||
results := map[string]any{
|
||||
"metric_query": &qbtypes.TimeSeriesData{
|
||||
QueryName: "metric_query",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 100},
|
||||
{Timestamp: 2000, Value: 200},
|
||||
{Timestamp: 3000, Value: 150},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "metric_query",
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "test_metric",
|
||||
ReduceTo: qbtypes.ReduceToAvg, // Should use average (150)
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Process results
|
||||
processed, err := q.PostProcessResults(results, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the metric was reduced to average
|
||||
tsData := processed["metric_query"].(*qbtypes.TimeSeriesData)
|
||||
require.Len(t, tsData.Aggregations[0].Series[0].Values, 1)
|
||||
assert.Equal(t, 150.0, tsData.Aggregations[0].Series[0].Values[0].Value)
|
||||
}
|
||||
|
||||
func TestPostProcessMetricQuery(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test that metric query uses ReduceTo field
|
||||
result := &qbtypes.Result{
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "test_metric",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 10},
|
||||
{Timestamp: 2000, Value: 20},
|
||||
{Timestamp: 3000, Value: 30},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
}
|
||||
|
||||
query := qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Name: "test_metric",
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "test_metric",
|
||||
ReduceTo: qbtypes.ReduceToMax,
|
||||
},
|
||||
},
|
||||
Functions: []qbtypes.Function{},
|
||||
SecondaryAggregations: []qbtypes.SecondaryAggregation{
|
||||
{Expression: "sum"}, // This should be ignored when ReduceTo is set
|
||||
},
|
||||
}
|
||||
|
||||
// Process the metric query
|
||||
processed := postProcessMetricQuery(q, result, query, req)
|
||||
tsData := processed.Value.(*qbtypes.TimeSeriesData)
|
||||
|
||||
// Should have reduced to max value (30)
|
||||
require.Len(t, tsData.Aggregations[0].Series[0].Values, 1)
|
||||
assert.Equal(t, 30.0, tsData.Aggregations[0].Series[0].Values[0].Value)
|
||||
}
|
||||
|
||||
func TestFormatScalarResultsAsTable(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test simple scalar queries without groupBy (TimeSeriesData to ScalarData conversion)
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
FormatOptions: &qbtypes.FormatOptions{
|
||||
FormatTableResultForUI: true,
|
||||
},
|
||||
}
|
||||
|
||||
results := map[string]*qbtypes.Result{
|
||||
"queryA": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "queryA",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "count_result",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "requests",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 100},
|
||||
{Timestamp: 2000, Value: 200},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"queryB": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "queryB",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "sum_result",
|
||||
Meta: struct {
|
||||
Unit string `json:"unit,omitempty"`
|
||||
}{
|
||||
Unit: "bytes",
|
||||
},
|
||||
Series: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 50.5678},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
formatted := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Should return table under "table" key when called directly
|
||||
table, ok := formatted["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have 2 columns
|
||||
assert.Len(t, table.Columns, 2)
|
||||
|
||||
// Check column names and metadata
|
||||
assert.Equal(t, "count_result", table.Columns[0].Name)
|
||||
assert.Equal(t, "requests", table.Columns[0].Meta.Unit)
|
||||
|
||||
assert.Equal(t, "sum_result", table.Columns[1].Name)
|
||||
assert.Equal(t, "bytes", table.Columns[1].Meta.Unit)
|
||||
|
||||
// Should have 1 row with 2 values
|
||||
assert.Len(t, table.Data, 1)
|
||||
assert.Len(t, table.Data[0], 2)
|
||||
|
||||
// Check values (last value from time series, rounded)
|
||||
assert.Equal(t, 200.0, table.Data[0][0]) // Last value from queryA
|
||||
assert.Equal(t, 50.57, table.Data[0][1]) // Rounded value from queryB
|
||||
}
|
||||
|
||||
func TestFormatScalarResultsAsTableWithScalarData(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test with ScalarData (already formatted from query execution)
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
FormatOptions: &qbtypes.FormatOptions{
|
||||
FormatTableResultForUI: true,
|
||||
},
|
||||
}
|
||||
|
||||
results := map[string]*qbtypes.Result{
|
||||
"queryA": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
QueryName: "queryA",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "count",
|
||||
},
|
||||
QueryName: "queryA",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"service1", 100.0},
|
||||
{"service2", 200.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
"queryB": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
QueryName: "queryB",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "sum",
|
||||
},
|
||||
QueryName: "queryB",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"service1", 50.0},
|
||||
{"service2", 75.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
formatted := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Should return a merged table with all results
|
||||
table, ok := formatted["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have 3 columns: service.name (group), count (from queryA), sum (from queryB)
|
||||
assert.Len(t, table.Columns, 3)
|
||||
assert.Equal(t, "service.name", table.Columns[0].Name)
|
||||
assert.Equal(t, qbtypes.ColumnTypeGroup, table.Columns[0].Type)
|
||||
// Aggregation columns
|
||||
assert.Equal(t, qbtypes.ColumnTypeAggregation, table.Columns[1].Type)
|
||||
assert.Equal(t, "queryA", table.Columns[1].QueryName)
|
||||
assert.Equal(t, qbtypes.ColumnTypeAggregation, table.Columns[2].Type)
|
||||
assert.Equal(t, "queryB", table.Columns[2].QueryName)
|
||||
|
||||
// Should have 2 rows
|
||||
assert.Len(t, table.Data, 2)
|
||||
// Check row values - sorted by first aggregation column (descending)
|
||||
// service2 has value 200, service1 has value 100, so service2 comes first
|
||||
assert.Equal(t, "service2", table.Data[0][0])
|
||||
assert.Equal(t, 200.0, table.Data[0][1])
|
||||
assert.Equal(t, 75.0, table.Data[0][2])
|
||||
assert.Equal(t, "service1", table.Data[1][0])
|
||||
assert.Equal(t, 100.0, table.Data[1][1])
|
||||
assert.Equal(t, 50.0, table.Data[1][2])
|
||||
}
|
||||
|
||||
func TestFormatScalarResultsAsTableMergesDuplicateRows(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test that duplicate rows are properly merged
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
FormatOptions: &qbtypes.FormatOptions{
|
||||
FormatTableResultForUI: true,
|
||||
},
|
||||
}
|
||||
|
||||
results := map[string]*qbtypes.Result{
|
||||
"A": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
QueryName: "A",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "count",
|
||||
},
|
||||
QueryName: "A",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"service1", 100.0},
|
||||
{"service2", 200.0},
|
||||
{"service3", 300.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
"B": {
|
||||
Value: &qbtypes.ScalarData{
|
||||
Columns: []*qbtypes.ColumnDescriptor{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
QueryName: "B",
|
||||
Type: qbtypes.ColumnTypeGroup,
|
||||
},
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "sum",
|
||||
},
|
||||
QueryName: "B",
|
||||
AggregationIndex: 0,
|
||||
Type: qbtypes.ColumnTypeAggregation,
|
||||
},
|
||||
},
|
||||
Data: [][]any{
|
||||
{"service1", 150.0},
|
||||
{"service2", 250.0},
|
||||
{"service3", 350.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
formatted := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Should return a merged table
|
||||
table, ok := formatted["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have 3 columns: service.name, count (from A), sum (from B)
|
||||
assert.Len(t, table.Columns, 3)
|
||||
|
||||
// Should have 3 rows (not 6) - one per service
|
||||
assert.Len(t, table.Data, 3)
|
||||
|
||||
// Check that rows are properly merged (sorted by first aggregation column desc)
|
||||
assert.Equal(t, "service3", table.Data[0][0]) // Highest count value
|
||||
assert.Equal(t, 300.0, table.Data[0][1]) // count from A
|
||||
assert.Equal(t, 350.0, table.Data[0][2]) // sum from B
|
||||
|
||||
assert.Equal(t, "service2", table.Data[1][0])
|
||||
assert.Equal(t, 200.0, table.Data[1][1])
|
||||
assert.Equal(t, 250.0, table.Data[1][2])
|
||||
|
||||
assert.Equal(t, "service1", table.Data[2][0]) // Lowest count value
|
||||
assert.Equal(t, 100.0, table.Data[2][1])
|
||||
assert.Equal(t, 150.0, table.Data[2][2])
|
||||
}
|
||||
|
||||
func TestFormatScalarResultsAsTableWithEmptyResults(t *testing.T) {
|
||||
q := &querier{}
|
||||
|
||||
// Test with empty results (queries executed but returned no data)
|
||||
req := &qbtypes.QueryRangeRequest{
|
||||
RequestType: qbtypes.RequestTypeScalar,
|
||||
FormatOptions: &qbtypes.FormatOptions{
|
||||
FormatTableResultForUI: true,
|
||||
},
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
|
||||
Name: "A",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
|
||||
Name: "B",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
results := map[string]*qbtypes.Result{
|
||||
"A": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "A",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "logs_count",
|
||||
Series: []*qbtypes.TimeSeries{}, // Empty series
|
||||
},
|
||||
{
|
||||
Index: 1,
|
||||
Alias: "unique hosts",
|
||||
Series: []*qbtypes.TimeSeries{}, // Empty series
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"B": {
|
||||
Value: &qbtypes.TimeSeriesData{
|
||||
QueryName: "B",
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Index: 0,
|
||||
Alias: "hosts",
|
||||
Series: []*qbtypes.TimeSeries{}, // Empty series
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
formatted := q.formatScalarResultsAsTable(results, req)
|
||||
|
||||
// Should return a table structure even with empty results
|
||||
table, ok := formatted["table"].(*qbtypes.ScalarData)
|
||||
require.True(t, ok)
|
||||
|
||||
// Should have columns for the aggregations even with no data
|
||||
// Columns: logs_count, unique hosts (from A), hosts (from B)
|
||||
assert.Len(t, table.Columns, 3)
|
||||
|
||||
// Should have no data rows
|
||||
assert.Len(t, table.Data, 0)
|
||||
|
||||
// But should have columns for the empty aggregations
|
||||
assert.True(t, len(table.Columns) > 0)
|
||||
}
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/prometheus"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
@@ -133,13 +132,43 @@ func (q *querier) run(ctx context.Context, orgID valuer.UUID, qs map[string]qbty
|
||||
}
|
||||
}
|
||||
|
||||
// Apply postprocessing
|
||||
processedResults, err := q.PostProcessResults(results, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert results to slice for response
|
||||
resultSlice := make([]any, 0, len(processedResults))
|
||||
for _, query := range req.CompositeQuery.Queries {
|
||||
var queryName string
|
||||
switch spec := query.Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
|
||||
queryName = spec.Name
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
|
||||
queryName = spec.Name
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
|
||||
queryName = spec.Name
|
||||
case qbtypes.QueryBuilderFormula:
|
||||
queryName = spec.Name
|
||||
case qbtypes.PromQuery:
|
||||
queryName = spec.Name
|
||||
case qbtypes.ClickHouseQuery:
|
||||
queryName = spec.Name
|
||||
}
|
||||
|
||||
if result, ok := processedResults[queryName]; ok {
|
||||
resultSlice = append(resultSlice, result)
|
||||
}
|
||||
}
|
||||
|
||||
return &qbtypes.QueryRangeResponse{
|
||||
Type: req.RequestType,
|
||||
Data: struct {
|
||||
Results []any `json:"results"`
|
||||
Warnings []string `json:"warnings"`
|
||||
}{
|
||||
Results: maps.Values(results),
|
||||
Results: resultSlice,
|
||||
Warnings: warnings,
|
||||
},
|
||||
Meta: struct {
|
||||
@@ -159,6 +188,22 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
|
||||
// Get cached data and missing ranges
|
||||
cachedResult, missingRanges := q.bucketCache.GetMissRanges(ctx, orgID, query, step)
|
||||
|
||||
// Debug: Log cached result
|
||||
if cachedResult != nil {
|
||||
if tsData, ok := cachedResult.Value.(*qbtypes.TimeSeriesData); ok {
|
||||
totalSeries := 0
|
||||
seriesPerBucket := make(map[int]int)
|
||||
for _, agg := range tsData.Aggregations {
|
||||
totalSeries += len(agg.Series)
|
||||
seriesPerBucket[agg.Index] = len(agg.Series)
|
||||
}
|
||||
q.logger.DebugContext(ctx, "received cached result",
|
||||
"total_series", totalSeries,
|
||||
"series_per_bucket", seriesPerBucket,
|
||||
"missing_ranges", len(missingRanges))
|
||||
}
|
||||
}
|
||||
|
||||
// If no missing ranges, return cached result
|
||||
if len(missingRanges) == 0 && cachedResult != nil {
|
||||
return cachedResult, nil
|
||||
@@ -173,7 +218,7 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
|
||||
return nil, err
|
||||
}
|
||||
// Store in cache for future use
|
||||
q.bucketCache.Put(ctx, orgID, query, result)
|
||||
q.bucketCache.Put(ctx, orgID, query, step, result)
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
@@ -183,6 +228,10 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
|
||||
errors := make([]error, len(missingRanges))
|
||||
totalStats := qbtypes.ExecStats{}
|
||||
|
||||
q.logger.DebugContext(ctx, "executing queries for missing ranges",
|
||||
"missing_ranges_count", len(missingRanges),
|
||||
"ranges", missingRanges)
|
||||
|
||||
sem := make(chan struct{}, 4)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
@@ -224,7 +273,7 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
q.bucketCache.Put(ctx, orgID, query, result)
|
||||
q.bucketCache.Put(ctx, orgID, query, step, result)
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
@@ -247,8 +296,21 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
|
||||
mergedResult.Stats.BytesScanned += totalStats.BytesScanned
|
||||
mergedResult.Stats.DurationMS += totalStats.DurationMS
|
||||
|
||||
// Debug: Log before storing in cache
|
||||
if tsData, ok := mergedResult.Value.(*qbtypes.TimeSeriesData); ok {
|
||||
totalSeries := 0
|
||||
seriesPerBucket := make(map[int]int)
|
||||
for _, agg := range tsData.Aggregations {
|
||||
totalSeries += len(agg.Series)
|
||||
seriesPerBucket[agg.Index] = len(agg.Series)
|
||||
}
|
||||
q.logger.DebugContext(ctx, "storing merged result in cache",
|
||||
"total_series", totalSeries,
|
||||
"series_per_bucket", seriesPerBucket)
|
||||
}
|
||||
|
||||
// Store merged result in cache
|
||||
q.bucketCache.Put(ctx, orgID, query, mergedResult)
|
||||
q.bucketCache.Put(ctx, orgID, query, step, mergedResult)
|
||||
|
||||
return mergedResult, nil
|
||||
}
|
||||
@@ -273,8 +335,29 @@ func (q *querier) createRangedQuery(originalQuery qbtypes.Query, timeRange qbtyp
|
||||
|
||||
// mergeResults merges cached result with fresh results
|
||||
func (q *querier) mergeResults(cached *qbtypes.Result, fresh []*qbtypes.Result) *qbtypes.Result {
|
||||
if cached == nil && len(fresh) == 1 {
|
||||
return fresh[0]
|
||||
if cached == nil {
|
||||
if len(fresh) == 1 {
|
||||
return fresh[0]
|
||||
}
|
||||
if len(fresh) == 0 {
|
||||
return nil
|
||||
}
|
||||
// If cached is nil but we have multiple fresh results, we need to merge them
|
||||
// We need to merge all fresh results properly to avoid duplicates
|
||||
merged := &qbtypes.Result{
|
||||
Type: fresh[0].Type,
|
||||
Stats: fresh[0].Stats,
|
||||
Warnings: fresh[0].Warnings,
|
||||
}
|
||||
|
||||
// Merge all fresh results including the first one
|
||||
switch merged.Type {
|
||||
case qbtypes.RequestTypeTimeSeries:
|
||||
// Pass nil as cached value to ensure proper merging of all fresh results
|
||||
merged.Value = q.mergeTimeSeriesResults(nil, fresh)
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
||||
|
||||
// Start with cached result
|
||||
@@ -315,23 +398,71 @@ func (q *querier) mergeResults(cached *qbtypes.Result, fresh []*qbtypes.Result)
|
||||
// mergeTimeSeriesResults merges time series data
|
||||
func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, freshResults []*qbtypes.Result) *qbtypes.TimeSeriesData {
|
||||
|
||||
// Map to store merged series by query name and series key
|
||||
// Map to store merged series by aggregation index and series key
|
||||
seriesMap := make(map[int]map[string]*qbtypes.TimeSeries)
|
||||
// Map to store aggregation bucket metadata
|
||||
bucketMetadata := make(map[int]*qbtypes.AggregationBucket)
|
||||
|
||||
for _, aggBucket := range cachedValue.Aggregations {
|
||||
if seriesMap[aggBucket.Index] == nil {
|
||||
seriesMap[aggBucket.Index] = make(map[string]*qbtypes.TimeSeries)
|
||||
// Debug: Log input data
|
||||
if q.logger != nil {
|
||||
cachedCount := 0
|
||||
cachedSeriesDetails := make(map[int][]string)
|
||||
if cachedValue != nil && cachedValue.Aggregations != nil {
|
||||
for _, agg := range cachedValue.Aggregations {
|
||||
cachedCount += len(agg.Series)
|
||||
for _, s := range agg.Series {
|
||||
key := qbtypes.GetUniqueSeriesKey(s.Labels)
|
||||
cachedSeriesDetails[agg.Index] = append(cachedSeriesDetails[agg.Index], key)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, series := range aggBucket.Series {
|
||||
key := qbtypes.GetUniqueSeriesKey(series.Labels)
|
||||
seriesMap[aggBucket.Index][key] = series
|
||||
q.logger.Debug("mergeTimeSeriesResults called",
|
||||
"cached_series_count", cachedCount,
|
||||
"cached_series_details", cachedSeriesDetails,
|
||||
"fresh_results_count", len(freshResults))
|
||||
}
|
||||
|
||||
// Process cached data if available
|
||||
if cachedValue != nil && cachedValue.Aggregations != nil {
|
||||
for _, aggBucket := range cachedValue.Aggregations {
|
||||
if seriesMap[aggBucket.Index] == nil {
|
||||
seriesMap[aggBucket.Index] = make(map[string]*qbtypes.TimeSeries)
|
||||
}
|
||||
if bucketMetadata[aggBucket.Index] == nil {
|
||||
bucketMetadata[aggBucket.Index] = aggBucket
|
||||
}
|
||||
for _, series := range aggBucket.Series {
|
||||
key := qbtypes.GetUniqueSeriesKey(series.Labels)
|
||||
if existingSeries, ok := seriesMap[aggBucket.Index][key]; ok {
|
||||
// Merge values from duplicate series in cached data, avoiding duplicate timestamps
|
||||
timestampMap := make(map[int64]bool)
|
||||
for _, v := range existingSeries.Values {
|
||||
timestampMap[v.Timestamp] = true
|
||||
}
|
||||
|
||||
// Only add values with new timestamps
|
||||
for _, v := range series.Values {
|
||||
if !timestampMap[v.Timestamp] {
|
||||
existingSeries.Values = append(existingSeries.Values, v)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Create a copy to avoid modifying the cached data
|
||||
seriesCopy := &qbtypes.TimeSeries{
|
||||
Labels: series.Labels,
|
||||
Values: make([]*qbtypes.TimeSeriesValue, len(series.Values)),
|
||||
}
|
||||
copy(seriesCopy.Values, series.Values)
|
||||
seriesMap[aggBucket.Index][key] = seriesCopy
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add fresh series
for _, result := range freshResults {
freshTS, ok := result.Value.(*qbtypes.TimeSeriesData)
if !ok {
if !ok || freshTS == nil || freshTS.Aggregations == nil {
continue
}

@@ -339,6 +470,12 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
if seriesMap[aggBucket.Index] == nil {
seriesMap[aggBucket.Index] = make(map[string]*qbtypes.TimeSeries)
}
// Prefer fresh metadata over cached metadata
if aggBucket.Alias != "" || aggBucket.Meta.Unit != "" {
bucketMetadata[aggBucket.Index] = aggBucket
} else if bucketMetadata[aggBucket.Index] == nil {
bucketMetadata[aggBucket.Index] = aggBucket
}
}

for _, aggBucket := range freshTS.Aggregations {
@@ -346,8 +483,19 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
key := qbtypes.GetUniqueSeriesKey(series.Labels)

if existingSeries, ok := seriesMap[aggBucket.Index][key]; ok {
// Merge values
existingSeries.Values = append(existingSeries.Values, series.Values...)
// Merge values, avoiding duplicate timestamps
// Create a map to track existing timestamps
timestampMap := make(map[int64]bool)
for _, v := range existingSeries.Values {
timestampMap[v.Timestamp] = true
}

// Only add values with new timestamps
for _, v := range series.Values {
if !timestampMap[v.Timestamp] {
existingSeries.Values = append(existingSeries.Values, v)
}
}
} else {
// New series
seriesMap[aggBucket.Index][key] = series
@@ -357,10 +505,18 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
}
result := &qbtypes.TimeSeriesData{
QueryName: cachedValue.QueryName,
Aggregations: []*qbtypes.AggregationBucket{},
}

// Set QueryName from cached or first fresh result
if cachedValue != nil {
result.QueryName = cachedValue.QueryName
} else if len(freshResults) > 0 {
if freshTS, ok := freshResults[0].Value.(*qbtypes.TimeSeriesData); ok && freshTS != nil {
result.QueryName = freshTS.QueryName
}
}

for index, series := range seriesMap {
var aggSeries []*qbtypes.TimeSeries
for _, s := range series {
@@ -377,10 +533,38 @@ func (q *querier) mergeTimeSeriesResults(cachedValue *qbtypes.TimeSeriesData, fr
aggSeries = append(aggSeries, s)
}

result.Aggregations = append(result.Aggregations, &qbtypes.AggregationBucket{
// Preserve bucket metadata from either cached or fresh results
bucket := &qbtypes.AggregationBucket{
Index: index,
Series: aggSeries,
})
}
if metadata, ok := bucketMetadata[index]; ok {
bucket.Alias = metadata.Alias
bucket.Meta = metadata.Meta
}

result.Aggregations = append(result.Aggregations, bucket)
}

// Debug: Log output data
if q.logger != nil {
finalCount := 0
finalSeriesDetails := make(map[int][]string)
for _, agg := range result.Aggregations {
finalCount += len(agg.Series)
for _, s := range agg.Series {
key := qbtypes.GetUniqueSeriesKey(s.Labels)
// Also log the actual label values for debugging
labelDetails := make([]string, 0, len(s.Labels))
for _, l := range s.Labels {
labelDetails = append(labelDetails, fmt.Sprintf("%s=%v", l.Key.Name, l.Value))
}
finalSeriesDetails[agg.Index] = append(finalSeriesDetails[agg.Index], fmt.Sprintf("key=%s,labels=%v", key, labelDetails))
}
}
q.logger.Debug("mergeTimeSeriesResults returning",
"final_series_count", finalCount,
"final_series_details", finalSeriesDetails)
}

return result
|
||||
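The timestamp de-duplication used in both merge branches above follows one pattern; a minimal standalone sketch of it (the helper name is illustrative, not part of this change):

// mergeSeriesValues appends values from src into dst, skipping timestamps
// that dst already contains. Sketch only; the diff above inlines this logic
// inside mergeTimeSeriesResults for both the cached and the fresh series.
func mergeSeriesValues(dst, src []*qbtypes.TimeSeriesValue) []*qbtypes.TimeSeriesValue {
	seen := make(map[int64]bool, len(dst))
	for _, v := range dst {
		seen[v.Timestamp] = true
	}
	for _, v := range src {
		if !seen[v.Timestamp] {
			dst = append(dst, v)
		}
	}
	return dst
}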
@@ -68,7 +68,7 @@ func CollisionHandledFinalExpr(
return "", nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
} else {
// not even a close match, return an error
return "", nil, err
return "", nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
}
} else {
for _, key := range keysForField {
@@ -46,7 +46,7 @@ func (b *defaultConditionBuilder) ConditionFor(
) (string, error) {

if key.FieldContext != telemetrytypes.FieldContextResource {
return "", nil
return "true", nil
}

column, err := b.fm.ColumnFor(ctx, key)
@@ -22,7 +22,7 @@ type filterExpressionVisitor struct {
conditionBuilder qbtypes.ConditionBuilder
warnings []string
fieldKeys map[string][]*telemetrytypes.TelemetryFieldKey
errors []error
errors []string
builder *sqlbuilder.SelectBuilder
fullTextColumn *telemetrytypes.TelemetryFieldKey
jsonBodyPrefix string
@@ -90,10 +90,12 @@ func PrepareWhereClause(query string, opts FilterExprVisitorOpts) (*sqlbuilder.W
combinedErrors := errors.Newf(
errors.TypeInvalidInput,
errors.CodeInvalidInput,
"found %d syntax errors while parsing the filter expression: %v",
"found %d syntax errors while parsing the filter expression",
len(parserErrorListener.SyntaxErrors),
parserErrorListener.SyntaxErrors,
)
for _, err := range parserErrorListener.SyntaxErrors {
combinedErrors = combinedErrors.WithAdditional(err.Error())
}
return nil, nil, combinedErrors
}

@@ -105,10 +107,12 @@ func PrepareWhereClause(query string, opts FilterExprVisitorOpts) (*sqlbuilder.W
combinedErrors := errors.Newf(
errors.TypeInvalidInput,
errors.CodeInvalidInput,
"found %d errors while parsing the search expression: %v",
"found %d errors while parsing the search expression",
len(visitor.errors),
visitor.errors,
)
for _, err := range visitor.errors {
combinedErrors = combinedErrors.WithAdditional(err)
}
return nil, nil, combinedErrors
}
@@ -238,11 +242,7 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
|
||||
}
|
||||
|
||||
if v.fullTextColumn == nil {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"full text search is not supported",
|
||||
))
|
||||
v.errors = append(v.errors, "full text search is not supported")
|
||||
return ""
|
||||
}
|
||||
child := ctx.GetChild(0)
|
||||
@@ -251,7 +251,7 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
|
||||
keyText := keyCtx.GetText()
|
||||
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, keyText, v.builder)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build full text search condition"))
|
||||
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
|
||||
return ""
|
||||
}
|
||||
return cond
|
||||
@@ -266,12 +266,12 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
|
||||
} else if valCtx.KEY() != nil {
|
||||
text = valCtx.KEY().GetText()
|
||||
} else {
|
||||
v.errors = append(v.errors, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "unsupported value type: %s", valCtx.GetText()))
|
||||
v.errors = append(v.errors, fmt.Sprintf("unsupported value type: %s", valCtx.GetText()))
|
||||
return ""
|
||||
}
|
||||
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, text, v.builder)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build full text search condition"))
|
||||
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
|
||||
return ""
|
||||
}
|
||||
return cond
|
||||
@@ -419,7 +419,7 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
|
||||
for _, key := range keys {
|
||||
condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, value, v.builder)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build condition"))
|
||||
v.errors = append(v.errors, fmt.Sprintf("failed to build condition: %s", err.Error()))
|
||||
return ""
|
||||
}
|
||||
conds = append(conds, condition)
|
||||
@@ -471,16 +471,12 @@ func (v *filterExpressionVisitor) VisitFullText(ctx *grammar.FullTextContext) an
|
||||
}
|
||||
|
||||
if v.fullTextColumn == nil {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"full text search is not supported",
|
||||
))
|
||||
v.errors = append(v.errors, "full text search is not supported")
|
||||
return ""
|
||||
}
|
||||
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, text, v.builder)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, errors.WrapInternalf(err, errors.CodeInternal, "failed to build full text search condition"))
|
||||
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
|
||||
return ""
|
||||
}
|
||||
return cond
|
||||
@@ -498,34 +494,19 @@ func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallCon
|
||||
functionName = "hasAll"
|
||||
} else {
|
||||
// Default fallback
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"unknown function `%s`",
|
||||
ctx.GetText(),
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("unknown function `%s`", ctx.GetText()))
|
||||
return ""
|
||||
}
|
||||
params := v.Visit(ctx.FunctionParamList()).([]any)
|
||||
|
||||
if len(params) < 2 {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"function `%s` expects key and value parameters",
|
||||
functionName,
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("function `%s` expects key and value parameters", functionName))
|
||||
return ""
|
||||
}
|
||||
|
||||
keys, ok := params[0].([]*telemetrytypes.TelemetryFieldKey)
|
||||
if !ok {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"function `%s` expects key parameter to be a field key",
|
||||
functionName,
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("function `%s` expects key parameter to be a field key", functionName))
|
||||
return ""
|
||||
}
|
||||
value := params[1:]
|
||||
@@ -536,12 +517,7 @@ func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallCon
|
||||
if strings.HasPrefix(key.Name, v.jsonBodyPrefix) {
|
||||
fieldName, _ = v.jsonKeyToKey(context.Background(), key, qbtypes.FilterOperatorUnknown, value)
|
||||
} else {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"function `%s` supports only body JSON search",
|
||||
functionName,
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("function `%s` supports only body JSON search", functionName))
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -603,12 +579,7 @@ func (v *filterExpressionVisitor) VisitValue(ctx *grammar.ValueContext) any {
|
||||
} else if ctx.NUMBER() != nil {
|
||||
number, err := strconv.ParseFloat(ctx.NUMBER().GetText(), 64)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"failed to parse number %s",
|
||||
ctx.NUMBER().GetText(),
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("failed to parse number %s", ctx.NUMBER().GetText()))
|
||||
return ""
|
||||
}
|
||||
return number
|
||||
@@ -648,19 +619,11 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
|
||||
|
||||
if len(fieldKeysForName) == 0 {
|
||||
if strings.HasPrefix(fieldKey.Name, v.jsonBodyPrefix) && v.jsonBodyPrefix != "" && keyName == "" {
|
||||
v.errors = append(v.errors, errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"missing key for body json search - expected key of the form `body.key` (ex: `body.status`)",
|
||||
))
|
||||
v.errors = append(v.errors, "missing key for body json search - expected key of the form `body.key` (ex: `body.status`)")
|
||||
} else {
|
||||
// TODO(srikanthccv): do we want to return an error here?
|
||||
// should we infer the type and auto-magically build a key for expression?
|
||||
v.errors = append(v.errors, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"key `%s` not found",
|
||||
fieldKey.Name,
|
||||
))
|
||||
v.errors = append(v.errors, fmt.Sprintf("key `%s` not found", fieldKey.Name))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -173,7 +173,7 @@ func (m *fieldMapper) ColumnExpressionFor(
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
|
||||
} else {
|
||||
// not even a close match, return an error
|
||||
return "", err
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
|
||||
}
|
||||
}
|
||||
} else if len(keysForField) == 1 {
|
||||
@@ -186,7 +186,7 @@ func (m *fieldMapper) ColumnExpressionFor(
|
||||
colName, _ = m.FieldFor(ctx, key)
|
||||
args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
|
||||
}
|
||||
colName = fmt.Sprintf("multiIf(%s)", strings.Join(args, ", "))
|
||||
colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"log/slog"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/querybuilder"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
@@ -14,10 +13,6 @@ import (
|
||||
"github.com/huandu/go-sqlbuilder"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrUnsupportedAggregation = errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported aggregation")
|
||||
)
|
||||
|
||||
type logQueryStatementBuilder struct {
|
||||
logger *slog.Logger
|
||||
metadataStore telemetrytypes.MetadataStore
|
||||
@@ -165,12 +160,19 @@ func (b *logQueryStatementBuilder) buildListQuery(
|
||||
|
||||
// Add order by
|
||||
for _, orderBy := range query.Order {
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction))
|
||||
colExpr, err := b.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
|
||||
}
|
||||
|
||||
// Add limit and offset
|
||||
if query.Limit > 0 {
|
||||
sb.Limit(query.Limit)
|
||||
} else {
|
||||
// default to 100 rows
|
||||
sb.Limit(100)
|
||||
}
|
||||
|
||||
if query.Offset > 0 {
|
||||
@@ -381,9 +383,9 @@ func (b *logQueryStatementBuilder) buildScalarQuery(
|
||||
for _, orderBy := range query.Order {
|
||||
idx, ok := aggOrderBy(orderBy, query)
|
||||
if ok {
|
||||
sb.OrderBy(fmt.Sprintf("__result_%d %s", idx, orderBy.Direction))
|
||||
sb.OrderBy(fmt.Sprintf("__result_%d %s", idx, orderBy.Direction.StringValue()))
|
||||
} else {
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction))
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -420,19 +422,25 @@ func (b *logQueryStatementBuilder) addFilterCondition(
|
||||
keys map[string][]*telemetrytypes.TelemetryFieldKey,
|
||||
) ([]string, error) {
|
||||
|
||||
// add filter expression
|
||||
filterWhereClause, warnings, err := querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
|
||||
FieldMapper: b.fm,
|
||||
ConditionBuilder: b.cb,
|
||||
FieldKeys: keys,
|
||||
SkipResourceFilter: true,
|
||||
FullTextColumn: b.fullTextColumn,
|
||||
JsonBodyPrefix: b.jsonBodyPrefix,
|
||||
JsonKeyToKey: b.jsonKeyToKey,
|
||||
})
|
||||
var filterWhereClause *sqlbuilder.WhereClause
|
||||
var warnings []string
|
||||
var err error
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if query.Filter != nil && query.Filter.Expression != "" {
|
||||
// add filter expression
|
||||
filterWhereClause, warnings, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
|
||||
FieldMapper: b.fm,
|
||||
ConditionBuilder: b.cb,
|
||||
FieldKeys: keys,
|
||||
SkipResourceFilter: true,
|
||||
FullTextColumn: b.fullTextColumn,
|
||||
JsonBodyPrefix: b.jsonBodyPrefix,
|
||||
JsonKeyToKey: b.jsonKeyToKey,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if filterWhereClause != nil {
|
||||
|
||||
@@ -95,7 +95,7 @@ func (m *fieldMapper) ColumnExpressionFor(
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
|
||||
} else {
|
||||
// not even a close match, return an error
|
||||
return "", err
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
|
||||
}
|
||||
}
|
||||
} else if len(keysForField) == 1 {
|
||||
@@ -108,7 +108,7 @@ func (m *fieldMapper) ColumnExpressionFor(
|
||||
colName, _ = m.FieldFor(ctx, key)
|
||||
args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
|
||||
}
|
||||
colName = fmt.Sprintf("multiIf(%s)", strings.Join(args, ", "))
|
||||
colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -250,7 +250,7 @@ func (m *defaultFieldMapper) ColumnExpressionFor(
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction)
|
||||
} else {
|
||||
// not even a close match, return an error
|
||||
return "", err
|
||||
return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name)
|
||||
}
|
||||
}
|
||||
} else if len(keysForField) == 1 {
|
||||
@@ -263,7 +263,7 @@ func (m *defaultFieldMapper) ColumnExpressionFor(
|
||||
colName, _ = m.FieldFor(ctx, key)
|
||||
args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
|
||||
}
|
||||
colName = fmt.Sprintf("multiIf(%s)", strings.Join(args, ", "))
|
||||
colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -179,12 +179,19 @@ func (b *traceQueryStatementBuilder) buildListQuery(
|
||||
|
||||
// Add order by
|
||||
for _, orderBy := range query.Order {
|
||||
sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
|
||||
colExpr, err := b.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
|
||||
}
|
||||
|
||||
// Add limit and offset
|
||||
if query.Limit > 0 {
|
||||
sb.Limit(query.Limit)
|
||||
} else {
|
||||
// default to 100 rows
|
||||
sb.Limit(100)
|
||||
}
|
||||
|
||||
if query.Offset > 0 {
|
||||
|
||||
@@ -328,6 +328,8 @@ type MetricAggregation struct {
|
||||
TableHints *metrictypes.MetricTableHints `json:"-"`
|
||||
// value filter to apply to the query
|
||||
ValueFilter *metrictypes.MetricValueFilter `json:"-"`
|
||||
// reduce to operator for scalar requests
|
||||
ReduceTo ReduceTo `json:"reduceTo,omitempty"`
|
||||
}
|
||||
|
||||
type Filter struct {
|
||||
|
||||
@@ -56,3 +56,20 @@ type QueryBuilderQuery[T any] struct {
|
||||
// functions to apply to the query
|
||||
Functions []Function `json:"functions,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
|
||||
func (q *QueryBuilderQuery[T]) UnmarshalJSON(data []byte) error {
|
||||
// Define a type alias to avoid infinite recursion
|
||||
type Alias QueryBuilderQuery[T]
|
||||
|
||||
var temp Alias
|
||||
// Use UnmarshalJSONWithContext for better error messages
|
||||
if err := UnmarshalJSONWithContext(data, &temp, "query spec"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy the decoded values back to the original struct
|
||||
*q = QueryBuilderQuery[T](temp)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,10 +20,30 @@ type QueryBuilderFormula struct {
|
||||
// expression to apply to the query
|
||||
Expression string `json:"expression"`
|
||||
|
||||
// order by keys and directions
|
||||
Order []OrderBy `json:"order,omitempty"`
|
||||
|
||||
// limit the maximum number of rows to return
|
||||
Limit int `json:"limit,omitempty"`
|
||||
|
||||
// having clause to apply to the query
|
||||
Having *Having `json:"having,omitempty"`
|
||||
|
||||
// functions to apply to the formula result
|
||||
Functions []Function `json:"functions,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
|
||||
func (f *QueryBuilderFormula) UnmarshalJSON(data []byte) error {
|
||||
type Alias QueryBuilderFormula
|
||||
var temp Alias
|
||||
if err := UnmarshalJSONWithContext(data, &temp, "formula spec"); err != nil {
|
||||
return err
|
||||
}
|
||||
*f = QueryBuilderFormula(temp)
|
||||
return nil
|
||||
}
|
||||
|
||||
// small container to store the query name and index or alias reference
|
||||
// for a variable in the formula expression
|
||||
// read below for more details on aggregation references
|
||||
|
||||
pkg/types/querybuildertypes/querybuildertypesv5/json_decoder.go (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
)
|
||||
|
||||
// UnmarshalJSONWithSuggestions unmarshals JSON data into the target struct
|
||||
// and provides field name suggestions for unknown fields
|
||||
func UnmarshalJSONWithSuggestions(data []byte, target any) error {
|
||||
return UnmarshalJSONWithContext(data, target, "")
|
||||
}
|
||||
|
||||
// UnmarshalJSONWithContext unmarshals JSON with context information for better error messages
|
||||
func UnmarshalJSONWithContext(data []byte, target any, context string) error {
|
||||
// First, try to unmarshal with DisallowUnknownFields to catch unknown fields
|
||||
dec := json.NewDecoder(bytes.NewReader(data))
|
||||
dec.DisallowUnknownFields()
|
||||
|
||||
err := dec.Decode(target)
|
||||
if err == nil {
|
||||
// No error, successful unmarshal
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if it's an unknown field error
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
// Extract the unknown field name
|
||||
unknownField := extractUnknownField(err.Error())
|
||||
if unknownField != "" {
|
||||
// Get valid field names from the target struct
|
||||
validFields := getJSONFieldNames(target)
|
||||
|
||||
// Build error message with context
|
||||
errorMsg := "unknown field %q"
|
||||
if context != "" {
|
||||
errorMsg = "unknown field %q in " + context
|
||||
}
|
||||
|
||||
// Find closest match with max distance of 3 (reasonable for typos)
|
||||
if suggestion, found := findClosestMatch(unknownField, validFields, 3); found {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
errorMsg,
|
||||
unknownField,
|
||||
).WithAdditional(
|
||||
"Did you mean '" + suggestion + "'?",
|
||||
)
|
||||
}
|
||||
|
||||
// No good suggestion found
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
errorMsg,
|
||||
unknownField,
|
||||
).WithAdditional(
|
||||
"Valid fields are: " + strings.Join(validFields, ", "),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Return the original error if it's not an unknown field error
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid JSON: %v", err)
|
||||
}
|
||||
|
||||
// extractUnknownField extracts the field name from an unknown field error message
|
||||
func extractUnknownField(errMsg string) string {
|
||||
// The error message format is: json: unknown field "fieldname"
|
||||
parts := strings.Split(errMsg, `"`)
|
||||
if len(parts) >= 2 {
|
||||
return parts[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// getJSONFieldNames extracts all JSON field names from a struct
|
||||
func getJSONFieldNames(v any) []string {
|
||||
var fields []string
|
||||
|
||||
t := reflect.TypeOf(v)
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Struct {
|
||||
return fields
|
||||
}
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
jsonTag := field.Tag.Get("json")
|
||||
|
||||
if jsonTag == "" || jsonTag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract the field name from the JSON tag
|
||||
fieldName := strings.Split(jsonTag, ",")[0]
|
||||
if fieldName != "" {
|
||||
fields = append(fields, fieldName)
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
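A small usage sketch of the decoder helper above; the struct and payload are made up for illustration and are not part of this change:

// pageRequest is a hypothetical target struct used only for this example.
type pageRequest struct {
	Start int64 `json:"start"`
	End   int64 `json:"end"`
	Limit int   `json:"limit,omitempty"`
}

func exampleDecode() {
	// "strt" is a typo for "start"; DisallowUnknownFields rejects it and the
	// helper attaches a "Did you mean 'start'?" hint as additional context.
	payload := []byte(`{"strt": 1000, "end": 2000}`)
	var req pageRequest
	if err := UnmarshalJSONWithSuggestions(payload, &req); err != nil {
		fmt.Println(err) // unknown field "strt"
	}
}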
@@ -0,0 +1,87 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func levenshteinDistance(s1, s2 string) int {
|
||||
if len(s1) == 0 {
|
||||
return len(s2)
|
||||
}
|
||||
if len(s2) == 0 {
|
||||
return len(s1)
|
||||
}
|
||||
|
||||
// Create a matrix to store distances
|
||||
matrix := make([][]int, len(s1)+1)
|
||||
for i := range matrix {
|
||||
matrix[i] = make([]int, len(s2)+1)
|
||||
}
|
||||
|
||||
// Initialize first column and row
|
||||
for i := 0; i <= len(s1); i++ {
|
||||
matrix[i][0] = i
|
||||
}
|
||||
for j := 0; j <= len(s2); j++ {
|
||||
matrix[0][j] = j
|
||||
}
|
||||
|
||||
// Calculate distances
|
||||
for i := 1; i <= len(s1); i++ {
|
||||
for j := 1; j <= len(s2); j++ {
|
||||
cost := 0
|
||||
if s1[i-1] != s2[j-1] {
|
||||
cost = 1
|
||||
}
|
||||
matrix[i][j] = min(
|
||||
matrix[i-1][j]+1, // deletion
|
||||
matrix[i][j-1]+1, // insertion
|
||||
matrix[i-1][j-1]+cost, // substitution
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return matrix[len(s1)][len(s2)]
|
||||
}
|
||||
|
||||
func findClosestMatch(target string, validOptions []string, maxDistance int) (string, bool) {
|
||||
if len(validOptions) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
bestMatch := ""
|
||||
bestDistance := maxDistance + 1
|
||||
|
||||
// Convert target to lowercase for case-insensitive comparison
|
||||
targetLower := strings.ToLower(target)
|
||||
|
||||
for _, option := range validOptions {
|
||||
// Case-insensitive comparison
|
||||
distance := levenshteinDistance(targetLower, strings.ToLower(option))
|
||||
if distance < bestDistance {
|
||||
bestDistance = distance
|
||||
bestMatch = option
|
||||
}
|
||||
}
|
||||
|
||||
// Only return a match if it's within the threshold
|
||||
if bestDistance <= maxDistance {
|
||||
return bestMatch, true
|
||||
}
|
||||
|
||||
return "", false
|
||||
}
|
||||
|
||||
// min returns the minimum of three integers
|
||||
func min(a, b, c int) int {
|
||||
if a < b {
|
||||
if a < c {
|
||||
return a
|
||||
}
|
||||
return c
|
||||
}
|
||||
if b < c {
|
||||
return b
|
||||
}
|
||||
return c
|
||||
}
|
||||
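findClosestMatch is what backs the "Did you mean ...?" hints; a quick illustrative call (the values are made up, mirroring the request fields used elsewhere in this change):

valid := []string{"schemaVersion", "start", "end", "requestType", "compositeQuery"}
// "compositeQueries" is within edit distance 3 of "compositeQuery", so a hint is returned.
if suggestion, ok := findClosestMatch("compositeQueries", valid, 3); ok {
	fmt.Printf("Did you mean '%s'?\n", suggestion) // Did you mean 'compositeQuery'?
}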
@@ -0,0 +1,323 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLevenshteinDistance(t *testing.T) {
|
||||
tests := []struct {
|
||||
s1 string
|
||||
s2 string
|
||||
expected int
|
||||
}{
|
||||
{"", "", 0},
|
||||
{"a", "", 1},
|
||||
{"", "a", 1},
|
||||
{"a", "a", 0},
|
||||
{"abc", "abc", 0},
|
||||
{"kitten", "sitting", 3},
|
||||
{"saturday", "sunday", 3},
|
||||
{"expires", "expires_in", 3},
|
||||
{"start", "end", 5}, // s->e, t->n, a->d, r->"", t->""
|
||||
{"schemaVersion", "schema_version", 2}, // V->_ and ""->_
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.s1+"_"+tt.s2, func(t *testing.T) {
|
||||
result := levenshteinDistance(tt.s1, tt.s2)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindClosestMatch(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
target string
|
||||
validOptions []string
|
||||
maxDistance int
|
||||
expectedMatch string
|
||||
expectedFound bool
|
||||
}{
|
||||
{
|
||||
name: "exact match",
|
||||
target: "start",
|
||||
validOptions: []string{"start", "end", "limit"},
|
||||
maxDistance: 3,
|
||||
expectedMatch: "start",
|
||||
expectedFound: true,
|
||||
},
|
||||
{
|
||||
name: "close match",
|
||||
target: "strt",
|
||||
validOptions: []string{"start", "end", "limit"},
|
||||
maxDistance: 3,
|
||||
expectedMatch: "start",
|
||||
expectedFound: true,
|
||||
},
|
||||
{
|
||||
name: "case insensitive match",
|
||||
target: "START",
|
||||
validOptions: []string{"start", "end", "limit"},
|
||||
maxDistance: 3,
|
||||
expectedMatch: "start",
|
||||
expectedFound: true,
|
||||
},
|
||||
{
|
||||
name: "no match within distance",
|
||||
target: "completely_different",
|
||||
validOptions: []string{"start", "end", "limit"},
|
||||
maxDistance: 3,
|
||||
expectedMatch: "",
|
||||
expectedFound: false,
|
||||
},
|
||||
{
|
||||
name: "expires to expires_in",
|
||||
target: "expires",
|
||||
validOptions: []string{"expires_in", "start", "end"},
|
||||
maxDistance: 3,
|
||||
expectedMatch: "expires_in",
|
||||
expectedFound: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
match, found := findClosestMatch(tt.target, tt.validOptions, tt.maxDistance)
|
||||
assert.Equal(t, tt.expectedFound, found)
|
||||
if tt.expectedFound {
|
||||
assert.Equal(t, tt.expectedMatch, match)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryRangeRequestUnmarshalWithSuggestions(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
jsonData string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "valid request",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
name: "typo in start field",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"strt": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: `unknown field "strt"`,
|
||||
},
|
||||
{
|
||||
name: "typo in schemaVersion",
|
||||
jsonData: `{
|
||||
"schemaVerson": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: `unknown field "schemaVerson"`,
|
||||
},
|
||||
{
|
||||
name: "requestype instead of requestType",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestype": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: `unknown field "requestype"`,
|
||||
},
|
||||
{
|
||||
name: "composite_query instead of compositeQuery",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"composite_query": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: `unknown field "composite_query"`,
|
||||
},
|
||||
{
|
||||
name: "no_cache instead of noCache",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
},
|
||||
"no_cache": true
|
||||
}`,
|
||||
expectedErr: `unknown field "no_cache"`,
|
||||
},
|
||||
{
|
||||
name: "format_options instead of formatOptions",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
},
|
||||
"format_options": {}
|
||||
}`,
|
||||
expectedErr: `unknown field "format_options"`,
|
||||
},
|
||||
{
|
||||
name: "completely unknown field with no good suggestion",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"completely_unknown_field_xyz": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
expectedErr: `unknown field "completely_unknown_field_xyz"`,
|
||||
},
|
||||
{
|
||||
name: "common mistake: limit instead of variables",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v5",
|
||||
"start": 1000,
|
||||
"end": 2000,
|
||||
"requestType": "timeseries",
|
||||
"compositeQuery": {
|
||||
"queries": []
|
||||
},
|
||||
"limit": 100
|
||||
}`,
|
||||
expectedErr: `unknown field "limit"`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var req QueryRangeRequest
|
||||
err := json.Unmarshal([]byte(tt.jsonData), &req)
|
||||
|
||||
if tt.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), tt.expectedErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetJSONFieldNames(t *testing.T) {
|
||||
type TestStruct struct {
|
||||
Field1 string `json:"field1"`
|
||||
Field2 int `json:"field2,omitempty"`
|
||||
Field3 bool `json:"-"`
|
||||
Field4 string `json:""`
|
||||
Field5 string // no json tag
|
||||
}
|
||||
|
||||
fields := getJSONFieldNames(&TestStruct{})
|
||||
expected := []string{"field1", "field2"}
|
||||
|
||||
assert.ElementsMatch(t, expected, fields)
|
||||
}
|
||||
|
||||
func TestUnmarshalJSONWithSuggestions(t *testing.T) {
|
||||
type TestRequest struct {
|
||||
SchemaVersion string `json:"schemaVersion"`
|
||||
Start int64 `json:"start"`
|
||||
End int64 `json:"end"`
|
||||
Limit int `json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
jsonData string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "valid JSON",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v1",
|
||||
"start": 1000,
|
||||
"end": 2000
|
||||
}`,
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
name: "typo in field name",
|
||||
jsonData: `{
|
||||
"schemaVerson": "v1",
|
||||
"start": 1000,
|
||||
"end": 2000
|
||||
}`,
|
||||
expectedErr: `unknown field "schemaVerson"`,
|
||||
},
|
||||
{
|
||||
name: "multiple typos - only first is reported",
|
||||
jsonData: `{
|
||||
"strt": 1000,
|
||||
"ed": 2000
|
||||
}`,
|
||||
expectedErr: `unknown field "strt"`,
|
||||
},
|
||||
{
|
||||
name: "case sensitivity",
|
||||
jsonData: `{
|
||||
"schema_version": "v1",
|
||||
"start": 1000,
|
||||
"end": 2000
|
||||
}`,
|
||||
expectedErr: `unknown field "schema_version"`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var req TestRequest
|
||||
err := UnmarshalJSONWithSuggestions([]byte(tt.jsonData), &req)
|
||||
|
||||
if tt.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
// Clean up the error message for comparison
|
||||
errMsg := strings.ReplaceAll(err.Error(), "\n", " ")
|
||||
assert.Contains(t, errMsg, tt.expectedErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
@@ -17,12 +18,11 @@ type QueryEnvelope struct {
|
||||
// implement custom json unmarshaler for the QueryEnvelope
|
||||
func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
|
||||
var shadow struct {
|
||||
Name string `json:"name"`
|
||||
Type QueryType `json:"type"`
|
||||
Spec json.RawMessage `json:"spec"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &shadow); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid query envelope")
|
||||
if err := UnmarshalJSONWithSuggestions(data, &shadow); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
q.Type = shadow.Type
|
||||
@@ -34,62 +34,169 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
|
||||
Signal telemetrytypes.Signal `json:"signal"`
|
||||
}
|
||||
if err := json.Unmarshal(shadow.Spec, &header); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "cannot detect builder signal")
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"cannot detect builder signal: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
switch header.Signal {
|
||||
case telemetrytypes.SignalTraces:
|
||||
var spec QueryBuilderQuery[TraceAggregation]
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid trace builder query spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid trace builder query spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
case telemetrytypes.SignalLogs:
|
||||
var spec QueryBuilderQuery[LogAggregation]
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid log builder query spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid log builder query spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
case telemetrytypes.SignalMetrics:
|
||||
var spec QueryBuilderQuery[MetricAggregation]
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid metric builder query spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid metric builder query spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
default:
|
||||
return errors.WrapInvalidInputf(nil, errors.CodeInvalidInput, "unknown builder signal %q", header.Signal)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown builder signal %q",
|
||||
header.Signal,
|
||||
).WithAdditional(
|
||||
"Valid signals are: traces, logs, metrics",
|
||||
)
|
||||
}
|
||||
|
||||
case QueryTypeFormula:
|
||||
var spec QueryBuilderFormula
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid formula spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "formula spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid formula spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
|
||||
case QueryTypeJoin:
|
||||
var spec QueryBuilderJoin
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid join spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "join spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid join spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
|
||||
case QueryTypePromQL:
|
||||
var spec PromQuery
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid PromQL spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "PromQL spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid PromQL spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
|
||||
case QueryTypeClickHouseSQL:
|
||||
var spec ClickHouseQuery
|
||||
if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
|
||||
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid ClickHouse SQL spec")
|
||||
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "ClickHouse SQL spec"); err != nil {
|
||||
// If it's already one of our wrapped errors with additional context, return as-is
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
if len(additionals) > 0 {
|
||||
return err
|
||||
}
|
||||
// Preserve helpful error messages about unknown fields
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
return err
|
||||
}
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid ClickHouse SQL spec: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
q.Spec = spec
|
||||
|
||||
default:
|
||||
return errors.WrapInvalidInputf(nil, errors.CodeInvalidInput, "unknown query type %q", shadow.Type)
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown query type %q",
|
||||
shadow.Type,
|
||||
).WithAdditional(
|
||||
"Valid query types are: builder_query, builder_sub_query, builder_formula, builder_join, promql, clickhouse_sql",
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
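Each case above repeats the same recovery steps after UnmarshalJSONWithContext; the shared shape, as an illustrative (hypothetical) helper:

// unmarshalSpec is a sketch of the error handling repeated per query type:
// keep errors that already carry additional hints or an "unknown field"
// message, and only wrap everything else with the branch-specific message.
func unmarshalSpec(data json.RawMessage, spec any, context, invalidMsg string) error {
	err := UnmarshalJSONWithContext(data, spec, context)
	if err == nil {
		return nil
	}
	if _, _, _, _, _, additionals := errors.Unwrapb(err); len(additionals) > 0 {
		return err
	}
	if strings.Contains(err.Error(), "unknown field") {
		return err
	}
	return errors.NewInvalidInputf(errors.CodeInvalidInput, "%s: %v", invalidMsg, err)
}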
@@ -100,6 +207,59 @@ type CompositeQuery struct {
|
||||
Queries []QueryEnvelope `json:"queries"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements custom JSON unmarshaling to provide better error messages
|
||||
func (c *CompositeQuery) UnmarshalJSON(data []byte) error {
|
||||
type Alias CompositeQuery
|
||||
|
||||
// First do a normal unmarshal without DisallowUnknownFields
|
||||
var temp Alias
|
||||
if err := json.Unmarshal(data, &temp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Then check for unknown fields at this level only
|
||||
var check map[string]json.RawMessage
|
||||
if err := json.Unmarshal(data, &check); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for unknown fields at this level
|
||||
validFields := map[string]bool{
|
||||
"queries": true,
|
||||
}
|
||||
|
||||
for field := range check {
|
||||
if !validFields[field] {
|
||||
// Find closest match
|
||||
var fieldNames []string
|
||||
for f := range validFields {
|
||||
fieldNames = append(fieldNames, f)
|
||||
}
|
||||
|
||||
if suggestion, found := findClosestMatch(field, fieldNames, 3); found {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown field %q in composite query",
|
||||
field,
|
||||
).WithAdditional(
|
||||
"Did you mean '" + suggestion + "'?",
|
||||
)
|
||||
}
|
||||
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown field %q in composite query",
|
||||
field,
|
||||
).WithAdditional(
|
||||
"Valid fields are: " + strings.Join(fieldNames, ", "),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
*c = CompositeQuery(temp)
|
||||
return nil
|
||||
}
|
||||
|
||||
type QueryRangeRequest struct {
|
||||
// SchemaVersion is the version of the schema to use for the request payload.
|
||||
SchemaVersion string `json:"schemaVersion"`
|
||||
@@ -120,6 +280,69 @@ type QueryRangeRequest struct {
|
||||
FormatOptions *FormatOptions `json:"formatOptions,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
|
||||
func (r *QueryRangeRequest) UnmarshalJSON(data []byte) error {
|
||||
// Define a type alias to avoid infinite recursion
|
||||
type Alias QueryRangeRequest
|
||||
|
||||
// First do a normal unmarshal without DisallowUnknownFields to let nested structures handle their own validation
|
||||
var temp Alias
|
||||
if err := json.Unmarshal(data, &temp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Then check for unknown fields at this level only
|
||||
var check map[string]json.RawMessage
|
||||
if err := json.Unmarshal(data, &check); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for unknown fields at the top level
|
||||
validFields := map[string]bool{
|
||||
"schemaVersion": true,
|
||||
"start": true,
|
||||
"end": true,
|
||||
"requestType": true,
|
||||
"compositeQuery": true,
|
||||
"variables": true,
|
||||
"noCache": true,
|
||||
"formatOptions": true,
|
||||
}
|
||||
|
||||
for field := range check {
|
||||
if !validFields[field] {
|
||||
// Find closest match
|
||||
var fieldNames []string
|
||||
for f := range validFields {
|
||||
fieldNames = append(fieldNames, f)
|
||||
}
|
||||
|
||||
if suggestion, found := findClosestMatch(field, fieldNames, 3); found {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown field %q",
|
||||
field,
|
||||
).WithAdditional(
|
||||
"Did you mean '" + suggestion + "'?",
|
||||
)
|
||||
}
|
||||
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"unknown field %q",
|
||||
field,
|
||||
).WithAdditional(
|
||||
"Valid fields are: " + strings.Join(fieldNames, ", "),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the decoded values back to the original struct
|
||||
*r = QueryRangeRequest(temp)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type FormatOptions struct {
|
||||
FillGaps bool `json:"fillGaps,omitempty"`
|
||||
FormatTableResultForUI bool `json:"formatTableResultForUI,omitempty"`
|
||||
|
||||
@@ -0,0 +1,150 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestQueryRangeRequest_UnmarshalJSON_ErrorMessages(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
jsonData string
|
||||
wantErrMsg string
|
||||
wantAdditionalHints []string
|
||||
}{
|
||||
{
|
||||
name: "unknown field 'function' in query spec",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v1",
|
||||
"start": 1749290340000,
|
||||
"end": 1749293940000,
|
||||
"requestType": "scalar",
|
||||
"compositeQuery": {
|
||||
"queries": [{
|
||||
"type": "builder_query",
|
||||
"spec": {
|
||||
"name": "A",
|
||||
"signal": "logs",
|
||||
"aggregations": [{
|
||||
"expression": "count()",
|
||||
"alias": "spans_count"
|
||||
}],
|
||||
"function": [{
|
||||
"name": "absolute",
|
||||
"args": []
|
||||
}]
|
||||
}
|
||||
}]
|
||||
}
|
||||
}`,
|
||||
wantErrMsg: `unknown field "function" in query spec`,
|
||||
wantAdditionalHints: []string{
|
||||
"Did you mean 'functions'?",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown field 'filters' in query spec",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v1",
|
||||
"start": 1749290340000,
|
||||
"end": 1749293940000,
|
||||
"requestType": "scalar",
|
||||
"compositeQuery": {
|
||||
"queries": [{
|
||||
"type": "builder_query",
|
||||
"spec": {
|
||||
"name": "A",
|
||||
"signal": "metrics",
|
||||
"aggregations": [{
|
||||
"metricName": "test"
|
||||
}],
|
||||
"filters": {
|
||||
"expression": "test = 1"
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}`,
|
||||
wantErrMsg: `unknown field "filters" in query spec`,
|
||||
wantAdditionalHints: []string{
|
||||
"Did you mean 'filter'?",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown field at top level",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v1",
|
||||
"start": 1749290340000,
|
||||
"end": 1749293940000,
|
||||
"requestType": "scalar",
|
||||
"compositeQueries": {
|
||||
"queries": []
|
||||
}
|
||||
}`,
|
||||
wantErrMsg: `unknown field "compositeQueries"`,
|
||||
wantAdditionalHints: []string{
|
||||
"Did you mean 'compositeQuery'?",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown field with no good suggestion",
|
||||
jsonData: `{
|
||||
"schemaVersion": "v1",
|
||||
"start": 1749290340000,
|
||||
"end": 1749293940000,
|
||||
"requestType": "scalar",
|
||||
"compositeQuery": {
|
||||
"queries": [{
|
||||
"type": "builder_query",
|
||||
"spec": {
|
||||
"name": "A",
|
||||
"signal": "metrics",
|
||||
"aggregations": [{
|
||||
"metricName": "test"
|
||||
}],
|
||||
"randomField": "value"
|
||||
}
|
||||
}]
|
||||
}
|
||||
}`,
|
||||
wantErrMsg: `unknown field "randomField" in query spec`,
|
||||
wantAdditionalHints: []string{
|
||||
"Valid fields are:",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var req QueryRangeRequest
|
||||
err := json.Unmarshal([]byte(tt.jsonData), &req)
|
||||
|
||||
require.Error(t, err)
|
||||
|
||||
// Check main error message
|
||||
assert.Contains(t, err.Error(), tt.wantErrMsg)
|
||||
|
||||
// Check if it's an error from our package using Unwrapb
|
||||
_, _, _, _, _, additionals := errors.Unwrapb(err)
|
||||
|
||||
// Check additional hints if we have any
|
||||
if len(additionals) > 0 {
|
||||
for _, hint := range tt.wantAdditionalHints {
|
||||
found := false
|
||||
for _, additional := range additionals {
|
||||
if strings.Contains(additional, hint) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Expected to find hint '%s' in additionals: %v", hint, additionals)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -120,8 +120,8 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
|
||||
"expression": "severity_text = 'ERROR'"
|
||||
},
|
||||
"selectFields": [{
|
||||
"key": "body",
|
||||
"type": "log"
|
||||
"name": "body",
|
||||
"fieldContext": "log"
|
||||
}],
|
||||
"limit": 50,
|
||||
"offset": 10
|
||||
@@ -177,8 +177,8 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
|
||||
}],
|
||||
"stepInterval": 120,
|
||||
"groupBy": [{
|
||||
"key": "method",
|
||||
"type": "tag"
|
||||
"name": "method",
|
||||
"fieldContext": "tag"
|
||||
}]
|
||||
}
|
||||
}]
|
||||
@@ -436,10 +436,9 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "B",
|
||||
"type": "builder_formula",
|
||||
"spec": {
|
||||
"name": "rate",
|
||||
"name": "B",
|
||||
"expression": "A * 100"
|
||||
}
|
||||
}
|
||||
@@ -465,7 +464,7 @@ func TestQueryRangeRequest_UnmarshalJSON(t *testing.T) {
|
||||
{
|
||||
Type: QueryTypeFormula,
|
||||
Spec: QueryBuilderFormula{
|
||||
Name: "rate",
|
||||
Name: "B",
|
||||
Expression: "A * 100",
|
||||
},
|
||||
},
|
||||
|
||||
@@ -99,8 +99,8 @@ type TimeSeriesValue struct {
Partial bool `json:"partial,omitempty"`

// for the heatmap type chart
Values []float64 `json:"values,omitempty"`
Bucket *Bucket `json:"bucket,omitempty"`
// Values []float64 `json:"values,omitempty"`
Bucket *Bucket `json:"bucket,omitempty"`
}

type Bucket struct {
|
||||
pkg/types/querybuildertypes/querybuildertypesv5/validation.go (new file, 783 lines)
@@ -0,0 +1,783 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/types/metrictypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
// getQueryIdentifier returns a friendly identifier for a query based on its type and name/content
|
||||
func getQueryIdentifier(envelope QueryEnvelope, index int) string {
|
||||
switch envelope.Type {
|
||||
case QueryTypeBuilder, QueryTypeSubQuery:
|
||||
switch spec := envelope.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
if spec.Name != "" {
|
||||
return fmt.Sprintf("query '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("trace query at position %d", index+1)
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
if spec.Name != "" {
|
||||
return fmt.Sprintf("query '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("log query at position %d", index+1)
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
if spec.Name != "" {
|
||||
return fmt.Sprintf("query '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("metric query at position %d", index+1)
|
||||
}
|
||||
case QueryTypeFormula:
|
||||
if spec, ok := envelope.Spec.(QueryBuilderFormula); ok && spec.Name != "" {
|
||||
return fmt.Sprintf("formula '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("formula at position %d", index+1)
|
||||
case QueryTypeJoin:
|
||||
if spec, ok := envelope.Spec.(QueryBuilderJoin); ok && spec.Name != "" {
|
||||
return fmt.Sprintf("join '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("join at position %d", index+1)
|
||||
case QueryTypePromQL:
|
||||
if spec, ok := envelope.Spec.(PromQuery); ok && spec.Name != "" {
|
||||
return fmt.Sprintf("PromQL query '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("PromQL query at position %d", index+1)
|
||||
case QueryTypeClickHouseSQL:
|
||||
if spec, ok := envelope.Spec.(ClickHouseQuery); ok && spec.Name != "" {
|
||||
return fmt.Sprintf("ClickHouse query '%s'", spec.Name)
|
||||
}
|
||||
return fmt.Sprintf("ClickHouse query at position %d", index+1)
|
||||
}
|
||||
return fmt.Sprintf("query at position %d", index+1)
|
||||
}
|
||||
|
||||
const (
    // Maximum limit for query results
    MaxQueryLimit = 10000
)

// ValidateFunctionName checks if the function name is valid
func ValidateFunctionName(name FunctionName) error {
    validFunctions := []FunctionName{
        FunctionNameCutOffMin,
        FunctionNameCutOffMax,
        FunctionNameClampMin,
        FunctionNameClampMax,
        FunctionNameAbsolute,
        FunctionNameRunningDiff,
        FunctionNameLog2,
        FunctionNameLog10,
        FunctionNameCumulativeSum,
        FunctionNameEWMA3,
        FunctionNameEWMA5,
        FunctionNameEWMA7,
        FunctionNameMedian3,
        FunctionNameMedian5,
        FunctionNameMedian7,
        FunctionNameTimeShift,
        FunctionNameAnomaly,
    }

    if slices.Contains(validFunctions, name) {
        return nil
    }

    return errors.NewInvalidInputf(
        errors.CodeInvalidInput,
        "invalid function name: %s",
        name.StringValue(),
    ).WithAdditional(fmt.Sprintf("valid functions are: %v", validFunctions))
}
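A minimal sketch of the intended call pattern (illustrative only): any name outside the allow-list above is rejected with an invalid-input error whose additional context lists the valid functions.

// FunctionNameTimeShift is one of the allow-listed names above, so this passes.
if err := ValidateFunctionName(FunctionNameTimeShift); err != nil {
    // not reached for allow-listed names
    panic(err)
}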
// Validate performs preliminary validation on QueryBuilderQuery
func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
    // Validate signal
    if err := q.validateSignal(); err != nil {
        return err
    }

    // Validate aggregations only for non-raw request types
    if requestType != RequestTypeRaw {
        if err := q.validateAggregations(); err != nil {
            return err
        }
    }

    // Validate limit and pagination
    if err := q.validateLimitAndPagination(); err != nil {
        return err
    }

    // Validate functions
    if err := q.validateFunctions(); err != nil {
        return err
    }

    // Validate secondary aggregations
    if err := q.validateSecondaryAggregations(); err != nil {
        return err
    }

    // Validate order by
    if err := q.validateOrderBy(); err != nil {
        return err
    }

    return nil
}
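To make the flow above concrete, here is a rough sketch of validating a single metric query (illustrative only; the metric name is made up, and the field names mirror those used in the tests further down):

q := QueryBuilderQuery[MetricAggregation]{
    Signal: telemetrytypes.SignalMetrics,
    Aggregations: []MetricAggregation{{
        MetricName:       "http_server_requests", // hypothetical metric
        Type:             metrictypes.SumType,
        Temporality:      metrictypes.Delta,
        TimeAggregation:  metrictypes.TimeAggregationRate,
        SpaceAggregation: metrictypes.SpaceAggregationSum,
    }},
}
if err := q.Validate(RequestTypeTimeSeries); err != nil {
    // one of the checks above failed; err names the offending part
}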
func (q *QueryBuilderQuery[T]) validateSignal() error {
    // Signal validation is handled during unmarshaling in req.go
    // Valid signals are: metrics, traces, logs
    switch q.Signal {
    case telemetrytypes.SignalMetrics,
        telemetrytypes.SignalTraces,
        telemetrytypes.SignalLogs,
        telemetrytypes.SignalUnspecified: // Empty is allowed for backward compatibility
        return nil
    default:
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "invalid signal type: %s",
            q.Signal,
        ).WithAdditional(
            "Valid signals are: metrics, traces, logs",
        )
    }
}
func (q *QueryBuilderQuery[T]) validateAggregations() error {
    // At least one aggregation required for non-disabled queries
    if len(q.Aggregations) == 0 && !q.Disabled {
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "at least one aggregation is required",
        )
    }

    // Check for duplicate aliases
    aliases := make(map[string]bool)
    for i, agg := range q.Aggregations {
        // Type-specific validation based on T
        switch v := any(agg).(type) {
        case MetricAggregation:
            if v.MetricName == "" {
                aggId := fmt.Sprintf("aggregation #%d", i+1)
                if q.Name != "" {
                    aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
                }
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "metric name is required for %s",
                    aggId,
                )
            }
            // Validate metric-specific aggregations
            if err := validateMetricAggregation(v); err != nil {
                // Extract the underlying error details
                _, _, innerMsg, _, _, additionals := errors.Unwrapb(err)

                // Create a new error with friendly identifier
                aggId := fmt.Sprintf("aggregation #%d", i+1)
                if q.Name != "" {
                    aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
                }
                newErr := errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "invalid metric %s: %s",
                    aggId,
                    innerMsg,
                )

                // Add any additional context from the inner error
                if len(additionals) > 0 {
                    newErr = newErr.WithAdditional(additionals...)
                }

                return newErr
            }
        case TraceAggregation:
            if v.Expression == "" {
                aggId := fmt.Sprintf("aggregation #%d", i+1)
                if q.Name != "" {
                    aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
                }
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "expression is required for trace %s",
                    aggId,
                )
            }
            if v.Alias != "" {
                if aliases[v.Alias] {
                    return errors.NewInvalidInputf(
                        errors.CodeInvalidInput,
                        "duplicate aggregation alias: %s",
                        v.Alias,
                    )
                }
                aliases[v.Alias] = true
            }
        case LogAggregation:
            if v.Expression == "" {
                aggId := fmt.Sprintf("aggregation #%d", i+1)
                if q.Name != "" {
                    aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
                }
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "expression is required for log %s",
                    aggId,
                )
            }
            if v.Alias != "" {
                if aliases[v.Alias] {
                    return errors.NewInvalidInputf(
                        errors.CodeInvalidInput,
                        "duplicate aggregation alias: %s",
                        v.Alias,
                    )
                }
                aliases[v.Alias] = true
            }
        }
    }

    return nil
}
func (q *QueryBuilderQuery[T]) validateLimitAndPagination() error {
    // Validate limit
    if q.Limit < 0 {
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "limit must be non-negative, got %d",
            q.Limit,
        )
    }

    if q.Limit > MaxQueryLimit {
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "limit exceeds maximum allowed value of %d",
            MaxQueryLimit,
        ).WithAdditional(
            fmt.Sprintf("Provided limit: %d", q.Limit),
        )
    }

    // Validate offset
    if q.Offset < 0 {
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "offset must be non-negative, got %d",
            q.Offset,
        )
    }

    return nil
}
func (q *QueryBuilderQuery[T]) validateFunctions() error {
    for i, fn := range q.Functions {
        if err := ValidateFunctionName(fn.Name); err != nil {
            // Extract the underlying error details
            _, _, innerMsg, _, _, additionals := errors.Unwrapb(err)

            // Create a new error with friendly identifier
            fnId := fmt.Sprintf("function #%d", i+1)
            if q.Name != "" {
                fnId = fmt.Sprintf("function #%d in query '%s'", i+1, q.Name)
            }
            newErr := errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "invalid %s: %s",
                fnId,
                innerMsg,
            )

            // Add any additional context from the inner error
            if len(additionals) > 0 {
                newErr = newErr.WithAdditional(additionals...)
            }

            return newErr
        }
    }
    return nil
}
func (q *QueryBuilderQuery[T]) validateSecondaryAggregations() error {
    for i, secAgg := range q.SecondaryAggregations {
        // Secondary aggregation expression can be empty - we allow it per requirements
        // Just validate structure
        if secAgg.Limit < 0 {
            secAggId := fmt.Sprintf("secondary aggregation #%d", i+1)
            if q.Name != "" {
                secAggId = fmt.Sprintf("secondary aggregation #%d in query '%s'", i+1, q.Name)
            }
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "%s: limit must be non-negative",
                secAggId,
            )
        }
    }
    return nil
}
func (q *QueryBuilderQuery[T]) validateOrderBy() error {
    for i, order := range q.Order {
        // Direction validation is handled by the OrderDirection type
        if order.Direction != OrderDirectionAsc && order.Direction != OrderDirectionDesc {
            orderId := fmt.Sprintf("order by clause #%d", i+1)
            if q.Name != "" {
                orderId = fmt.Sprintf("order by clause #%d in query '%s'", i+1, q.Name)
            }
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "invalid direction for %s: %s",
                orderId,
                order.Direction.StringValue(),
            ).WithAdditional(
                "Valid directions are: asc, desc",
            )
        }
    }
    return nil
}
// Validate validates the entire query range request
func (r *QueryRangeRequest) Validate() error {
    // Validate time range
    if r.Start >= r.End {
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "start time must be before end time",
        )
    }

    // Validate request type
    switch r.RequestType {
    case RequestTypeRaw, RequestTypeTimeSeries, RequestTypeScalar:
        // Valid request types
    default:
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "invalid request type: %s",
            r.RequestType,
        ).WithAdditional(
            "Valid request types are: raw, timeseries, scalar",
        )
    }

    // Validate composite query
    if err := r.validateCompositeQuery(); err != nil {
        return err
    }

    return nil
}
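Callers are expected to run Validate before executing anything. A minimal fragment (illustrative only), assuming req has already been decoded from the HTTP body:

// Illustrative fragment only.
var req QueryRangeRequest
// ... decode the request body into req ...
if err := req.Validate(); err != nil {
    // the error already names the offending query (e.g. "invalid query 'A': ...")
    // and carries the valid alternatives as additional context
    return err
}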
func (r *QueryRangeRequest) validateCompositeQuery() error {
    // Validate queries in composite query
    if len(r.CompositeQuery.Queries) == 0 {
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "at least one query is required",
        )
    }

    // Track query names for uniqueness (only for non-formula queries)
    queryNames := make(map[string]bool)

    // Validate each query based on its type
    for i, envelope := range r.CompositeQuery.Queries {
        switch envelope.Type {
        case QueryTypeBuilder, QueryTypeSubQuery:
            // Validate based on the concrete type
            switch spec := envelope.Spec.(type) {
            case QueryBuilderQuery[TraceAggregation]:
                if err := spec.Validate(r.RequestType); err != nil {
                    // Extract the underlying error details
                    _, _, innerMsg, _, _, additionals := errors.Unwrapb(err)

                    // Create a new error with friendly query identifier
                    queryId := getQueryIdentifier(envelope, i)
                    newErr := errors.NewInvalidInputf(
                        errors.CodeInvalidInput,
                        "invalid %s: %s",
                        queryId,
                        innerMsg,
                    )

                    // Add any additional context from the inner error
                    if len(additionals) > 0 {
                        newErr = newErr.WithAdditional(additionals...)
                    }

                    return newErr
                }
                // Check name uniqueness for non-formula context
                if spec.Name != "" {
                    if queryNames[spec.Name] {
                        return errors.NewInvalidInputf(
                            errors.CodeInvalidInput,
                            "duplicate query name '%s'",
                            spec.Name,
                        )
                    }
                    queryNames[spec.Name] = true
                }
            case QueryBuilderQuery[LogAggregation]:
                if err := spec.Validate(r.RequestType); err != nil {
                    // Extract the underlying error details
                    _, _, innerMsg, _, _, additionals := errors.Unwrapb(err)

                    // Create a new error with friendly query identifier
                    queryId := getQueryIdentifier(envelope, i)
                    newErr := errors.NewInvalidInputf(
                        errors.CodeInvalidInput,
                        "invalid %s: %s",
                        queryId,
                        innerMsg,
                    )

                    // Add any additional context from the inner error
                    if len(additionals) > 0 {
                        newErr = newErr.WithAdditional(additionals...)
                    }

                    return newErr
                }
                // Check name uniqueness for non-formula context
                if spec.Name != "" {
                    if queryNames[spec.Name] {
                        return errors.NewInvalidInputf(
                            errors.CodeInvalidInput,
                            "duplicate query name '%s'",
                            spec.Name,
                        )
                    }
                    queryNames[spec.Name] = true
                }
            case QueryBuilderQuery[MetricAggregation]:
                if err := spec.Validate(r.RequestType); err != nil {
                    // Extract the underlying error details
                    _, _, innerMsg, _, _, additionals := errors.Unwrapb(err)

                    // Create a new error with friendly query identifier
                    queryId := getQueryIdentifier(envelope, i)
                    newErr := errors.NewInvalidInputf(
                        errors.CodeInvalidInput,
                        "invalid %s: %s",
                        queryId,
                        innerMsg,
                    )

                    // Add any additional context from the inner error
                    if len(additionals) > 0 {
                        newErr = newErr.WithAdditional(additionals...)
                    }

                    return newErr
                }
                // Check name uniqueness for non-formula context
                if spec.Name != "" {
                    if queryNames[spec.Name] {
                        return errors.NewInvalidInputf(
                            errors.CodeInvalidInput,
                            "duplicate query name '%s'",
                            spec.Name,
                        )
                    }
                    queryNames[spec.Name] = true
                }
            default:
                queryId := getQueryIdentifier(envelope, i)
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "unknown spec type for %s",
                    queryId,
                )
            }
        case QueryTypeFormula:
            // Formula validation is handled separately
            spec, ok := envelope.Spec.(QueryBuilderFormula)
            if !ok {
                queryId := getQueryIdentifier(envelope, i)
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "invalid spec for %s",
                    queryId,
                )
            }
            if spec.Expression == "" {
                queryId := getQueryIdentifier(envelope, i)
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "expression is required for %s",
                    queryId,
                )
            }
        case QueryTypeJoin:
            // Join validation is handled separately
            _, ok := envelope.Spec.(QueryBuilderJoin)
            if !ok {
                queryId := getQueryIdentifier(envelope, i)
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "invalid spec for %s",
                    queryId,
                )
            }
        case QueryTypePromQL:
            // PromQL validation is handled separately
            spec, ok := envelope.Spec.(PromQuery)
            if !ok {
                queryId := getQueryIdentifier(envelope, i)
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "invalid spec for %s",
                    queryId,
                )
            }
            if spec.Query == "" {
                queryId := getQueryIdentifier(envelope, i)
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "query expression is required for %s",
                    queryId,
                )
            }
        case QueryTypeClickHouseSQL:
            // ClickHouse SQL validation is handled separately
            spec, ok := envelope.Spec.(ClickHouseQuery)
            if !ok {
                queryId := getQueryIdentifier(envelope, i)
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "invalid spec for %s",
                    queryId,
                )
            }
            if spec.Query == "" {
                queryId := getQueryIdentifier(envelope, i)
                return errors.NewInvalidInputf(
                    errors.CodeInvalidInput,
                    "query expression is required for %s",
                    queryId,
                )
            }
        default:
            queryId := getQueryIdentifier(envelope, i)
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "unknown query type '%s' for %s",
                envelope.Type,
                queryId,
            ).WithAdditional(
                "Valid query types are: builder_query, builder_formula, builder_join, promql, clickhouse_sql",
            )
        }
    }

    return nil
}
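One behaviour worth spelling out (an illustrative sketch; the literal field layout of QueryEnvelope and CompositeQuery is assumed from how they are read above, and the metric name and time bounds are made up): builder query names must be unique, so a request carrying two queries named "A" is rejected here rather than at execution time.

dup := QueryEnvelope{
    Type: QueryTypeBuilder,
    Spec: QueryBuilderQuery[MetricAggregation]{
        Name: "A",
        Aggregations: []MetricAggregation{{
            MetricName:       "m", // hypothetical metric
            TimeAggregation:  metrictypes.TimeAggregationSum,
            SpaceAggregation: metrictypes.SpaceAggregationSum,
        }},
    },
}
req := QueryRangeRequest{
    Start:          1735689600000, // assumed epoch-millisecond bounds
    End:            1735693200000,
    RequestType:    RequestTypeTimeSeries,
    CompositeQuery: CompositeQuery{Queries: []QueryEnvelope{dup, dup}},
}
err := req.Validate() // rejected: duplicate query name 'A'
_ = err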
// Validate performs validation on CompositeQuery
func (c *CompositeQuery) Validate(requestType RequestType) error {
    if len(c.Queries) == 0 {
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "at least one query is required",
        )
    }

    // Validate each query
    for i, envelope := range c.Queries {
        if err := validateQueryEnvelope(envelope, requestType); err != nil {
            // Extract the underlying error details
            _, _, innerMsg, _, _, additionals := errors.Unwrapb(err)

            // Create a new error with friendly query identifier
            queryId := getQueryIdentifier(envelope, i)
            newErr := errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "invalid %s: %s",
                queryId,
                innerMsg,
            )

            // Add any additional context from the inner error
            if len(additionals) > 0 {
                newErr = newErr.WithAdditional(additionals...)
            }

            return newErr
        }
    }

    return nil
}
func validateQueryEnvelope(envelope QueryEnvelope, requestType RequestType) error {
    switch envelope.Type {
    case QueryTypeBuilder, QueryTypeSubQuery:
        switch spec := envelope.Spec.(type) {
        case QueryBuilderQuery[TraceAggregation]:
            return spec.Validate(requestType)
        case QueryBuilderQuery[LogAggregation]:
            return spec.Validate(requestType)
        case QueryBuilderQuery[MetricAggregation]:
            return spec.Validate(requestType)
        default:
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "unknown query spec type",
            )
        }
    case QueryTypeFormula:
        spec, ok := envelope.Spec.(QueryBuilderFormula)
        if !ok {
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "invalid formula spec",
            )
        }
        if spec.Expression == "" {
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "formula expression is required",
            )
        }
        return nil
    case QueryTypeJoin:
        _, ok := envelope.Spec.(QueryBuilderJoin)
        if !ok {
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "invalid join spec",
            )
        }
        return nil
    case QueryTypePromQL:
        spec, ok := envelope.Spec.(PromQuery)
        if !ok {
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "invalid PromQL spec",
            )
        }
        if spec.Query == "" {
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "PromQL query is required",
            )
        }
        return nil
    case QueryTypeClickHouseSQL:
        spec, ok := envelope.Spec.(ClickHouseQuery)
        if !ok {
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "invalid ClickHouse SQL spec",
            )
        }
        if spec.Query == "" {
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "ClickHouse SQL query is required",
            )
        }
        return nil
    default:
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "unknown query type: %s",
            envelope.Type,
        ).WithAdditional(
            "Valid query types are: builder_query, builder_sub_query, builder_formula, builder_join, promql, clickhouse_sql",
        )
    }
}
// validateMetricAggregation validates metric-specific aggregation parameters
func validateMetricAggregation(agg MetricAggregation) error {
    // Validate that rate/increase are only used with appropriate temporalities
    if agg.TimeAggregation == metrictypes.TimeAggregationRate || agg.TimeAggregation == metrictypes.TimeAggregationIncrease {
        // For gauge metrics (Unspecified temporality), rate/increase doesn't make sense
        if agg.Temporality == metrictypes.Unspecified {
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "rate/increase aggregation cannot be used with gauge metrics (unspecified temporality)",
            )
        }
    }

    // Validate percentile aggregations are only used with histogram types
    if agg.SpaceAggregation.IsPercentile() {
        if agg.Type != metrictypes.HistogramType && agg.Type != metrictypes.ExpHistogramType && agg.Type != metrictypes.SummaryType {
            return errors.NewInvalidInputf(
                errors.CodeInvalidInput,
                "percentile aggregation can only be used with histogram or summary metric types",
            )
        }
    }

    // Validate time aggregation values
    validTimeAggregations := []metrictypes.TimeAggregation{
        metrictypes.TimeAggregationUnspecified,
        metrictypes.TimeAggregationLatest,
        metrictypes.TimeAggregationSum,
        metrictypes.TimeAggregationAvg,
        metrictypes.TimeAggregationMin,
        metrictypes.TimeAggregationMax,
        metrictypes.TimeAggregationCount,
        metrictypes.TimeAggregationCountDistinct,
        metrictypes.TimeAggregationRate,
        metrictypes.TimeAggregationIncrease,
    }

    validTimeAgg := slices.Contains(validTimeAggregations, agg.TimeAggregation)
    if !validTimeAgg {
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "invalid time aggregation: %s",
            agg.TimeAggregation.StringValue(),
        ).WithAdditional(
            "Valid time aggregations: latest, sum, avg, min, max, count, count_distinct, rate, increase",
        )
    }

    // Validate space aggregation values
    validSpaceAggregations := []metrictypes.SpaceAggregation{
        metrictypes.SpaceAggregationUnspecified,
        metrictypes.SpaceAggregationSum,
        metrictypes.SpaceAggregationAvg,
        metrictypes.SpaceAggregationMin,
        metrictypes.SpaceAggregationMax,
        metrictypes.SpaceAggregationCount,
        metrictypes.SpaceAggregationPercentile50,
        metrictypes.SpaceAggregationPercentile75,
        metrictypes.SpaceAggregationPercentile90,
        metrictypes.SpaceAggregationPercentile95,
        metrictypes.SpaceAggregationPercentile99,
    }

    validSpaceAgg := slices.Contains(validSpaceAggregations, agg.SpaceAggregation)
    if !validSpaceAgg {
        return errors.NewInvalidInputf(
            errors.CodeInvalidInput,
            "invalid space aggregation: %s",
            agg.SpaceAggregation.StringValue(),
        ).WithAdditional(
            "Valid space aggregations: sum, avg, min, max, count, p50, p75, p90, p95, p99",
        )
    }

    return nil
}
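To make the first rule above concrete (an illustrative sketch, also covered by the tests in the next file; the metric name is made up): a rate over a gauge, which has unspecified temporality, is rejected outright.

err := validateMetricAggregation(MetricAggregation{
    MetricName:       "system_cpu_utilization", // hypothetical gauge
    Type:             metrictypes.GaugeType,
    Temporality:      metrictypes.Unspecified,
    TimeAggregation:  metrictypes.TimeAggregationRate,
    SpaceAggregation: metrictypes.SpaceAggregationSum,
})
// err != nil: "rate/increase aggregation cannot be used with gauge metrics (unspecified temporality)"
_ = err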
@@ -0,0 +1,213 @@
package querybuildertypesv5

import (
    "testing"

    "github.com/SigNoz/signoz/pkg/types/metrictypes"
    "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
    "github.com/stretchr/testify/assert"
)

func TestValidateMetricAggregation(t *testing.T) {
    tests := []struct {
        name    string
        agg     MetricAggregation
        wantErr bool
        errMsg  string
    }{
        {
            name: "valid sum aggregation",
            agg: MetricAggregation{
                MetricName:       "test_metric",
                Type:             metrictypes.SumType,
                Temporality:      metrictypes.Cumulative,
                TimeAggregation:  metrictypes.TimeAggregationSum,
                SpaceAggregation: metrictypes.SpaceAggregationSum,
            },
            wantErr: false,
        },
        {
            name: "invalid rate on gauge",
            agg: MetricAggregation{
                MetricName:       "test_metric",
                Type:             metrictypes.GaugeType,
                Temporality:      metrictypes.Unspecified,
                TimeAggregation:  metrictypes.TimeAggregationRate,
                SpaceAggregation: metrictypes.SpaceAggregationSum,
            },
            wantErr: true,
            errMsg:  "rate/increase aggregation cannot be used with gauge metrics",
        },
        {
            name: "invalid increase on gauge",
            agg: MetricAggregation{
                MetricName:       "test_metric",
                Type:             metrictypes.GaugeType,
                Temporality:      metrictypes.Unspecified,
                TimeAggregation:  metrictypes.TimeAggregationIncrease,
                SpaceAggregation: metrictypes.SpaceAggregationSum,
            },
            wantErr: true,
            errMsg:  "rate/increase aggregation cannot be used with gauge metrics",
        },
        {
            name: "valid rate on cumulative",
            agg: MetricAggregation{
                MetricName:       "test_metric",
                Type:             metrictypes.SumType,
                Temporality:      metrictypes.Cumulative,
                TimeAggregation:  metrictypes.TimeAggregationRate,
                SpaceAggregation: metrictypes.SpaceAggregationSum,
            },
            wantErr: false,
        },
        {
            name: "valid rate on delta",
            agg: MetricAggregation{
                MetricName:       "test_metric",
                Type:             metrictypes.SumType,
                Temporality:      metrictypes.Delta,
                TimeAggregation:  metrictypes.TimeAggregationRate,
                SpaceAggregation: metrictypes.SpaceAggregationSum,
            },
            wantErr: false,
        },
        {
            name: "invalid percentile on non-histogram",
            agg: MetricAggregation{
                MetricName:       "test_metric",
                Type:             metrictypes.SumType,
                Temporality:      metrictypes.Cumulative,
                TimeAggregation:  metrictypes.TimeAggregationSum,
                SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
            },
            wantErr: true,
            errMsg:  "percentile aggregation can only be used with histogram",
        },
        {
            name: "valid percentile on histogram",
            agg: MetricAggregation{
                MetricName:       "test_metric",
                Type:             metrictypes.HistogramType,
                Temporality:      metrictypes.Delta,
                TimeAggregation:  metrictypes.TimeAggregationSum,
                SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
            },
            wantErr: false,
        },
        {
            name: "valid percentile on exp histogram",
            agg: MetricAggregation{
                MetricName:       "test_metric",
                Type:             metrictypes.ExpHistogramType,
                Temporality:      metrictypes.Delta,
                TimeAggregation:  metrictypes.TimeAggregationSum,
                SpaceAggregation: metrictypes.SpaceAggregationPercentile99,
            },
            wantErr: false,
        },
        {
            name: "valid percentile on summary",
            agg: MetricAggregation{
                MetricName:       "test_metric",
                Type:             metrictypes.SummaryType,
                Temporality:      metrictypes.Delta,
                TimeAggregation:  metrictypes.TimeAggregationSum,
                SpaceAggregation: metrictypes.SpaceAggregationPercentile50,
            },
            wantErr: false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := validateMetricAggregation(tt.agg)
            if tt.wantErr {
                assert.Error(t, err)
                if tt.errMsg != "" {
                    assert.Contains(t, err.Error(), tt.errMsg)
                }
            } else {
                assert.NoError(t, err)
            }
        })
    }
}
func TestQueryBuilderQuery_ValidateMetrics(t *testing.T) {
    tests := []struct {
        name    string
        query   QueryBuilderQuery[MetricAggregation]
        reqType RequestType
        wantErr bool
        errMsg  string
    }{
        {
            name: "valid metric query",
            query: QueryBuilderQuery[MetricAggregation]{
                Signal: telemetrytypes.SignalMetrics,
                Aggregations: []MetricAggregation{
                    {
                        MetricName:       "test_metric",
                        Type:             metrictypes.SumType,
                        Temporality:      metrictypes.Cumulative,
                        TimeAggregation:  metrictypes.TimeAggregationRate,
                        SpaceAggregation: metrictypes.SpaceAggregationSum,
                    },
                },
            },
            reqType: RequestTypeTimeSeries,
            wantErr: false,
        },
        {
            name: "invalid metric query - rate on gauge",
            query: QueryBuilderQuery[MetricAggregation]{
                Signal: telemetrytypes.SignalMetrics,
                Aggregations: []MetricAggregation{
                    {
                        MetricName:       "test_metric",
                        Type:             metrictypes.GaugeType,
                        Temporality:      metrictypes.Unspecified,
                        TimeAggregation:  metrictypes.TimeAggregationRate,
                        SpaceAggregation: metrictypes.SpaceAggregationSum,
                    },
                },
            },
            reqType: RequestTypeTimeSeries,
            wantErr: true,
            errMsg:  "rate/increase aggregation cannot be used with gauge metrics",
        },
        {
            name: "empty metric name",
            query: QueryBuilderQuery[MetricAggregation]{
                Signal: telemetrytypes.SignalMetrics,
                Aggregations: []MetricAggregation{
                    {
                        MetricName:       "",
                        Type:             metrictypes.SumType,
                        Temporality:      metrictypes.Cumulative,
                        TimeAggregation:  metrictypes.TimeAggregationSum,
                        SpaceAggregation: metrictypes.SpaceAggregationSum,
                    },
                },
            },
            reqType: RequestTypeTimeSeries,
            wantErr: true,
            errMsg:  "metric name is required",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := tt.query.Validate(tt.reqType)
            if tt.wantErr {
                assert.Error(t, err)
                if tt.errMsg != "" {
                    assert.Contains(t, err.Error(), tt.errMsg)
                }
            } else {
                assert.NoError(t, err)
            }
        })
    }
}
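Assuming the package path shown in the file header above, these tests can be run locally with: go test -run 'TestValidateMetricAggregation|TestQueryBuilderQuery_ValidateMetrics' ./pkg/types/querybuildertypes/querybuildertypesv5/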