Compare commits


30 Commits

Author SHA1 Message Date
Nityananda Gohain
72cbc1a9e7 fix: add back telemetry for dashboard with logs queries (#5997) 2024-09-18 10:29:00 +05:30
Vishal Sharma
a9841755a7 chore: add request dashboard issue template (#5991) 2024-09-17 22:44:14 +05:30
Nityananda Gohain
03e6c33f82 fix: use new table for default alerts (#5992) 2024-09-17 21:06:52 +05:30
Shaheer Kochai
3c5aa86ee2 feat: make the label value clickable if it's a link (#5927) 2024-09-17 19:05:51 +05:30
Srikanth Chekuri
06a89b21da chore: use mean of past, past2 and past3 seasons for growth (#5974) 2024-09-17 16:12:17 +05:30
Raj Kamal Singh
8c891f0e87 Fix: cheaper query for fetching log attribute values for filter suggestions (#5989)
* chore: change query for fetching multiple log attribs to make sure it is always cheap

* chore: get filter suggestions tests passing
2024-09-17 15:49:14 +05:30
Srikanth Chekuri
49dd5f2ef7 chore: add enrichment in threshold rule (#5925) 2024-09-17 15:33:17 +05:30
Raj Kamal Singh
83d01e7a0d fix: dont request query progress reporting if reporting query start failed (#5958) 2024-09-17 12:38:53 +05:30
Srikanth Chekuri
f8e97c9c5c chore: move channel management from CH reader to rule DB (#5934) 2024-09-17 11:41:46 +05:30
Nityananda Gohain
b78ade2cf2 fix: add limits to suggestion query (#5984) 2024-09-16 23:04:39 +05:30
Nityananda Gohain
1b59719891 Fix/iscolumn (#5983)
* fix: fix isColumn method to rely on column instead of index

* fix: add the space for explicit check
2024-09-16 16:35:47 +05:30
Nityananda Gohain
481c4e1271 fix: use proper tableName (#5982)
* fix: use proper tableName

* fix: remove duplicate code
2024-09-16 14:30:31 +05:30
Vikrant Gupta
fe0d2a967f chore: remove the jest-playwright-test unused package causing axios vulnerability (#5972) 2024-09-16 10:42:16 +05:30
Vikrant Gupta
e77a6f4d7a feat: send last log line time stamp for timestamp order-by desc (#5968)
* feat: send last log line time stamp for timestamp orderby desc

* chore: little cleanup
2024-09-16 10:06:09 +05:30
Srikanth Chekuri
a023a7514e chore: move analytics related methods from CH reader to their own mod… (#5935) 2024-09-14 13:23:49 +05:30
Vikrant Gupta
3573c0863c feat: add support to configure units for pie chart values (#5960)
* feat: add units for pie chart

* chore: set the default to none in case no unit present

* chore: rename the y axis unit to unit

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-09-14 13:11:04 +05:30
Srikanth Chekuri
b444c1e6b1 fix: do not use removed column in traces clickhouse query (#5953) 2024-09-13 18:20:37 +05:30
Srikanth Chekuri
5698628839 chore: move some structs out of v3 (#5932) 2024-09-13 18:10:49 +05:30
Srikanth Chekuri
3596f73fb1 chore: add anomaly provider interface (#5856) 2024-09-13 18:06:20 +05:30
Srikanth Chekuri
5b22490d6d chore: improve error message readability (#5628) 2024-09-13 18:01:37 +05:30
Srikanth Chekuri
39f9fc6900 fix: missing related logs or traces links in alert notification (#5946) 2024-09-13 17:30:02 +05:30
Nityananda Gohain
f854cdd9d3 feat: collect telemetry for ch log queries in alerts and dashboards (#5967)
* feat: collect telemetry for ch log queries in alerts and dashboards

* feat: consider local table as well

* fix: address pr comments
2024-09-13 17:15:03 +05:30
Nityananda Gohain
011b2167ba Integrate V4 QB (#5914)
* feat: logsV4 initial refactoring

* feat: filter_query builder with tests added

* feat: all functions of v4 refactored

* fix: tests fixed

* feat: logs list API, logic update for better perf

* feat: integrate v4 qb

* fix: pass flag

* fix: update select for table panel

* fix: tests updated with better examples of limit and group by

* fix: resource filter support in live tail

* fix: v4 livetail api

* fix: changes for casting pointer value

* fix: reader options updated

* feat: cleanup and use flag

* feat: restrict new list api to single query

* fix: move getTsRanges to utils

* fix: address pr comments
2024-09-13 17:04:22 +05:30
Srikanth Chekuri
a5f3a189f8 chore: move traces builder query attributes enrichment before query prep (#5917) 2024-09-13 16:43:56 +05:30
Vikrant Gupta
3fdfb51e02 chore: deprecate clarity from frontend (#5962) 2024-09-13 13:55:45 +05:30
Vikrant Gupta
43577c7ead feat: group by severity logs explorer page by default (#5772)
* feat: initial setup for group by severity logs explorer page

* chore: reduce the height of the histogram

* chore: pr cleanup

* chore: minor color update

* chore: clean the PR

* chore: clean the PR

* chore: better base handling

* fix: append query names to the legends in case of multiple queries

* feat: make the changes only for list view and add back legends
2024-09-13 13:47:08 +05:30
Vikrant Gupta
6661aa7686 chore: update the filter in / filter out operators (#5923)
* chore: update the filter in / filter out operators

* fix: handle cases for old logs explorer
2024-09-13 13:43:40 +05:30
Vikrant Gupta
8d54e3b766 fix: dashboard list page showing older data (#5961) 2024-09-13 13:41:55 +05:30
Sudeep MP
6c446226eb refactor(ListAlert): update styles and button layout (#5931) 2024-09-13 01:03:22 +05:30
Nityananda Gohain
90b5f88413 feat: logs list API, logic update for better perf (#5912)
* feat: logsV4 initial refactoring

* feat: filter_query builder with tests added

* feat: all functions of v4 refactored

* fix: tests fixed

* feat: logs list API, logic update for better perf

* fix: update select for table panel

* fix: tests updated with better examples of limit and group by

* fix: resource filter support in live tail

* feat: cleanup and use flag

* feat: restrict new list api to single query

* fix: move getTsRanges to utils
2024-09-12 21:34:27 +05:30
91 changed files with 3913 additions and 3175 deletions

View File

@@ -0,0 +1,58 @@
---
name: Request Dashboard
about: Request a new dashboard for the SigNoz Dashboards repository
title: ''
labels: 'dashboard-template'
assignees: ''
---
## 📝 Dashboard Request Template
*Use this template to request a new dashboard for the SigNoz Dashboards repository. Please provide as much detail as possible to help us understand your needs.*
---
### 1. Dashboard Name
Name of the requested dashboard (e.g., MySQL Monitoring Dashboard):
---
### 2. Expected Dashboard Sections and Panels
#### Section Name
Brief description of the section (e.g., "Resource usage metrics for MySQL database").
#### Panel Name
Panel description (e.g., "Value-type panels displaying current CPU usage, memory usage, etc.").
- **Example:**
- **Section**: Resource Metrics
- **Panel**: CPU Usage - Displays the current CPU usage across all database instances.
- **Panel**: Memory Usage - Displays the total memory used by the MySQL process.
(Repeat this format for additional sections and panels)
---
### 3. Expected Variables
List any variables you expect to use in the dashboard (e.g., `deployment.environment`, `hostname`, etc.).
---
### 4. Additional Comments or Requirements
Any additional details or special requirements for the dashboard?
---
### 📋 Notes
Please review the [CONTRIBUTING.md](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md) for guidelines on dashboard structure, naming conventions, and how to submit a pull request.
---
Thank you for your request! We will review it and provide feedback or guidance as necessary.

View File

@@ -11,9 +11,9 @@ jobs:
check-no-ee-references:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Run check
run: make check-no-ee-references
- uses: actions/checkout@v4
- name: Run check
run: make check-no-ee-references
build-frontend:
runs-on: ubuntu-latest
@@ -43,7 +43,6 @@ jobs:
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
- name: Install dependencies
run: cd frontend && yarn install
- name: Run ESLint

View File

@@ -9,7 +9,6 @@ on:
- v*
jobs:
image-build-and-push-query-service:
runs-on: ubuntu-latest
steps:
@@ -151,7 +150,6 @@ jobs:
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env

View File

@@ -0,0 +1,44 @@
package anomaly
import (
"context"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)
type DailyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*DailyProvider)(nil)
func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &dp.BaseSeasonalProvider
}
// NewDailyProvider uses the same generic option type
func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
dp := &DailyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(dp)
}
dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: dp.reader,
Cache: dp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: dp.fluxInterval,
FeatureLookup: dp.ff,
})
return dp
}
func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
req.Seasonality = SeasonalityDaily
return p.getAnomalies(ctx, req)
}

View File

@@ -0,0 +1,44 @@
package anomaly
import (
"context"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)
type HourlyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*HourlyProvider)(nil)
func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &hp.BaseSeasonalProvider
}
// NewHourlyProvider now uses the generic option type
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
hp := &HourlyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(hp)
}
hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: hp.reader,
Cache: hp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: hp.fluxInterval,
FeatureLookup: hp.ff,
})
return hp
}
func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
req.Seasonality = SeasonalityHourly
return p.getAnomalies(ctx, req)
}

View File

@@ -0,0 +1,244 @@
package anomaly
import (
"math"
"time"
"go.signoz.io/signoz/pkg/query-service/common"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
type Seasonality string
const (
SeasonalityHourly Seasonality = "hourly"
SeasonalityDaily Seasonality = "daily"
SeasonalityWeekly Seasonality = "weekly"
)
var (
oneWeekOffset = 24 * 7 * time.Hour.Milliseconds()
oneDayOffset = 24 * time.Hour.Milliseconds()
oneHourOffset = time.Hour.Milliseconds()
fiveMinOffset = 5 * time.Minute.Milliseconds()
)
func (s Seasonality) IsValid() bool {
switch s {
case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
return true
default:
return false
}
}
type GetAnomaliesRequest struct {
Params *v3.QueryRangeParamsV3
Seasonality Seasonality
}
type GetAnomaliesResponse struct {
Results []*v3.Result
}
// anomalyParams is the params for anomaly detection
// prediction = avg(past_period_query) + avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query)
//                  (rounded value for past period)    (seasonal growth)
//
// score = abs(value - prediction) / stddev(current_season_query)
type anomalyQueryParams struct {
// CurrentPeriodQuery is the query range params for period user is looking at or eval window
// Example: (now-5m, now), (now-30m, now), (now-1h, now)
// The results obtained from this query are used to compare with predicted values
// and to detect anomalies
CurrentPeriodQuery *v3.QueryRangeParamsV3
// PastPeriodQuery is the query range params for past seasonal period
// Example: For weekly seasonality, (now-1w-5m, now-1w)
// : For daily seasonality, (now-1d-5m, now-1d)
// : For hourly seasonality, (now-1h-5m, now-1h)
PastPeriodQuery *v3.QueryRangeParamsV3
// CurrentSeasonQuery is the query range params for current period (seasonal)
// Example: For weekly seasonality, this is the query range params for the (now-1w-5m, now)
// : For daily seasonality, this is the query range params for the (now-1d-5m, now)
// : For hourly seasonality, this is the query range params for the (now-1h-5m, now)
CurrentSeasonQuery *v3.QueryRangeParamsV3
// PastSeasonQuery is the query range params for past seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-2w-5m, now-1w)
// : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
// : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
PastSeasonQuery *v3.QueryRangeParamsV3
// Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
// : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
// : For hourly seasonality, this is the query range params for the (now-3h-5m, now-2h)
Past2SeasonQuery *v3.QueryRangeParamsV3
// Past3SeasonQuery is the query range params for past 3 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-4w-5m, now-3w)
// : For daily seasonality, this is the query range params for the (now-4d-5m, now-3d)
// : For hourly seasonality, this is the query range params for the (now-4h-5m, now-3h)
Past3SeasonQuery *v3.QueryRangeParamsV3
}
func updateStepInterval(req *v3.QueryRangeParamsV3) {
start := req.Start
end := req.End
req.Step = int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60))
for _, q := range req.CompositeQuery.BuilderQueries {
// If the step interval is less than the minimum allowed step interval, set it to the minimum allowed step interval
if minStep := common.MinAllowedStepInterval(start, end); q.StepInterval < minStep {
q.StepInterval = minStep
}
}
}
func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonality) *anomalyQueryParams {
start := req.Start
end := req.End
currentPeriodQuery := &v3.QueryRangeParamsV3{
Start: start,
End: end,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(currentPeriodQuery)
var pastPeriodStart, pastPeriodEnd int64
switch seasonality {
// for one week period, we fetch the data from the past week with 5 min offset
case SeasonalityWeekly:
pastPeriodStart = start - oneWeekOffset - fiveMinOffset
pastPeriodEnd = end - oneWeekOffset
// for one day period, we fetch the data from the past day with 5 min offset
case SeasonalityDaily:
pastPeriodStart = start - oneDayOffset - fiveMinOffset
pastPeriodEnd = end - oneDayOffset
// for one hour period, we fetch the data from the past hour with 5 min offset
case SeasonalityHourly:
pastPeriodStart = start - oneHourOffset - fiveMinOffset
pastPeriodEnd = end - oneHourOffset
}
pastPeriodQuery := &v3.QueryRangeParamsV3{
Start: pastPeriodStart,
End: pastPeriodEnd,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(pastPeriodQuery)
// seasonality growth trend
var currentGrowthPeriodStart, currentGrowthPeriodEnd int64
switch seasonality {
case SeasonalityWeekly:
currentGrowthPeriodStart = start - oneWeekOffset
currentGrowthPeriodEnd = end
case SeasonalityDaily:
currentGrowthPeriodStart = start - oneDayOffset
currentGrowthPeriodEnd = end
case SeasonalityHourly:
currentGrowthPeriodStart = start - oneHourOffset
currentGrowthPeriodEnd = end
}
currentGrowthQuery := &v3.QueryRangeParamsV3{
Start: currentGrowthPeriodStart,
End: currentGrowthPeriodEnd,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(currentGrowthQuery)
var pastGrowthPeriodStart, pastGrowthPeriodEnd int64
switch seasonality {
case SeasonalityWeekly:
pastGrowthPeriodStart = start - 2*oneWeekOffset
pastGrowthPeriodEnd = start - 1*oneWeekOffset
case SeasonalityDaily:
pastGrowthPeriodStart = start - 2*oneDayOffset
pastGrowthPeriodEnd = start - 1*oneDayOffset
case SeasonalityHourly:
pastGrowthPeriodStart = start - 2*oneHourOffset
pastGrowthPeriodEnd = start - 1*oneHourOffset
}
pastGrowthQuery := &v3.QueryRangeParamsV3{
Start: pastGrowthPeriodStart,
End: pastGrowthPeriodEnd,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(pastGrowthQuery)
var past2GrowthPeriodStart, past2GrowthPeriodEnd int64
switch seasonality {
case SeasonalityWeekly:
past2GrowthPeriodStart = start - 3*oneWeekOffset
past2GrowthPeriodEnd = start - 2*oneWeekOffset
case SeasonalityDaily:
past2GrowthPeriodStart = start - 3*oneDayOffset
past2GrowthPeriodEnd = start - 2*oneDayOffset
case SeasonalityHourly:
past2GrowthPeriodStart = start - 3*oneHourOffset
past2GrowthPeriodEnd = start - 2*oneHourOffset
}
past2GrowthQuery := &v3.QueryRangeParamsV3{
Start: past2GrowthPeriodStart,
End: past2GrowthPeriodEnd,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(past2GrowthQuery)
var past3GrowthPeriodStart, past3GrowthPeriodEnd int64
switch seasonality {
case SeasonalityWeekly:
past3GrowthPeriodStart = start - 4*oneWeekOffset
past3GrowthPeriodEnd = start - 3*oneWeekOffset
case SeasonalityDaily:
past3GrowthPeriodStart = start - 4*oneDayOffset
past3GrowthPeriodEnd = start - 3*oneDayOffset
case SeasonalityHourly:
past3GrowthPeriodStart = start - 4*oneHourOffset
past3GrowthPeriodEnd = start - 3*oneHourOffset
}
past3GrowthQuery := &v3.QueryRangeParamsV3{
Start: past3GrowthPeriodStart,
End: past3GrowthPeriodEnd,
CompositeQuery: req.CompositeQuery.Clone(),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(past3GrowthQuery)
return &anomalyQueryParams{
CurrentPeriodQuery: currentPeriodQuery,
PastPeriodQuery: pastPeriodQuery,
CurrentSeasonQuery: currentGrowthQuery,
PastSeasonQuery: pastGrowthQuery,
Past2SeasonQuery: past2GrowthQuery,
Past3SeasonQuery: past3GrowthQuery,
}
}
type anomalyQueryResults struct {
CurrentPeriodResults []*v3.Result
PastPeriodResults []*v3.Result
CurrentSeasonResults []*v3.Result
PastSeasonResults []*v3.Result
Past2SeasonResults []*v3.Result
Past3SeasonResults []*v3.Result
}
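
For a concrete sense of the window arithmetic above, here is a minimal, self-contained Go sketch (illustration only, not part of the diff) deriving the weekly past-period window from the same millisecond offsets:

package main

import (
	"fmt"
	"time"
)

// Offsets in milliseconds, mirroring the constants in the file above.
var (
	oneWeekOffset = 24 * 7 * time.Hour.Milliseconds()
	fiveMinOffset = 5 * time.Minute.Milliseconds()
)

func main() {
	// Suppose the user is evaluating the last 5 minutes: (now-5m, now).
	end := time.Now().UnixMilli()
	start := end - 5*time.Minute.Milliseconds()

	// PastPeriodQuery for weekly seasonality: (now-1w-5m, now-1w).
	pastPeriodStart := start - oneWeekOffset - fiveMinOffset
	pastPeriodEnd := end - oneWeekOffset

	fmt.Println(time.UnixMilli(pastPeriodStart), time.UnixMilli(pastPeriodEnd))
}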

View File

@@ -0,0 +1,9 @@
package anomaly
import (
"context"
)
type Provider interface {
GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
}

View File

@@ -0,0 +1,464 @@
package anomaly
import (
"context"
"math"
"time"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/interfaces"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
)
var (
// TODO(srikanthccv): make this configurable?
movingAvgWindowSize = 7
)
// BaseProvider is an interface that includes common methods for all provider types
type BaseProvider interface {
GetBaseSeasonalProvider() *BaseSeasonalProvider
}
// GenericProviderOption is a generic type for provider options
type GenericProviderOption[T BaseProvider] func(T)
func WithCache[T BaseProvider](cache cache.Cache) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().cache = cache
}
}
func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().keyGenerator = keyGenerator
}
}
func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().ff = ff
}
}
func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().reader = reader
}
}
type BaseSeasonalProvider struct {
querierV2 interfaces.Querier
reader interfaces.Reader
fluxInterval time.Duration
cache cache.Cache
keyGenerator cache.KeyGenerator
ff interfaces.FeatureLookup
}
func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
if !req.Seasonality.IsValid() {
req.Seasonality = SeasonalityDaily
}
return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}
func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery)
if err != nil {
return nil, err
}
currentPeriodResults, err = postprocess.PostProcessResult(currentPeriodResults, params.CurrentPeriodQuery)
if err != nil {
return nil, err
}
pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
if err != nil {
return nil, err
}
pastPeriodResults, err = postprocess.PostProcessResult(pastPeriodResults, params.PastPeriodQuery)
if err != nil {
return nil, err
}
currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
if err != nil {
return nil, err
}
currentSeasonResults, err = postprocess.PostProcessResult(currentSeasonResults, params.CurrentSeasonQuery)
if err != nil {
return nil, err
}
pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
if err != nil {
return nil, err
}
pastSeasonResults, err = postprocess.PostProcessResult(pastSeasonResults, params.PastSeasonQuery)
if err != nil {
return nil, err
}
past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
if err != nil {
return nil, err
}
past2SeasonResults, err = postprocess.PostProcessResult(past2SeasonResults, params.Past2SeasonQuery)
if err != nil {
return nil, err
}
past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
if err != nil {
return nil, err
}
past3SeasonResults, err = postprocess.PostProcessResult(past3SeasonResults, params.Past3SeasonQuery)
if err != nil {
return nil, err
}
return &anomalyQueryResults{
CurrentPeriodResults: currentPeriodResults,
PastPeriodResults: pastPeriodResults,
CurrentSeasonResults: currentSeasonResults,
PastSeasonResults: pastSeasonResults,
Past2SeasonResults: past2SeasonResults,
Past3SeasonResults: past3SeasonResults,
}, nil
}
// getMatchingSeries gets the matching series from the query result
// for the given series
func (p *BaseSeasonalProvider) getMatchingSeries(queryResult *v3.Result, series *v3.Series) *v3.Series {
if queryResult == nil || len(queryResult.Series) == 0 {
return nil
}
for _, curr := range queryResult.Series {
currLabels := labels.FromMap(curr.Labels)
seriesLabels := labels.FromMap(series.Labels)
if currLabels.Hash() == seriesLabels.Hash() {
return curr
}
}
return nil
}
func (p *BaseSeasonalProvider) getAvg(series *v3.Series) float64 {
if series == nil || len(series.Points) == 0 {
return 0
}
var sum float64
for _, smpl := range series.Points {
sum += smpl.Value
}
return sum / float64(len(series.Points))
}
func (p *BaseSeasonalProvider) getStdDev(series *v3.Series) float64 {
if series == nil || len(series.Points) == 0 {
return 0
}
avg := p.getAvg(series)
var sum float64
for _, smpl := range series.Points {
sum += math.Pow(smpl.Value-avg, 2)
}
return math.Sqrt(sum / float64(len(series.Points)))
}
// getMovingAvg gets the moving average for the given series
// for the given window size and start index
func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSize, startIdx int) float64 {
if series == nil || len(series.Points) == 0 {
return 0
}
if startIdx >= len(series.Points)-movingAvgWindowSize {
startIdx = len(series.Points) - movingAvgWindowSize
}
var sum float64
points := series.Points[startIdx:]
for i := 0; i < movingAvgWindowSize && i < len(points); i++ {
sum += points[i].Value
}
avg := sum / float64(movingAvgWindowSize)
return avg
}
func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
if len(floats) == 0 {
return 0
}
var sum float64
for _, f := range floats {
sum += f
}
return sum / float64(len(floats))
}
func (p *BaseSeasonalProvider) getPredictedSeries(
series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
) *v3.Series {
predictedSeries := &v3.Series{
Labels: series.Labels,
LabelsArray: series.LabelsArray,
Points: []v3.Point{},
}
// for each point in the series, get the predicted value
// the predicted value is the moving average (with window size = 7) of the previous period series
// plus the average of the current season series
// minus the mean of the past season series, past2 season series and past3 season series
for idx, curr := range series.Points {
predictedValue :=
p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) +
p.getAvg(currentSeasonSeries) -
p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
if predictedValue < 0 {
predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}
zap.L().Info("predictedSeries",
zap.Float64("movingAvg", p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)),
zap.Float64("avg", p.getAvg(currentSeasonSeries)),
zap.Float64("mean", p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))),
zap.Any("labels", series.Labels),
zap.Float64("predictedValue", predictedValue),
)
predictedSeries.Points = append(predictedSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
Value: predictedValue,
})
}
return predictedSeries
}
// getBounds gets the upper and lower bounds for the given series
// for the given z score threshold
// moving avg of the previous period series + z score threshold * std dev of the series
// moving avg of the previous period series - z score threshold * std dev of the series
func (p *BaseSeasonalProvider) getBounds(
series, prevSeries, _, _, _, _ *v3.Series,
zScoreThreshold float64,
) (*v3.Series, *v3.Series) {
upperBoundSeries := &v3.Series{
Labels: series.Labels,
LabelsArray: series.LabelsArray,
Points: []v3.Point{},
}
lowerBoundSeries := &v3.Series{
Labels: series.Labels,
LabelsArray: series.LabelsArray,
Points: []v3.Point{},
}
for idx, curr := range series.Points {
upperBound := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
lowerBound := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
upperBoundSeries.Points = append(upperBoundSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
Value: upperBound,
})
lowerBoundSeries.Points = append(lowerBoundSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
Value: math.Max(lowerBound, 0),
})
}
return upperBoundSeries, lowerBoundSeries
}
// getExpectedValue gets the expected value for the given series
// for the given index
// prevSeriesAvg + currentSeasonSeriesAvg - mean of past season series, past2 season series and past3 season series
func (p *BaseSeasonalProvider) getExpectedValue(
_, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, idx int,
) float64 {
prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
return prevSeriesAvg + currentSeasonSeriesAvg - p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
}
// getScore gets the anomaly score for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getScore(
series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
) float64 {
expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
return (value - expectedValue) / p.getStdDev(weekSeries)
}
// getAnomalyScores gets the anomaly scores for the given series
// for the given index
// (value - expectedValue) / std dev of the series
func (p *BaseSeasonalProvider) getAnomalyScores(
series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
) *v3.Series {
anomalyScoreSeries := &v3.Series{
Labels: series.Labels,
LabelsArray: series.LabelsArray,
Points: []v3.Point{},
}
for idx, curr := range series.Points {
anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
anomalyScoreSeries.Points = append(anomalyScoreSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
Value: anomalyScore,
})
}
return anomalyScoreSeries
}
func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
anomalyParams := p.getQueryParams(req)
anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
if err != nil {
return nil, err
}
currentPeriodResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.CurrentPeriodResults {
currentPeriodResultsMap[result.QueryName] = result
}
pastPeriodResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.PastPeriodResults {
pastPeriodResultsMap[result.QueryName] = result
}
currentSeasonResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.CurrentSeasonResults {
currentSeasonResultsMap[result.QueryName] = result
}
pastSeasonResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.PastSeasonResults {
pastSeasonResultsMap[result.QueryName] = result
}
past2SeasonResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.Past2SeasonResults {
past2SeasonResultsMap[result.QueryName] = result
}
past3SeasonResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.Past3SeasonResults {
past3SeasonResultsMap[result.QueryName] = result
}
for _, result := range currentPeriodResultsMap {
funcs := req.Params.CompositeQuery.BuilderQueries[result.QueryName].Functions
var zScoreThreshold float64
for _, f := range funcs {
if f.Name == v3.FunctionNameAnomaly {
value, ok := f.NamedArgs["z_score_threshold"]
if ok {
zScoreThreshold = value.(float64)
} else {
zScoreThreshold = 3
}
break
}
}
pastPeriodResult, ok := pastPeriodResultsMap[result.QueryName]
if !ok {
continue
}
currentSeasonResult, ok := currentSeasonResultsMap[result.QueryName]
if !ok {
continue
}
pastSeasonResult, ok := pastSeasonResultsMap[result.QueryName]
if !ok {
continue
}
past2SeasonResult, ok := past2SeasonResultsMap[result.QueryName]
if !ok {
continue
}
past3SeasonResult, ok := past3SeasonResultsMap[result.QueryName]
if !ok {
continue
}
for _, series := range result.Series {
stdDev := p.getStdDev(series)
zap.L().Info("stdDev", zap.Float64("stdDev", stdDev), zap.Any("labels", series.Labels))
pastPeriodSeries := p.getMatchingSeries(pastPeriodResult, series)
currentSeasonSeries := p.getMatchingSeries(currentSeasonResult, series)
pastSeasonSeries := p.getMatchingSeries(pastSeasonResult, series)
past2SeasonSeries := p.getMatchingSeries(past2SeasonResult, series)
past3SeasonSeries := p.getMatchingSeries(past3SeasonResult, series)
prevSeriesAvg := p.getAvg(pastPeriodSeries)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
zap.L().Info("getAvg", zap.Float64("prevSeriesAvg", prevSeriesAvg), zap.Float64("currentSeasonSeriesAvg", currentSeasonSeriesAvg), zap.Float64("pastSeasonSeriesAvg", pastSeasonSeriesAvg), zap.Float64("past2SeasonSeriesAvg", past2SeasonSeriesAvg), zap.Float64("past3SeasonSeriesAvg", past3SeasonSeriesAvg), zap.Any("labels", series.Labels))
predictedSeries := p.getPredictedSeries(
series,
pastPeriodSeries,
currentSeasonSeries,
pastSeasonSeries,
past2SeasonSeries,
past3SeasonSeries,
)
result.PredictedSeries = append(result.PredictedSeries, predictedSeries)
upperBoundSeries, lowerBoundSeries := p.getBounds(
series,
pastPeriodSeries,
currentSeasonSeries,
pastSeasonSeries,
past2SeasonSeries,
past3SeasonSeries,
zScoreThreshold,
)
result.UpperBoundSeries = append(result.UpperBoundSeries, upperBoundSeries)
result.LowerBoundSeries = append(result.LowerBoundSeries, lowerBoundSeries)
anomalyScoreSeries := p.getAnomalyScores(
series,
pastPeriodSeries,
currentSeasonSeries,
pastSeasonSeries,
past2SeasonSeries,
past3SeasonSeries,
)
result.AnomalyScores = append(result.AnomalyScores, anomalyScoreSeries)
}
}
results := make([]*v3.Result, 0, len(currentPeriodResultsMap))
for _, result := range currentPeriodResultsMap {
results = append(results, result)
}
return &GetAnomaliesResponse{
Results: results,
}, nil
}
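
To make the scoring concrete, the following standalone Go sketch (with invented sample numbers) reproduces the per-point arithmetic used above: a moving average of the previous period, plus the current-season average, minus the mean of the three past-season averages, with the anomaly score taken as the deviation divided by the current-season standard deviation:

package main

import (
	"fmt"
	"math"
)

func mean(xs ...float64) float64 {
	var sum float64
	for _, x := range xs {
		sum += x
	}
	return sum / float64(len(xs))
}

func stdDev(xs []float64) float64 {
	m := mean(xs...)
	var sum float64
	for _, x := range xs {
		sum += (x - m) * (x - m)
	}
	return math.Sqrt(sum / float64(len(xs)))
}

func main() {
	// Invented sample data, for illustration only.
	movingAvgPrevPeriod := 100.0 // moving avg (window 7) of the past-period series
	currentSeason := []float64{95, 102, 98, 101, 99}
	pastSeasonAvgs := []float64{90, 88, 92} // past, past2, past3 season averages

	predicted := movingAvgPrevPeriod + mean(currentSeason...) - mean(pastSeasonAvgs...)
	observed := 140.0
	score := (observed - predicted) / stdDev(currentSeason)

	fmt.Printf("predicted=%.2f score=%.2f\n", predicted, score) // predicted=109.00, score≈12.66
}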

View File

@@ -0,0 +1,43 @@
package anomaly
import (
"context"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)
type WeeklyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*WeeklyProvider)(nil)
func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &wp.BaseSeasonalProvider
}
func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
wp := &WeeklyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(wp)
}
wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: wp.reader,
Cache: wp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: wp.fluxInterval,
FeatureLookup: wp.ff,
})
return wp
}
func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
req.Seasonality = SeasonalityWeekly
return p.getAnomalies(ctx, req)
}
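
For context, wiring one of these providers with the generic options looks roughly like this (a sketch: the anomaly package's import path is assumed, and the reader, cache, and feature-lookup values must come from the caller's environment):

package example

import (
	"context"

	"go.signoz.io/signoz/pkg/query-service/cache"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	// Assumed import path; the diff does not show where the anomaly package lives.
	"go.signoz.io/signoz/ee/query-service/anomaly"
)

// newWeekly builds a weekly provider using the generic functional options
// defined in seasonal.go; explicit type arguments are required since T
// cannot be inferred from the option values alone.
func newWeekly(reader interfaces.Reader, c cache.Cache, ff interfaces.FeatureLookup) anomaly.Provider {
	return anomaly.NewWeeklyProvider(
		anomaly.WithReader[*anomaly.WeeklyProvider](reader),
		anomaly.WithCache[*anomaly.WeeklyProvider](c),
		anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](ff),
	)
}

func run(ctx context.Context, p anomaly.Provider, params *v3.QueryRangeParamsV3) error {
	// Seasonality is set by the provider itself (weekly, in this case).
	_, err := p.GetAnomalies(ctx, &anomaly.GetAnomaliesRequest{Params: params})
	return err
}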

View File

@@ -38,8 +38,7 @@ type APIHandlerOptions struct {
Cache cache.Cache
Gateway *httputil.ReverseProxy
// Querier Influx Interval
FluxInterval time.Duration
FluxInterval time.Duration
UseLogsNewSchema bool
}

View File

@@ -1,401 +0,0 @@
package db
import (
"context"
"crypto/md5"
"encoding/json"
"fmt"
"reflect"
"regexp"
"sort"
"strings"
"time"
"go.signoz.io/signoz/ee/query-service/model"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
// GetMetricResultEE runs the query and returns list of time series
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
defer utils.Elapsed("GetMetricResult", nil)()
zap.L().Info("Executing metric result query: ", zap.String("query", query))
var hash string
// If getSubTreeSpans function is used in the clickhouse query
if strings.Contains(query, "getSubTreeSpans(") {
var err error
query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
if err == fmt.Errorf("no spans found for the given query") {
return nil, "", nil
}
if err != nil {
return nil, "", err
}
}
rows, err := r.conn.Query(ctx, query)
if err != nil {
zap.L().Error("Error in processing query", zap.Error(err))
return nil, "", fmt.Errorf("error in processing query")
}
var (
columnTypes = rows.ColumnTypes()
columnNames = rows.Columns()
vars = make([]interface{}, len(columnTypes))
)
for i := range columnTypes {
vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
}
// when group by is applied, each combination of cartesian product
// of attributes is separate series. each item in metricPointsMap
// represent a unique series.
metricPointsMap := make(map[string][]basemodel.MetricPoint)
// attribute key-value pairs for each group selection
attributesMap := make(map[string]map[string]string)
defer rows.Close()
for rows.Next() {
if err := rows.Scan(vars...); err != nil {
return nil, "", err
}
var groupBy []string
var metricPoint basemodel.MetricPoint
groupAttributes := make(map[string]string)
// Assuming that the end result row contains a timestamp, value and optional labels
// Label key and value are both strings.
for idx, v := range vars {
colName := columnNames[idx]
switch v := v.(type) {
case *string:
// special case for returning all labels
if colName == "fullLabels" {
var metric map[string]string
err := json.Unmarshal([]byte(*v), &metric)
if err != nil {
return nil, "", err
}
for key, val := range metric {
groupBy = append(groupBy, val)
groupAttributes[key] = val
}
} else {
groupBy = append(groupBy, *v)
groupAttributes[colName] = *v
}
case *time.Time:
metricPoint.Timestamp = v.UnixMilli()
case *float64:
metricPoint.Value = *v
case **float64:
// ch seems to return this type when column is derived from
// SELECT count(*)/ SELECT count(*)
floatVal := *v
if floatVal != nil {
metricPoint.Value = *floatVal
}
case *float32:
float32Val := float32(*v)
metricPoint.Value = float64(float32Val)
case *uint8, *uint64, *uint16, *uint32:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
}
case *int8, *int16, *int32, *int64:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
}
default:
zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName))
}
}
sort.Strings(groupBy)
key := strings.Join(groupBy, "")
attributesMap[key] = groupAttributes
metricPointsMap[key] = append(metricPointsMap[key], metricPoint)
}
var seriesList []*basemodel.Series
for key := range metricPointsMap {
points := metricPointsMap[key]
// first point in each series could be invalid since the
// aggregations are applied with point from prev series
if len(points) != 0 && len(points) > 1 {
points = points[1:]
}
attributes := attributesMap[key]
series := basemodel.Series{Labels: attributes, Points: points}
seriesList = append(seriesList, &series)
}
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
// if err != nil {
// zap.L().Error("Error in dropping temporary table: ", err)
// return nil, err
// }
if hash == "" {
return seriesList, hash, nil
} else {
return seriesList, "getSubTreeSpans" + hash, nil
}
}
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
zap.L().Debug("Executing getSubTreeSpans function")
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
// process the query to fetch subTree query
var subtreeInput string
query, subtreeInput, hash = processQuery(query, hash)
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
if err != nil {
zap.L().Error("Error in dropping temporary table", zap.Error(err))
return query, hash, err
}
// Create temporary table to store the getSubTreeSpans() results
zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash))
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
if err != nil {
zap.L().Error("Error in creating temporary table", zap.Error(err))
return query, hash, err
}
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
getSpansSubQuery := subtreeInput
// Execute the subTree query
zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery))
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
// zap.L().Info(getSpansSubQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, fmt.Errorf("error in processing sql query")
}
var searchScanResponses []basemodel.SearchSpanDBResponseItem
// TODO : @ankit: I think the algorithm does not need to assume that subtrees are from the same TraceID. We can take this as an improvement later.
// Fetch all the spans from of same TraceID so that we can build subtree
modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
if len(getSpansSubQueryDBResponses) == 0 {
return query, hash, fmt.Errorf("no spans found for the given query")
}
zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", zap.String("modelQuery", modelQuery))
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, fmt.Errorf("error in processing sql query")
}
// Process model to fetch the spans
zap.L().Debug("Processing model to fetch the spans")
searchSpanResponses := []basemodel.SearchSpanResponseItem{}
for _, item := range searchScanResponses {
var jsonItem basemodel.SearchSpanResponseItem
json.Unmarshal([]byte(item.Model), &jsonItem)
jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano())
if jsonItem.Events == nil {
jsonItem.Events = []string{}
}
searchSpanResponses = append(searchSpanResponses, jsonItem)
}
// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
// Use map to store pointer to the spans to avoid duplicates and save memory
zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
if err != nil {
zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err))
return query, hash, err
}
zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash))
if err != nil {
zap.L().Error("Error in preparing batch statement", zap.Error(err))
return query, hash, err
}
for _, span := range treeSearchResponse {
var parentID string
if len(span.References) > 0 && span.References[0].RefType == "CHILD_OF" {
parentID = span.References[0].SpanId
}
err = statement.Append(
time.Unix(0, int64(span.TimeUnixNano)),
span.TraceID,
span.SpanID,
parentID,
span.RootSpanID,
span.ServiceName,
span.Name,
span.RootName,
uint64(span.DurationNano),
int8(span.Kind),
span.TagMap,
span.Events,
)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, err
}
}
zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
err = statement.Send()
if err != nil {
zap.L().Error("Error in sending statement", zap.Error(err))
return query, hash, err
}
return query, hash, nil
}
//lint:ignore SA4009 return hash is fed to the query
func processQuery(query string, hash string) (string, string, string) {
re3 := regexp.MustCompile(`getSubTreeSpans`)
submatchall3 := re3.FindAllStringIndex(query, -1)
getSubtreeSpansMatchIndex := submatchall3[0][1]
query2countParenthesis := query[getSubtreeSpansMatchIndex:]
sqlCompleteIndex := 0
countParenthesisImbalance := 0
for i, char := range query2countParenthesis {
if string(char) == "(" {
countParenthesisImbalance += 1
}
if string(char) == ")" {
countParenthesisImbalance -= 1
}
if countParenthesisImbalance == 0 {
sqlCompleteIndex = i
break
}
}
subtreeInput := query2countParenthesis[1:sqlCompleteIndex]
// hash the subtreeInput
hmd5 := md5.Sum([]byte(subtreeInput))
hash = fmt.Sprintf("%x", hmd5)
// Reformat the query to use the getSubTreeSpans function
query = query[:getSubtreeSpansMatchIndex] + hash + " " + query2countParenthesis[sqlCompleteIndex+1:]
return query, subtreeInput, hash
}
// getSubTreeAlgorithm is an algorithm to build the subtrees of the spans and return the list of spans
func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse) (map[string]*basemodel.SearchSpanResponseItem, error) {
var spans []*model.SpanForTraceDetails
for _, spanItem := range payload {
var parentID string
if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
parentID = spanItem.References[0].SpanId
}
span := &model.SpanForTraceDetails{
TimeUnixNano: spanItem.TimeUnixNano,
SpanID: spanItem.SpanID,
TraceID: spanItem.TraceID,
ServiceName: spanItem.ServiceName,
Name: spanItem.Name,
Kind: spanItem.Kind,
DurationNano: spanItem.DurationNano,
TagMap: spanItem.TagMap,
ParentID: parentID,
Events: spanItem.Events,
HasError: spanItem.HasError,
}
spans = append(spans, span)
}
zap.L().Debug("Building Tree")
roots, err := buildSpanTrees(&spans)
if err != nil {
return nil, err
}
searchSpansResult := make(map[string]*basemodel.SearchSpanResponseItem)
// Every span which was fetched from getSubTree Input SQL query is considered root
// For each root, get the subtree spans
for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
targetSpan := &model.SpanForTraceDetails{}
// zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
// Search target span object in the tree
for _, root := range roots {
targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
if targetSpan != nil {
break
}
if err != nil {
zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
return nil, err
}
}
if targetSpan == nil {
return nil, nil
}
// Build subtree for the target span
// Mark the target span as root by setting parent ID as empty string
targetSpan.ParentID = ""
preParents := []*model.SpanForTraceDetails{targetSpan}
children := []*model.SpanForTraceDetails{}
// Get the subtree child spans
for i := 0; len(preParents) != 0; i++ {
parents := []*model.SpanForTraceDetails{}
for _, parent := range preParents {
children = append(children, parent.Children...)
parents = append(parents, parent.Children...)
}
preParents = parents
}
resultSpans := children
// Add the target span to the result spans
resultSpans = append(resultSpans, targetSpan)
for _, item := range resultSpans {
references := []basemodel.OtelSpanRef{
{
TraceId: item.TraceID,
SpanId: item.ParentID,
RefType: "CHILD_OF",
},
}
if item.Events == nil {
item.Events = []string{}
}
searchSpansResult[item.SpanID] = &basemodel.SearchSpanResponseItem{
TimeUnixNano: item.TimeUnixNano,
SpanID: item.SpanID,
TraceID: item.TraceID,
ServiceName: item.ServiceName,
Name: item.Name,
Kind: item.Kind,
References: references,
DurationNano: item.DurationNano,
TagMap: item.TagMap,
Events: item.Events,
HasError: item.HasError,
RootSpanID: getSpansSubQueryDBResponse.SpanID,
RootName: targetSpan.Name,
}
}
}
return searchSpansResult, nil
}

View File

@@ -207,7 +207,6 @@
"eslint-plugin-sonarjs": "^0.12.0",
"husky": "^7.0.4",
"is-ci": "^3.0.1",
"jest-playwright-preset": "^1.7.2",
"jest-styled-components": "^7.0.8",
"lint-staged": "^12.5.0",
"msw": "1.3.2",

View File

@@ -137,7 +137,6 @@ function App(): JSX.Element {
window.analytics.identify(email, sanitizedIdentifyPayload);
window.analytics.group(domain, groupTraits);
window.clarity('identify', email, name);
posthog?.identify(email, {
email,

View File

@@ -139,6 +139,7 @@ export const getGraphOptions = (
},
scales: {
x: {
stacked: isStacked,
grid: {
display: true,
color: getGridColor(),
@@ -165,6 +166,7 @@ export const getGraphOptions = (
ticks: { color: getAxisLabelColor(currentTheme) },
},
y: {
stacked: isStacked,
display: true,
grid: {
display: true,
@@ -178,9 +180,6 @@ export const getGraphOptions = (
},
},
},
stacked: {
display: isStacked === undefined ? false : 'auto',
},
},
elements: {
line: {

View File

@@ -15,7 +15,7 @@ function AddToQueryHOC({
}: AddToQueryHOCProps): JSX.Element {
const handleQueryAdd = (event: MouseEvent<HTMLDivElement>): void => {
event.stopPropagation();
onAddToQuery(fieldKey, fieldValue, OPERATORS.IN);
onAddToQuery(fieldKey, fieldValue, OPERATORS['=']);
};
const popOverContent = useMemo(() => <span>Add to query: {fieldKey}</span>, [

View File

@@ -65,7 +65,7 @@ export const logAlertDefaults: AlertDef = {
chQueries: {
A: {
name: 'A',
query: `select \ntoStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 MINUTE) AS interval, \ntoFloat64(count()) as value \nFROM signoz_logs.distributed_logs \nWHERE timestamp BETWEEN {{.start_timestamp_nano}} AND {{.end_timestamp_nano}} \nGROUP BY interval;\n\n-- available variables:\n-- \t{{.start_timestamp_nano}}\n-- \t{{.end_timestamp_nano}}\n\n-- required columns (or alias):\n-- \tvalue\n-- \tinterval`,
query: `select \ntoStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 MINUTE) AS interval, \ntoFloat64(count()) as value \nFROM signoz_logs.distributed_logs_v2 \nWHERE timestamp BETWEEN {{.start_timestamp_nano}} AND {{.end_timestamp_nano}} \nGROUP BY interval;\n\n-- available variables:\n-- \t{{.start_timestamp_nano}}\n-- \t{{.end_timestamp_nano}}\n\n-- required columns (or alias):\n-- \tvalue\n-- \tinterval`,
legend: '',
disabled: false,
},
@@ -95,7 +95,7 @@ export const traceAlertDefaults: AlertDef = {
chQueries: {
A: {
name: 'A',
query: `SELECT \n\ttoStartOfInterval(timestamp, INTERVAL 1 MINUTE) AS interval, \n\ttagMap['peer.service'] AS op_name, \n\ttoFloat64(avg(durationNano)) AS value \nFROM signoz_traces.distributed_signoz_index_v2 \nWHERE tagMap['peer.service']!='' \nAND timestamp BETWEEN {{.start_datetime}} AND {{.end_datetime}} \nGROUP BY (op_name, interval);\n\n-- available variables:\n-- \t{{.start_datetime}}\n-- \t{{.end_datetime}}\n\n-- required column alias:\n-- \tvalue\n-- \tinterval`,
query: `SELECT \n\ttoStartOfInterval(timestamp, INTERVAL 1 MINUTE) AS interval, \n\tstringTagMap['peer.service'] AS op_name, \n\ttoFloat64(avg(durationNano)) AS value \nFROM signoz_traces.distributed_signoz_index_v2 \nWHERE stringTagMap['peer.service']!='' \nAND timestamp BETWEEN {{.start_datetime}} AND {{.end_datetime}} \nGROUP BY (op_name, interval);\n\n-- available variables:\n-- \t{{.start_datetime}}\n-- \t{{.end_datetime}}\n\n-- required column alias:\n-- \tvalue\n-- \tinterval`,
legend: '',
disabled: false,
},

View File

@@ -1,6 +1,6 @@
/* eslint-disable react/display-name */
import { PlusOutlined } from '@ant-design/icons';
import { Input, Typography } from 'antd';
import { Flex, Input, Typography } from 'antd';
import type { ColumnsType } from 'antd/es/table/interface';
import saveAlertApi from 'api/alerts/save';
import logEvent from 'api/common/logEvent';
@@ -34,12 +34,7 @@ import { GettableAlert } from 'types/api/alerts/get';
import AppReducer from 'types/reducer/app';
import DeleteAlert from './DeleteAlert';
import {
Button,
ButtonContainer,
ColumnButton,
SearchContainer,
} from './styles';
import { Button, ColumnButton, SearchContainer } from './styles';
import Status from './TableComponents/Status';
import ToggleAlertState from './ToggleAlertState';
import { alertActionLogEvent, filterAlerts } from './utils';
@@ -373,21 +368,25 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
onChange={handleSearch}
defaultValue={searchString}
/>
<ButtonContainer>
<Flex gap={12}>
{addNewAlert && (
<Button
type="primary"
onClick={onClickNewAlertHandler}
icon={<PlusOutlined />}
>
New Alert
</Button>
)}
<TextToolTip
{...{
text: `More details on how to create alerts`,
url:
'https://signoz.io/docs/alerts/?utm_source=product&utm_medium=list-alerts',
urlText: 'Learn More',
}}
/>
{addNewAlert && (
<Button onClick={onClickNewAlertHandler} icon={<PlusOutlined />}>
New Alert
</Button>
)}
</ButtonContainer>
</Flex>
</SearchContainer>
<DynamicColumnTable
tablesource={TableDataSource.Alert}

View File

@@ -9,12 +9,6 @@ export const SearchContainer = styled.div`
gap: 2rem;
}
`;
export const ButtonContainer = styled.div`
&&& {
display: flex;
align-items: center;
}
`;
export const Button = styled(ButtonComponent)`
&&& {

View File

@@ -91,6 +91,7 @@ function DashboardsList(): JSX.Element {
const {
data: dashboardListResponse,
isLoading: isDashboardListLoading,
isRefetching: isDashboardListRefetching,
error: dashboardFetchError,
refetch: refetchDashboardList,
} = useGetAllDashboard();
@@ -703,7 +704,9 @@ function DashboardsList(): JSX.Element {
</Flex>
</div>
{isDashboardListLoading || isFilteringDashboards ? (
{isDashboardListLoading ||
isFilteringDashboards ||
isDashboardListRefetching ? (
<div className="loading-dashboard-details">
<Skeleton.Input active size="large" className="skeleton-1" />
<Skeleton.Input active size="large" className="skeleton-1" />
@@ -902,7 +905,11 @@ function DashboardsList(): JSX.Element {
columns={columns}
dataSource={data}
showSorterTooltip
loading={isDashboardListLoading || isFilteringDashboards}
loading={
isDashboardListLoading ||
isFilteringDashboards ||
isDashboardListRefetching
}
showHeader={false}
pagination={paginationConfig}
/>

View File

@@ -122,10 +122,10 @@ function TableView({
fieldValue: string,
) => (): void => {
handleClick(operator, fieldKey, fieldValue);
if (operator === OPERATORS.IN) {
if (operator === OPERATORS['=']) {
setIsFilterInLoading(true);
}
if (operator === OPERATORS.NIN) {
if (operator === OPERATORS['!=']) {
setIsFilterOutLoading(true);
}
};

View File

@@ -139,7 +139,7 @@ export function TableViewActions(
<ArrowDownToDot size={14} style={{ transform: 'rotate(90deg)' }} />
)
}
onClick={onClickHandler(OPERATORS.IN, fieldFilterKey, fieldData.value)}
onClick={onClickHandler(OPERATORS['='], fieldFilterKey, fieldData.value)}
/>
</Tooltip>
<Tooltip title="Filter out value">
@@ -152,7 +152,11 @@ export function TableViewActions(
<ArrowUpFromDot size={14} style={{ transform: 'rotate(90deg)' }} />
)
}
onClick={onClickHandler(OPERATORS.NIN, fieldFilterKey, fieldData.value)}
onClick={onClickHandler(
OPERATORS['!='],
fieldFilterKey,
fieldData.value,
)}
/>
</Tooltip>
{!isOldLogsExplorerOrLiveLogsPage && (

View File

@@ -1,6 +1,7 @@
import LogDetail from 'components/LogDetail';
import { VIEW_TYPES } from 'components/LogDetail/constants';
import ROUTES from 'constants/routes';
import { getOldLogsOperatorFromNew } from 'hooks/logs/useActiveLog';
import { getGeneratedFilterQueryString } from 'lib/getGeneratedFilterQueryString';
import getStep from 'lib/getStep';
import { getIdConditions } from 'pages/Logs/utils';
@@ -57,10 +58,11 @@ function LogDetailedView({
const handleAddToQuery = useCallback(
(fieldKey: string, fieldValue: string, operator: string) => {
const newOperator = getOldLogsOperatorFromNew(operator);
const updatedQueryString = getGeneratedFilterQueryString(
fieldKey,
fieldValue,
operator,
newOperator,
queryString,
);
@@ -71,10 +73,11 @@ function LogDetailedView({
const handleClickActionItem = useCallback(
(fieldKey: string, fieldValue: string, operator: string): void => {
const newOperator = getOldLogsOperatorFromNew(operator);
const updatedQueryString = getGeneratedFilterQueryString(
fieldKey,
fieldValue,
operator,
newOperator,
queryString,
);

View File

@@ -3,6 +3,7 @@ import { QueryData } from 'types/api/widgets/getQuery';
export type LogsExplorerChartProps = {
data: QueryData[];
isLoading: boolean;
isLogsExplorerViews?: boolean;
isLabelEnabled?: boolean;
className?: string;
};

View File

@@ -16,12 +16,14 @@ import { UpdateTimeInterval } from 'store/actions';
import { LogsExplorerChartProps } from './LogsExplorerChart.interfaces';
import { CardStyled } from './LogsExplorerChart.styled';
import { getColorsForSeverityLabels } from './utils';
function LogsExplorerChart({
data,
isLoading,
isLabelEnabled = true,
className,
isLogsExplorerViews = false,
}: LogsExplorerChartProps): JSX.Element {
const dispatch = useDispatch();
const urlQuery = useUrlQuery();
@@ -29,15 +31,19 @@ function LogsExplorerChart({
const handleCreateDatasets: Required<GetChartDataProps>['createDataset'] = useCallback(
(element, index, allLabels) => ({
data: element,
backgroundColor: colors[index % colors.length] || themeColors.red,
borderColor: colors[index % colors.length] || themeColors.red,
backgroundColor: isLogsExplorerViews
? getColorsForSeverityLabels(allLabels[index], index)
: colors[index % colors.length] || themeColors.red,
borderColor: isLogsExplorerViews
? getColorsForSeverityLabels(allLabels[index], index)
: colors[index % colors.length] || themeColors.red,
...(isLabelEnabled
? {
label: allLabels[index],
}
: {}),
}),
[isLabelEnabled],
[isLabelEnabled, isLogsExplorerViews],
);
const onDragSelect = useCallback(
@@ -112,6 +118,7 @@ function LogsExplorerChart({
<Graph
name="logsExplorerChart"
data={graphData.data}
isStacked={isLogsExplorerViews}
type="bar"
animate
onDragSelect={onDragSelect}

View File

@@ -0,0 +1,36 @@
import { Color } from '@signozhq/design-tokens';
import { themeColors } from 'constants/theme';
import { colors } from 'lib/getRandomColor';
export function getColorsForSeverityLabels(
label: string,
index: number,
): string {
const lowerCaseLabel = label.toLowerCase();
if (lowerCaseLabel.includes(`{severity_text="trace"}`)) {
return Color.BG_ROBIN_300;
}
if (lowerCaseLabel.includes(`{severity_text="debug"}`)) {
return Color.BG_FOREST_500;
}
if (lowerCaseLabel.includes(`{severity_text="info"}`)) {
return Color.BG_SLATE_400;
}
if (lowerCaseLabel.includes(`{severity_text="warn"}`)) {
return Color.BG_AMBER_500;
}
if (lowerCaseLabel.includes(`{severity_text="error"}`)) {
return Color.BG_CHERRY_500;
}
if (lowerCaseLabel.includes(`{severity_text="fatal"}`)) {
return Color.BG_SAKURA_500;
}
return colors[index % colors.length] || themeColors.red;
}

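The lookup above is substring-based: any series whose label embeds a {severity_text="..."} pair gets a fixed severity color, and every other series falls back to the rotating palette. A minimal sketch of the expected behaviour (the label shapes are assumed from how the explorer names its series, not a documented contract):

// assumed label shapes; getColorsForSeverityLabels is the util defined above
const labels = ['{severity_text="error"}', '{severity_text="warn"}', 'frontend'];
const resolved = labels.map((label, index) =>
	getColorsForSeverityLabels(label, index),
);
// -> [Color.BG_CHERRY_500, Color.BG_AMBER_500, colors[2]]
// the third series has no severity pair, so it falls through to the palette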
View File

@@ -147,6 +147,13 @@
}
.logs-histogram {
.ant-card-body {
height: 140px;
min-height: 140px;
padding: 0 16px 22px 16px;
font-family: 'Geist Mono';
}
margin-bottom: 0px;
}
}

View File

@@ -64,6 +64,7 @@ import { useHistory } from 'react-router-dom';
import { AppState } from 'store/reducers';
import { Dashboard } from 'types/api/dashboard/getAll';
import { ILog } from 'types/api/logs/log';
import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';
import {
IBuilderQuery,
OrderByPayload,
@@ -132,6 +133,9 @@ function LogsExplorerViews({
// State
const [page, setPage] = useState<number>(1);
const [logs, setLogs] = useState<ILog[]>([]);
const [lastLogLineTimestamp, setLastLogLineTimestamp] = useState<
number | string | null
>();
const [requestData, setRequestData] = useState<Query | null>(null);
const [showFormatMenuItems, setShowFormatMenuItems] = useState(false);
const [queryId, setQueryId] = useState<string>(v4());
@@ -188,6 +192,16 @@ function LogsExplorerViews({
const modifiedQueryData: IBuilderQuery = {
...listQuery,
aggregateOperator: LogsAggregatorOperator.COUNT,
groupBy: [
{
key: 'severity_text',
dataType: DataTypes.String,
type: '',
isColumn: true,
isJSON: false,
id: 'severity_text--string----true',
},
],
};
const modifiedQuery: Query = {
@@ -259,6 +273,14 @@ function LogsExplorerViews({
start: minTime,
end: maxTime,
}),
// send lastLogLineTimestamp only when the panel type is list, the orderBy column is timestamp, and the order is desc
lastLogLineTimestamp:
panelType === PANEL_TYPES.LIST &&
requestData?.builder?.queryData?.[0]?.orderBy?.[0]?.columnName ===
'timestamp' &&
requestData?.builder?.queryData?.[0]?.orderBy?.[0]?.order === 'desc'
? lastLogLineTimestamp
: undefined,
},
undefined,
listQueryKeyRef,
@@ -336,6 +358,10 @@ function LogsExplorerViews({
pageSize: nextPageSize,
});
// lastLogLineTimestamp is reset to null whenever fresh data arrives (see the effect below);
// once we scroll to the end of the current logs, remember the last log's timestamp so the next page is queried against the same end boundary.
setLastLogLineTimestamp(lastLog.timestamp);
setPage((prevPage) => prevPage + 1);
setRequestData(newRequestData);
@@ -528,6 +554,11 @@ function LogsExplorerViews({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [data]);
useEffect(() => {
// clear the lastLogLineTimestamp when the data changes
setLastLogLineTimestamp(null);
}, [data]);
useEffect(() => {
if (
requestData?.id !== stagedQuery?.id ||
@@ -661,6 +692,7 @@ function LogsExplorerViews({
className="logs-histogram"
isLoading={isFetchingListChartData || isLoadingListChartData}
data={chartData}
isLogsExplorerViews={panelType === PANEL_TYPES.LIST}
/>
)}

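Two details in this hunk are worth calling out. First, the attribute id appears to follow a key--dataType--type--isColumn convention; this is an inference from 'severity_text--string----true' (the empty type field produces the double separator), not a documented contract:

// hypothetical helper reproducing the inferred id format
const attributeId = (
	key: string,
	dataType: string,
	type: string,
	isColumn: boolean,
): string => [key, dataType, type, String(isColumn)].join('--');
attributeId('severity_text', 'string', '', true); // 'severity_text--string----true'

Second, the guard deciding whether to send lastLogLineTimestamp reads more easily as a standalone predicate (a sketch using the names from the diff, not an exported helper):

const shouldSendLastLogTimestamp = (
	panelType: PANEL_TYPES,
	query: Query | null,
): boolean =>
	panelType === PANEL_TYPES.LIST &&
	query?.builder?.queryData?.[0]?.orderBy?.[0]?.columnName === 'timestamp' &&
	query?.builder?.queryData?.[0]?.orderBy?.[0]?.order === 'desc';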
View File

@@ -74,7 +74,7 @@ export const panelTypeVsYAxisUnit: { [key in PANEL_TYPES]: boolean } = {
[PANEL_TYPES.VALUE]: true,
[PANEL_TYPES.TABLE]: false,
[PANEL_TYPES.LIST]: false,
[PANEL_TYPES.PIE]: false,
[PANEL_TYPES.PIE]: true,
[PANEL_TYPES.BAR]: true,
[PANEL_TYPES.HISTOGRAM]: false,
[PANEL_TYPES.TRACE]: false,

View File

@@ -211,7 +211,11 @@ function RightContainer({
<YAxisUnitSelector
defaultValue={yAxisUnit}
onSelect={setYAxisUnit}
fieldLabel={selectedGraphType === 'Value' ? 'Unit' : 'Y Axis Unit'}
fieldLabel={
selectedGraphType === 'Value' || selectedGraphType === 'Pie'
? 'Unit'
: 'Y Axis Unit'
}
/>
)}
{allowSoftMinMax && (

View File

@@ -4,6 +4,7 @@ import { Color } from '@signozhq/design-tokens';
import { Group } from '@visx/group';
import { Pie } from '@visx/shape';
import { useTooltip, useTooltipInPortal } from '@visx/tooltip';
import { getYAxisFormattedValue } from 'components/Graph/yAxisConfig';
import { themeColors } from 'constants/theme';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { generateColor } from 'lib/uPlotLib/utils/generateColor';
@@ -129,7 +130,12 @@ function PiePanelWrapper({
showTooltip({
tooltipData: {
label,
value: arc.data.value,
// format only the tooltip value; keep the raw number in the data, since arc
// allotment compares values numerically and a formatted '4K' would sort below '40'
value: getYAxisFormattedValue(
arc.data.value,
widget?.yAxisUnit || 'none',
),
color: arc.data.color,
key: label,
},

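The comment above captures the design choice: only the tooltip string is unit-formatted, while the arcs are still sized from the raw numeric values, so a formatted '4K' is never compared against '40' as a string. A hedged illustration (the exact output depends on the configured unit; the 'short'-style formatting shown here is an assumption, not a guaranteed unit id):

// only the display string changes; arc.data.value stays numeric for the pie layout
getYAxisFormattedValue('4000', 'short'); // e.g. '4 K', lexicographically smaller than '40'
getYAxisFormattedValue('4000', 'none'); // '4000', unchanged when no unit is configured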
View File

@@ -1,6 +1,6 @@
import { getAggregateKeys } from 'api/queryBuilder/getAttributeKeys';
import { SOMETHING_WENT_WRONG } from 'constants/api';
import { QueryBuilderKeys } from 'constants/queryBuilder';
import { OPERATORS, QueryBuilderKeys } from 'constants/queryBuilder';
import ROUTES from 'constants/routes';
import { getOperatorValue } from 'container/QueryBuilder/filters/QueryBuilderSearch/utils';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -24,6 +24,16 @@ import { v4 as uuid } from 'uuid';
import { UseActiveLog } from './types';
export function getOldLogsOperatorFromNew(operator: string): string {
switch (operator) {
case OPERATORS['=']:
return OPERATORS.IN;
case OPERATORS['!=']:
return OPERATORS.NIN;
default:
return operator;
}
}
export const useActiveLog = (): UseActiveLog => {
const dispatch = useDispatch();
@@ -178,10 +188,11 @@ export const useActiveLog = (): UseActiveLog => {
);
const onAddToQueryLogs = useCallback(
(fieldKey: string, fieldValue: string, operator: string) => {
const newOperator = getOldLogsOperatorFromNew(operator);
const updatedQueryString = getGeneratedFilterQueryString(
fieldKey,
fieldValue,
operator,
newOperator,
queryString,
);

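The new log-details actions emit = and != while the legacy logs query-string builder still expects IN and NIN, so every call site now translates before building the query string. A quick sketch of the mapping (return values follow the switch above; 'contains' stands in for any other operator):

getOldLogsOperatorFromNew(OPERATORS['=']); // OPERATORS.IN
getOldLogsOperatorFromNew(OPERATORS['!=']); // OPERATORS.NIN
getOldLogsOperatorFromNew('contains'); // 'contains', the default branch passes it through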
View File

@@ -114,25 +114,6 @@
})();
</script>
<script type="text/javascript">
//Set your CLARITY_PROJECT_ID
const CLARITY_PROJECT_ID =
'<%= htmlWebpackPlugin.options.CLARITY_PROJECT_ID %>';
(function (c, l, a, r, i, t, y) {
c[a] =
c[a] ||
function () {
(c[a].q = c[a].q || []).push(arguments);
};
t = l.createElement(r);
t.async = 1;
t.src = 'https://www.clarity.ms/tag/' + i;
y = l.getElementsByTagName(r)[0];
y.parentNode.insertBefore(t, y);
})(window, document, 'clarity', 'script', CLARITY_PROJECT_ID);
</script>
<script>
//Set your SEGMENT_ID
const SEGMENT_ID = '<%= htmlWebpackPlugin.options.SEGMENT_ID %>';

View File

@@ -1,6 +1,7 @@
import getStartEndRangeTime from 'lib/getStartEndRangeTime';
import getStep from 'lib/getStep';
import { mapQueryDataToApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataToApi';
import { isUndefined } from 'lodash-es';
import store from 'store';
import { QueryRangePayload } from 'types/api/metrics/getQueryRange';
import { EQueryType } from 'types/common/dashboard';
@@ -24,7 +25,11 @@ export const prepareQueryRangePayload = ({
fillGaps = false,
}: GetQueryResultsProps): PrepareQueryRangePayload => {
let legendMap: Record<string, string> = {};
const { allowSelectedIntervalForStepGen, ...restParams } = params;
const {
allowSelectedIntervalForStepGen,
lastLogLineTimestamp,
...restParams
} = params;
const compositeQuery: QueryRangePayload['compositeQuery'] = {
queryType: query.queryType,
@@ -90,9 +95,13 @@ export const prepareQueryRangePayload = ({
interval: globalSelectedInterval,
});
const endLogTimeStamp = !isUndefined(lastLogLineTimestamp)
? new Date(lastLogLineTimestamp as string | number)?.getTime() || undefined
: undefined;
const queryPayload: QueryRangePayload = {
start: parseInt(start, 10) * 1e3,
end: parseInt(end, 10) * 1e3,
end: endLogTimeStamp || parseInt(end, 10) * 1e3,
step: getStep({
start: allowSelectedIntervalForStepGen
? start

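The override only fires when lastLogLineTimestamp is defined; otherwise the payload keeps the interval's own end. A minimal sketch of the arithmetic (concrete values are made up; the * 1e3 mirrors the seconds-to-milliseconds scaling above):

const end = '1726480800'; // seconds, as produced by getStartEndRangeTime
const lastLogLineTimestamp = 1726480512345; // ms epoch of the last rendered log line
const endLogTimeStamp = !isUndefined(lastLogLineTimestamp)
	? new Date(lastLogLineTimestamp).getTime() || undefined
	: undefined; // 1726480512345
const payloadEnd = endLogTimeStamp || parseInt(end, 10) * 1e3; // both in milliseconds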
View File

@@ -12,6 +12,10 @@
font-weight: 400;
line-height: 18px;
letter-spacing: -0.005em;
&,
&:hover {
color: var(--text-vanilla-400);
}
}
&__key {
background: var(--bg-ink-400);
@@ -20,13 +24,15 @@
&__value {
background: var(--bg-slate-400);
}
color: var(--text-vanilla-400);
}
.lightMode {
.key-value-label {
border-color: var(--bg-vanilla-400);
color: var(--text-ink-400);
&__key,
&__value {
color: var(--text-ink-400);
}
&__key {
background: var(--bg-vanilla-300);
}

View File

@@ -1,6 +1,7 @@
import './KeyValueLabel.styles.scss';
import { Tooltip } from 'antd';
import { useMemo } from 'react';
import TrimmedText from '../TrimmedText/TrimmedText';
@@ -15,19 +16,33 @@ export default function KeyValueLabel({
badgeValue,
maxCharacters = 20,
}: KeyValueLabelProps): JSX.Element | null {
const isUrl = useMemo(() => /^https?:\/\//.test(badgeValue), [badgeValue]);
if (!badgeKey || !badgeValue) {
return null;
}
return (
<div className="key-value-label">
<div className="key-value-label__key">
<TrimmedText text={badgeKey} maxCharacters={maxCharacters} />
</div>
<Tooltip title={badgeValue}>
<div className="key-value-label__value">
{isUrl ? (
<a
href={badgeValue}
target="_blank"
rel="noopener noreferrer"
className="key-value-label__value"
>
<TrimmedText text={badgeValue} maxCharacters={maxCharacters} />
</div>
</Tooltip>
</a>
) : (
<Tooltip title={badgeValue}>
<div className="key-value-label__value">
<TrimmedText text={badgeValue} maxCharacters={maxCharacters} />
</div>
</Tooltip>
)}
</div>
);
}

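Link detection is a plain protocol-prefix check, so only absolute http(s) URLs are rendered as anchors; bare domains keep the old tooltip rendering. A quick check of the regex used above:

/^https?:\/\//.test('https://signoz.io'); // true, rendered as <a target="_blank" rel="noopener noreferrer">
/^https?:\/\//.test('signoz.io'); // false, rendered as the plain Tooltip + TrimmedText value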
View File

@@ -1,11 +1,8 @@
import { compose, Store } from 'redux';
type ClarityType<T> = (...args: string[]) => T;
declare global {
interface Window {
store: Store;
clarity: ClarityType<string>;
Intercom: any;
analytics: Record<string, any>;
__REDUX_DEVTOOLS_EXTENSION_COMPOSE__: typeof compose;

View File

@@ -23,7 +23,6 @@ const plugins = [
INTERCOM_APP_ID: process.env.INTERCOM_APP_ID,
SEGMENT_ID: process.env.SEGMENT_ID,
POSTHOG_KEY: process.env.POSTHOG_KEY,
CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID,
SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN,
SENTRY_ORG: process.env.SENTRY_ORG,
SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID,
@@ -42,7 +41,6 @@ const plugins = [
INTERCOM_APP_ID: process.env.INTERCOM_APP_ID,
SEGMENT_ID: process.env.SEGMENT_ID,
POSTHOG_KEY: process.env.POSTHOG_KEY,
CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID,
SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN,
SENTRY_ORG: process.env.SENTRY_ORG,
SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID,

View File

@@ -28,7 +28,6 @@ const plugins = [
INTERCOM_APP_ID: process.env.INTERCOM_APP_ID,
SEGMENT_ID: process.env.SEGMENT_ID,
POSTHOG_KEY: process.env.POSTHOG_KEY,
CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID,
SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN,
SENTRY_ORG: process.env.SENTRY_ORG,
SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID,
@@ -52,7 +51,6 @@ const plugins = [
INTERCOM_APP_ID: process.env.INTERCOM_APP_ID,
SEGMENT_ID: process.env.SEGMENT_ID,
POSTHOG_KEY: process.env.POSTHOG_KEY,
CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID,
SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN,
SENTRY_ORG: process.env.SENTRY_ORG,
SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID,

View File

@@ -169,7 +169,7 @@
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.23.5.tgz#ffb878728bb6bdcb6f4510aa51b1be9afb8cfd98"
integrity sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==
"@babel/core@^7.1.0", "@babel/core@^7.12.3", "@babel/core@^7.16.0", "@babel/core@^7.7.2", "@babel/core@^7.7.5", "@babel/core@^7.8.0":
"@babel/core@^7.1.0", "@babel/core@^7.12.3", "@babel/core@^7.16.0", "@babel/core@^7.7.2", "@babel/core@^7.8.0":
version "7.21.4"
resolved "https://registry.npmjs.org/@babel/core/-/core-7.21.4.tgz"
integrity sha512-qt/YV149Jman/6AfmlxJ04LMIu8bMoyl3RB91yTFrxQmgbrSvQMy7cI8Q62FHx1t8wJ8B5fu0UDoLwHAhUo1QA==
@@ -2659,18 +2659,6 @@
dependencies:
tslib "2.5.0"
"@hapi/hoek@^9.0.0":
version "9.3.0"
resolved "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz"
integrity sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==
"@hapi/topo@^5.0.0":
version "5.1.0"
resolved "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz"
integrity sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==
dependencies:
"@hapi/hoek" "^9.0.0"
"@humanwhocodes/config-array@^0.5.0":
version "0.5.0"
resolved "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.5.0.tgz"
@@ -3751,23 +3739,6 @@
unplugin "1.0.1"
uuid "^9.0.0"
"@sideway/address@^4.1.3":
version "4.1.4"
resolved "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz"
integrity sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==
dependencies:
"@hapi/hoek" "^9.0.0"
"@sideway/formula@^3.0.1":
version "3.0.1"
resolved "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz"
integrity sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==
"@sideway/pinpoint@^2.0.0":
version "2.0.0"
resolved "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz"
integrity sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==
"@signozhq/design-tokens@0.0.8":
version "0.0.8"
resolved "https://registry.yarnpkg.com/@signozhq/design-tokens/-/design-tokens-0.0.8.tgz#368dc92cfe01d0cd893df140445c5d9dfd944a88"
@@ -4591,13 +4562,6 @@
resolved "https://registry.npmjs.org/@types/uuid/-/uuid-8.3.4.tgz"
integrity sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==
"@types/wait-on@^5.2.0":
version "5.3.1"
resolved "https://registry.npmjs.org/@types/wait-on/-/wait-on-5.3.1.tgz"
integrity sha512-2FFOKCF/YydrMUaqg+fkk49qf0e5rDgwt6aQsMzFQzbS419h2gNOXyiwp/o2yYy27bi/C1z+HgfncryjGzlvgQ==
dependencies:
"@types/node" "*"
"@types/webpack-dev-server@^4.7.2":
version "4.7.2"
resolved "https://registry.yarnpkg.com/@types/webpack-dev-server/-/webpack-dev-server-4.7.2.tgz#a12d9881aa23cdd4cecbb2d31fa784a45c4967e0"
@@ -5428,18 +5392,6 @@ anymatch@^3.0.3, anymatch@~3.1.2:
normalize-path "^3.0.0"
picomatch "^2.0.4"
append-transform@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/append-transform/-/append-transform-2.0.0.tgz"
integrity sha512-7yeyCEurROLQJFv5Xj4lEGTy0borxepjFv1g22oAdqFu//SrAlDl1O1Nxx15SH1RoliUml6p8dwJW9jvZughhg==
dependencies:
default-require-extensions "^3.0.0"
archy@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz"
integrity sha512-Xg+9RwCg/0p32teKdGMPTPnVXKD0w3DfHnFTficozsAgsvq2XenPJq/MYpzzQ/v8zrOyJn6Ds39VA4JIDwFfqw==
arg@^4.1.0:
version "4.1.3"
resolved "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz"
@@ -5635,13 +5587,6 @@ axios@1.7.4:
form-data "^4.0.0"
proxy-from-env "^1.1.0"
axios@^0.21.1:
version "0.21.4"
resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.4.tgz#c67b90dc0568e5c1cf2b0b858c43ba28e2eda575"
integrity sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg==
dependencies:
follow-redirects "^1.14.0"
axobject-query@^3.1.1:
version "3.1.1"
resolved "https://registry.npmjs.org/axobject-query/-/axobject-query-3.1.1.tgz"
@@ -6315,16 +6260,6 @@ bytes@3.1.2:
resolved "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz"
integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==
caching-transform@^4.0.0:
version "4.0.0"
resolved "https://registry.npmjs.org/caching-transform/-/caching-transform-4.0.0.tgz"
integrity sha512-kpqOvwXnjjN44D89K5ccQC+RUrsy7jB/XLlRrx0D7/2HNcTPqzsb6XgYoErwko6QsV184CA2YgS1fxDiiDZMWA==
dependencies:
hasha "^5.0.0"
make-dir "^3.0.0"
package-hash "^4.0.0"
write-file-atomic "^3.0.0"
call-bind@^1.0.0, call-bind@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz"
@@ -6355,7 +6290,7 @@ camelcase-keys@^6.2.2:
map-obj "^4.0.0"
quick-lru "^4.0.1"
camelcase@^5.0.0, camelcase@^5.3.1:
camelcase@^5.3.1:
version "5.3.1"
resolved "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz"
integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==
@@ -6620,15 +6555,6 @@ cli-width@^3.0.0:
resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-3.0.0.tgz#a2f48437a2caa9a22436e794bf071ec9e61cedf6"
integrity sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==
cliui@^6.0.0:
version "6.0.0"
resolved "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz"
integrity sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==
dependencies:
string-width "^4.2.0"
strip-ansi "^6.0.0"
wrap-ansi "^6.2.0"
cliui@^7.0.2:
version "7.0.4"
resolved "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz"
@@ -6772,16 +6698,6 @@ commander@^10.0.0:
resolved "https://registry.yarnpkg.com/commander/-/commander-10.0.1.tgz#881ee46b4f77d1c1dccc5823433aa39b022cbe06"
integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==
commander@^3.0.2:
version "3.0.2"
resolved "https://registry.npmjs.org/commander/-/commander-3.0.2.tgz"
integrity sha512-Gar0ASD4BDyKC4hl4DwHqDrmvjoxWKZigVnAbn5H1owvm4CxCPdb0HQDehwNYMJpla5+M2tPmPARzhtYuwpHow==
commander@^5.1.0:
version "5.1.0"
resolved "https://registry.yarnpkg.com/commander/-/commander-5.1.0.tgz#46abbd1652f8e059bddaef99bbdcb2ad9cf179ae"
integrity sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==
commander@^7.0.0, commander@^7.2.0:
version "7.2.0"
resolved "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz"
@@ -6802,11 +6718,6 @@ common-path-prefix@^3.0.0:
resolved "https://registry.yarnpkg.com/common-path-prefix/-/common-path-prefix-3.0.0.tgz#7d007a7e07c58c4b4d5f433131a19141b29f11e0"
integrity sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==
commondir@^1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz"
integrity sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==
compare-func@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/compare-func/-/compare-func-2.0.0.tgz"
@@ -7073,7 +6984,7 @@ cross-spawn@^6.0.5:
shebang-command "^1.2.0"
which "^1.2.9"
cross-spawn@^7.0.0, cross-spawn@^7.0.1, cross-spawn@^7.0.2, cross-spawn@^7.0.3:
cross-spawn@^7.0.1, cross-spawn@^7.0.2, cross-spawn@^7.0.3:
version "7.0.3"
resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz"
integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
@@ -7303,14 +7214,6 @@ custom-event-polyfill@^1.0.6:
resolved "https://registry.npmjs.org/custom-event-polyfill/-/custom-event-polyfill-1.0.7.tgz"
integrity sha512-TDDkd5DkaZxZFM8p+1I3yAlvM3rSr1wbrOliG4yJiwinMZN8z/iGL7BTlDkrJcYTmgUSb4ywVCc3ZaUtOtC76w==
cwd@^0.10.0:
version "0.10.0"
resolved "https://registry.npmjs.org/cwd/-/cwd-0.10.0.tgz"
integrity sha512-YGZxdTTL9lmLkCUTpg4j0zQ7IhRB5ZmqNBbGCl3Tg6MP/d5/6sY7L5mmTjzbc6JKgVZYiqTQTNhPFsbXNGlRaA==
dependencies:
find-pkg "^0.1.2"
fs-exists-sync "^0.1.0"
"d3-array@1 - 3", "d3-array@2 - 3", "d3-array@2.10.0 - 3":
version "3.2.3"
resolved "https://registry.npmjs.org/d3-array/-/d3-array-3.2.3.tgz"
@@ -7555,7 +7458,7 @@ decamelize-keys@^1.1.0:
decamelize "^1.1.0"
map-obj "^1.0.0"
decamelize@^1.1.0, decamelize@^1.2.0:
decamelize@^1.1.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
integrity sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==
@@ -7637,13 +7540,6 @@ default-gateway@^6.0.3:
dependencies:
execa "^5.0.0"
default-require-extensions@^3.0.0:
version "3.0.1"
resolved "https://registry.npmjs.org/default-require-extensions/-/default-require-extensions-3.0.1.tgz"
integrity sha512-eXTJmRbm2TIt9MgWTsOH1wEuhew6XGZcMeGKCtLedIg/NCsg1iBePXkceTdK4Fii7pzmN9tGsZhKzZ4h7O/fxw==
dependencies:
strip-bom "^4.0.0"
defaults@^1.0.3:
version "1.0.4"
resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.4.tgz#b0b02062c1e2aa62ff5d9528f0f98baa90978d7a"
@@ -8163,11 +8059,6 @@ es-to-primitive@^1.2.1:
is-date-object "^1.0.1"
is-symbol "^1.0.2"
es6-error@^4.0.1:
version "4.1.1"
resolved "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz"
integrity sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==
escalade@^3.1.1:
version "3.1.1"
resolved "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz"
@@ -8608,18 +8499,6 @@ exit@^0.1.2:
resolved "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz"
integrity sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==
expand-tilde@^1.2.2:
version "1.2.2"
resolved "https://registry.npmjs.org/expand-tilde/-/expand-tilde-1.2.2.tgz"
integrity sha512-rtmc+cjLZqnu9dSYosX9EWmSJhTwpACgJQTfj4hgg2JjOD/6SIQalZrt4a3aQeh++oNxkazcaxrhPUj6+g5G/Q==
dependencies:
os-homedir "^1.0.1"
expect-playwright@^0.8.0:
version "0.8.0"
resolved "https://registry.npmjs.org/expect-playwright/-/expect-playwright-0.8.0.tgz"
integrity sha512-+kn8561vHAY+dt+0gMqqj1oY+g5xWrsuGMk4QGxotT2WS545nVqqjs37z6hrYfIuucwqthzwJfCJUEYqixyljg==
expect@^27.5.1:
version "27.5.1"
resolved "https://registry.npmjs.org/expect/-/expect-27.5.1.tgz"
@@ -8828,15 +8707,6 @@ finalhandler@1.2.0:
statuses "2.0.1"
unpipe "~1.0.0"
find-cache-dir@^3.2.0:
version "3.3.2"
resolved "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz"
integrity sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==
dependencies:
commondir "^1.0.1"
make-dir "^3.0.2"
pkg-dir "^4.1.0"
find-cache-dir@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-4.0.0.tgz#a30ee0448f81a3990708f6453633c733e2f6eec2"
@@ -8845,30 +8715,6 @@ find-cache-dir@^4.0.0:
common-path-prefix "^3.0.0"
pkg-dir "^7.0.0"
find-file-up@^0.1.2:
version "0.1.3"
resolved "https://registry.npmjs.org/find-file-up/-/find-file-up-0.1.3.tgz"
integrity sha512-mBxmNbVyjg1LQIIpgO8hN+ybWBgDQK8qjht+EbrTCGmmPV/sc7RF1i9stPTD6bpvXZywBdrwRYxhSdJv867L6A==
dependencies:
fs-exists-sync "^0.1.0"
resolve-dir "^0.1.0"
find-pkg@^0.1.2:
version "0.1.2"
resolved "https://registry.npmjs.org/find-pkg/-/find-pkg-0.1.2.tgz"
integrity sha512-0rnQWcFwZr7eO0513HahrWafsc3CTFioEB7DRiEYCUM/70QXSY8f3mCST17HXLcPvEhzH/Ty/Bxd72ZZsr/yvw==
dependencies:
find-file-up "^0.1.2"
find-process@^1.4.4:
version "1.4.7"
resolved "https://registry.npmjs.org/find-process/-/find-process-1.4.7.tgz"
integrity sha512-/U4CYp1214Xrp3u3Fqr9yNynUrr5Le4y0SsJh2lMDDSbpwYSz3M2SMWQC+wqcx79cN8PQtHQIL8KnuY9M66fdg==
dependencies:
chalk "^4.0.0"
commander "^5.1.0"
debug "^4.1.1"
find-up@^4.0.0, find-up@^4.1.0:
version "4.1.0"
resolved "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz"
@@ -8925,7 +8771,7 @@ flubber@^0.4.2:
svgpath "^2.2.1"
topojson-client "^3.0.0"
follow-redirects@^1.0.0, follow-redirects@^1.14.0, follow-redirects@^1.15.6:
follow-redirects@^1.0.0, follow-redirects@^1.15.6:
version "1.15.6"
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.6.tgz#7f815c0cda4249c74ff09e95ef97c23b5fd0399b"
integrity sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==
@@ -8962,14 +8808,6 @@ force-graph@1:
kapsule "^1.14"
lodash-es "4"
foreground-child@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/foreground-child/-/foreground-child-2.0.0.tgz"
integrity sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA==
dependencies:
cross-spawn "^7.0.0"
signal-exit "^3.0.2"
form-data@^3.0.0:
version "3.0.1"
resolved "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz"
@@ -9008,16 +8846,11 @@ fresh@0.5.2:
resolved "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz"
integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==
fromentries@^1.2.0, fromentries@^1.3.2:
fromentries@^1.3.2:
version "1.3.2"
resolved "https://registry.npmjs.org/fromentries/-/fromentries-1.3.2.tgz"
integrity sha512-cHEpEQHUg0f8XdtZCc2ZAhrHzKzT0MrFUTcvx+hfxYu7rGMDc5SKoXFh+n4YigxsHXRzc6OrCshdR1bWH6HHyg==
fs-exists-sync@^0.1.0:
version "0.1.0"
resolved "https://registry.npmjs.org/fs-exists-sync/-/fs-exists-sync-0.1.0.tgz"
integrity sha512-cR/vflFyPZtrN6b38ZyWxpWdhlXrzZEBawlpBQMq7033xVY7/kg0GDMBK5jg8lDYQckdJ5x/YC88lM3C7VMsLg==
fs-extra@^10.0.0:
version "10.1.0"
resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz"
@@ -9104,7 +8937,7 @@ geotiff@^2.0.7:
web-worker "^1.2.0"
xml-utils "^1.0.2"
get-caller-file@^2.0.1, get-caller-file@^2.0.5:
get-caller-file@^2.0.5:
version "2.0.5"
resolved "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz"
integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==
@@ -9208,24 +9041,6 @@ global-dirs@^0.1.1:
dependencies:
ini "^1.3.4"
global-modules@^0.2.3:
version "0.2.3"
resolved "https://registry.npmjs.org/global-modules/-/global-modules-0.2.3.tgz"
integrity sha512-JeXuCbvYzYXcwE6acL9V2bAOeSIGl4dD+iwLY9iUx2VBJJ80R18HCn+JCwHM9Oegdfya3lEkGCdaRkSyc10hDA==
dependencies:
global-prefix "^0.1.4"
is-windows "^0.2.0"
global-prefix@^0.1.4:
version "0.1.5"
resolved "https://registry.npmjs.org/global-prefix/-/global-prefix-0.1.5.tgz"
integrity sha512-gOPiyxcD9dJGCEArAhF4Hd0BAqvAe/JzERP7tYumE4yIkmIedPUVXcJFWbV3/p/ovIIvKjkrTk+f1UVkq7vvbw==
dependencies:
homedir-polyfill "^1.0.0"
ini "^1.3.4"
is-windows "^0.2.0"
which "^1.2.12"
global@^4.3.0, global@~4.4.0:
version "4.4.0"
resolved "https://registry.npmjs.org/global/-/global-4.4.0.tgz"
@@ -9272,7 +9087,7 @@ gopd@^1.0.1:
dependencies:
get-intrinsic "^1.1.3"
graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9:
graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9:
version "4.2.11"
resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz"
integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==
@@ -9345,14 +9160,6 @@ has@^1.0.3:
dependencies:
function-bind "^1.1.1"
hasha@^5.0.0:
version "5.2.2"
resolved "https://registry.npmjs.org/hasha/-/hasha-5.2.2.tgz"
integrity sha512-Hrp5vIK/xr5SkeN2onO32H0MgNZ0f17HRNH39WfL0SYUNOTZ5Lz1TJ8Pajo/87dYGEFlLMm7mIc/k/s6Bvz9HQ==
dependencies:
is-stream "^2.0.0"
type-fest "^0.8.0"
hasown@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c"
@@ -9633,13 +9440,6 @@ hoist-non-react-statics@^3.0.0, hoist-non-react-statics@^3.1.0, hoist-non-react-
dependencies:
react-is "^16.7.0"
homedir-polyfill@^1.0.0:
version "1.0.3"
resolved "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz"
integrity sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==
dependencies:
parse-passwd "^1.0.0"
hosted-git-info@^2.1.4:
version "2.8.9"
resolved "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz"
@@ -10376,16 +10176,6 @@ is-what@^3.14.1:
resolved "https://registry.npmjs.org/is-what/-/is-what-3.14.1.tgz"
integrity sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA==
is-windows@^0.2.0:
version "0.2.0"
resolved "https://registry.npmjs.org/is-windows/-/is-windows-0.2.0.tgz"
integrity sha512-n67eJYmXbniZB7RF4I/FTjK1s6RPOCTxhYrVYLRaCt3lF0mpWZPKr3T2LSZAqyjQsxR2qMmGYXXzK0YWwcPM1Q==
is-windows@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz"
integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==
is-wsl@^2.2.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271"
@@ -10423,23 +10213,6 @@ istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0:
resolved "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz"
integrity sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==
istanbul-lib-hook@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/istanbul-lib-hook/-/istanbul-lib-hook-3.0.0.tgz"
integrity sha512-Pt/uge1Q9s+5VAZ+pCo16TYMWPBIl+oaNIjgLQxcX0itS6ueeaA+pEfThZpH8WxhFgCiEb8sAJY6MdUKgiIWaQ==
dependencies:
append-transform "^2.0.0"
istanbul-lib-instrument@^4.0.0:
version "4.0.3"
resolved "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-4.0.3.tgz"
integrity sha512-BXgQl9kf4WTCPCCpmFGoJkz/+uhvm7h7PFKUYxh7qarQd3ER33vHG//qaE8eN25l07YqZPpHXU9I09l/RD5aGQ==
dependencies:
"@babel/core" "^7.7.5"
"@istanbuljs/schema" "^0.1.2"
istanbul-lib-coverage "^3.0.0"
semver "^6.3.0"
istanbul-lib-instrument@^5.0.4, istanbul-lib-instrument@^5.1.0:
version "5.2.1"
resolved "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz"
@@ -10451,18 +10224,6 @@ istanbul-lib-instrument@^5.0.4, istanbul-lib-instrument@^5.1.0:
istanbul-lib-coverage "^3.2.0"
semver "^6.3.0"
istanbul-lib-processinfo@^2.0.2:
version "2.0.3"
resolved "https://registry.npmjs.org/istanbul-lib-processinfo/-/istanbul-lib-processinfo-2.0.3.tgz"
integrity sha512-NkwHbo3E00oybX6NGJi6ar0B29vxyvNwoC7eJ4G4Yq28UfY758Hgn/heV8VRFhevPED4LXfFz0DQ8z/0kw9zMg==
dependencies:
archy "^1.0.0"
cross-spawn "^7.0.3"
istanbul-lib-coverage "^3.2.0"
p-map "^3.0.0"
rimraf "^3.0.0"
uuid "^8.3.2"
istanbul-lib-report@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz"
@@ -10481,7 +10242,7 @@ istanbul-lib-source-maps@^4.0.0:
istanbul-lib-coverage "^3.0.0"
source-map "^0.6.1"
istanbul-reports@^3.0.2, istanbul-reports@^3.1.3:
istanbul-reports@^3.1.3:
version "3.1.5"
resolved "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.5.tgz"
integrity sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w==
@@ -10777,39 +10538,11 @@ jest-mock@^27.5.1:
"@jest/types" "^27.5.1"
"@types/node" "*"
jest-playwright-preset@^1.7.2:
version "1.7.2"
resolved "https://registry.yarnpkg.com/jest-playwright-preset/-/jest-playwright-preset-1.7.2.tgz#708942c4dcc1edc85429079d2b47a9382298c454"
integrity sha512-0M7M3z342bdKQLnS70cIptlJsW+uuGptbPnqIMg4K5Vp/L/DhqdTKZK7WM4n6miAUnZdUcjXKOdQWfZW/aBo7w==
dependencies:
expect-playwright "^0.8.0"
jest-process-manager "^0.3.1"
nyc "^15.1.0"
playwright-core ">=1.2.0"
rimraf "^3.0.2"
uuid "^8.3.2"
jest-pnp-resolver@^1.2.2:
version "1.2.3"
resolved "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz"
integrity sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==
jest-process-manager@^0.3.1:
version "0.3.1"
resolved "https://registry.npmjs.org/jest-process-manager/-/jest-process-manager-0.3.1.tgz"
integrity sha512-x9W54UgZ7IkzUHgXtnI1x4GKOVjxtwW0CA/7yGbTHtT/YhENO0Lic2yfVyC/gekn7OIEMcQmy0L1r9WLQABfqw==
dependencies:
"@types/wait-on" "^5.2.0"
chalk "^4.1.0"
cwd "^0.10.0"
exit "^0.1.2"
find-process "^1.4.4"
prompts "^2.4.1"
signal-exit "^3.0.3"
spawnd "^5.0.0"
tree-kill "^1.2.2"
wait-on "^5.3.0"
jest-regex-util@^27.5.1:
version "27.5.1"
resolved "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-27.5.1.tgz"
@@ -11037,17 +10770,6 @@ jju@~1.4.0:
resolved "https://registry.yarnpkg.com/jju/-/jju-1.4.0.tgz#a3abe2718af241a2b2904f84a625970f389ae32a"
integrity sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==
joi@^17.3.0:
version "17.9.2"
resolved "https://registry.npmjs.org/joi/-/joi-17.9.2.tgz"
integrity sha512-Itk/r+V4Dx0V3c7RLFdRh12IOjySm2/WGPMubBT92cQvRfYZhPM2W0hZlctjj72iES8jsRCwp7S/cRmWBnJ4nw==
dependencies:
"@hapi/hoek" "^9.0.0"
"@hapi/topo" "^5.0.0"
"@sideway/address" "^4.1.3"
"@sideway/formula" "^3.0.1"
"@sideway/pinpoint" "^2.0.0"
js-base64@^3.7.2:
version "3.7.5"
resolved "https://registry.npmjs.org/js-base64/-/js-base64-3.7.5.tgz"
@@ -11475,11 +11197,6 @@ lodash.debounce@^4.0.8:
resolved "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz"
integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==
lodash.flattendeep@^4.4.0:
version "4.4.0"
resolved "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz"
integrity sha512-uHaJFihxmJcEX3kT4I23ABqKKalJ/zDrDg0lsFtc1h+3uw49SIJ5beyhx5ExVRti3AvKoOJngIj7xz3oylPdWQ==
lodash.get@^4.4.2:
version "4.4.2"
resolved "https://registry.yarnpkg.com/lodash.get/-/lodash.get-4.4.2.tgz#2d177f652fa31e939b4438d5341499dfa3825e99"
@@ -11614,7 +11331,7 @@ make-dir@^2.1.0:
pify "^4.0.1"
semver "^5.6.0"
make-dir@^3.0.0, make-dir@^3.0.2:
make-dir@^3.0.0:
version "3.1.0"
resolved "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz"
integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==
@@ -12456,7 +12173,7 @@ minimist-options@4.1.0:
is-plain-obj "^1.1.0"
kind-of "^6.0.3"
minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6:
minimist@^1.2.0, minimist@^1.2.6:
version "1.2.8"
resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz"
integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
@@ -12691,13 +12408,6 @@ node-int64@^0.4.0:
resolved "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz"
integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==
node-preload@^0.2.1:
version "0.2.1"
resolved "https://registry.npmjs.org/node-preload/-/node-preload-0.2.1.tgz"
integrity sha512-RM5oyBy45cLEoHqCeh+MNuFAxO0vTFBLskvQbOKnEE7YTTSN4tbN8QWDIPQ6L+WvKsB/qLEGpYe2ZZ9d4W9OIQ==
dependencies:
process-on-spawn "^1.0.0"
node-releases@^2.0.13:
version "2.0.13"
resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.13.tgz#d5ed1627c23e3461e819b02e57b75e4899b1c81d"
@@ -12787,39 +12497,6 @@ nwsapi@^2.2.0:
resolved "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.4.tgz"
integrity sha512-NHj4rzRo0tQdijE9ZqAx6kYDcoRwYwSYzCA8MY3JzfxlrvEU0jhnhJT9BhqhJs7I/dKcrDm6TyulaRqZPIhN5g==
nyc@^15.1.0:
version "15.1.0"
resolved "https://registry.npmjs.org/nyc/-/nyc-15.1.0.tgz"
integrity sha512-jMW04n9SxKdKi1ZMGhvUTHBN0EICCRkHemEoE5jm6mTYcqcdas0ATzgUgejlQUHMvpnOZqGB5Xxsv9KxJW1j8A==
dependencies:
"@istanbuljs/load-nyc-config" "^1.0.0"
"@istanbuljs/schema" "^0.1.2"
caching-transform "^4.0.0"
convert-source-map "^1.7.0"
decamelize "^1.2.0"
find-cache-dir "^3.2.0"
find-up "^4.1.0"
foreground-child "^2.0.0"
get-package-type "^0.1.0"
glob "^7.1.6"
istanbul-lib-coverage "^3.0.0"
istanbul-lib-hook "^3.0.0"
istanbul-lib-instrument "^4.0.0"
istanbul-lib-processinfo "^2.0.2"
istanbul-lib-report "^3.0.0"
istanbul-lib-source-maps "^4.0.0"
istanbul-reports "^3.0.2"
make-dir "^3.0.0"
node-preload "^0.2.1"
p-map "^3.0.0"
process-on-spawn "^1.0.0"
resolve-from "^5.0.0"
rimraf "^3.0.0"
signal-exit "^3.0.2"
spawn-wrap "^2.0.0"
test-exclude "^6.0.0"
yargs "^15.0.2"
object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1:
version "4.1.1"
resolved "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz"
@@ -13013,11 +12690,6 @@ ora@^5.4.1:
strip-ansi "^6.0.0"
wcwidth "^1.0.1"
os-homedir@^1.0.1:
version "1.0.2"
resolved "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz"
integrity sha512-B5JU3cabzk8c67mRRd3ECmROafjYMXbuzlwtqdM8IbS8ktlTix8aFGb2bAGKrSRIlnfKwovGUUr72JUPyOb6kQ==
os-tmpdir@~1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
@@ -13080,13 +12752,6 @@ p-locate@^6.0.0:
dependencies:
p-limit "^4.0.0"
p-map@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz"
integrity sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==
dependencies:
aggregate-error "^3.0.0"
p-map@^4.0.0:
version "4.0.0"
resolved "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz"
@@ -13107,16 +12772,6 @@ p-try@^2.0.0:
resolved "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz"
integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==
package-hash@^4.0.0:
version "4.0.0"
resolved "https://registry.npmjs.org/package-hash/-/package-hash-4.0.0.tgz"
integrity sha512-whdkPIooSu/bASggZ96BWVvZTRMOFxnyUG5PnTSGKoJE2gd5mbVNmR2Nj20QFzxYYgAXpoqC+AiXzl+UMRh7zQ==
dependencies:
graceful-fs "^4.1.15"
hasha "^5.0.0"
lodash.flattendeep "^4.4.0"
release-zalgo "^1.0.0"
pako@^2.0.4:
version "2.1.0"
resolved "https://registry.npmjs.org/pako/-/pako-2.1.0.tgz"
@@ -13229,11 +12884,6 @@ parse-numeric-range@^1.3.0:
resolved "https://registry.yarnpkg.com/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz#7c63b61190d61e4d53a1197f0c83c47bb670ffa3"
integrity sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==
parse-passwd@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/parse-passwd/-/parse-passwd-1.0.0.tgz"
integrity sha512-1Y1A//QUXEZK7YKz+rD9WydcE1+EuPr6ZBgKecAB8tmoW6UFv0NREVJe1p+jRxtThkcbbKkfwIbWJe/IeE6m2Q==
parse5-htmlparser2-tree-adapter@^6.0.1:
version "6.0.1"
resolved "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz"
@@ -13410,7 +13060,7 @@ pirates@^4.0.4:
resolved "https://registry.npmjs.org/pirates/-/pirates-4.0.5.tgz"
integrity sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ==
pkg-dir@^4.1.0, pkg-dir@^4.2.0:
pkg-dir@^4.2.0:
version "4.2.0"
resolved "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz"
integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==
@@ -13424,7 +13074,7 @@ pkg-dir@^7.0.0:
dependencies:
find-up "^6.3.0"
playwright-core@1.33.0, playwright-core@>=1.2.0:
playwright-core@1.33.0:
version "1.33.0"
resolved "https://registry.npmjs.org/playwright-core/-/playwright-core-1.33.0.tgz"
integrity sha512-aizyPE1Cj62vAECdph1iaMILpT0WUDCq3E6rW6I+dleSbBoGbktvJtzS6VHkZ4DKNEOG9qJpiom/ZxO+S15LAw==
@@ -13807,13 +13457,6 @@ process-nextick-args@~2.0.0:
resolved "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz"
integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==
process-on-spawn@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/process-on-spawn/-/process-on-spawn-1.0.0.tgz"
integrity sha512-1WsPDsUSMmZH5LeMLegqkPDrsGgsWwk1Exipy2hvB0o/F0ASzbpIctSCcZIK1ykJvtTJULEH+20WOFjMvGnCTg==
dependencies:
fromentries "^1.2.0"
process@^0.11.10:
version "0.11.10"
resolved "https://registry.npmjs.org/process/-/process-0.11.10.tgz"
@@ -13829,7 +13472,7 @@ promise-polyfill@^3.1.0:
resolved "https://registry.npmjs.org/promise-polyfill/-/promise-polyfill-3.1.0.tgz"
integrity sha512-t20OwHJ4ZOUj5fV+qms67oczphAVkRC6Rrjcrne+V1FJkQMym7n69xJmYyXHulm9OUQ0Ie5KSzg0QhOYgaxy+w==
prompts@^2.0.1, prompts@^2.4.1:
prompts@^2.0.1:
version "2.4.2"
resolved "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz"
integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==
@@ -15010,13 +14653,6 @@ relateurl@^0.2.7:
resolved "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz"
integrity sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==
release-zalgo@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz"
integrity sha512-gUAyHVHPPC5wdqX/LG4LWtRYtgjxyX78oanFNTMMyFEfOqdC54s3eE82imuWKbOeqYht2CrNf64Qb8vgmmtZGA==
dependencies:
es6-error "^4.0.1"
remark-gfm@~3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/remark-gfm/-/remark-gfm-3.0.1.tgz#0b180f095e3036545e9dddac0e8df3fa5cfee54f"
@@ -15080,11 +14716,6 @@ require-from-string@^2.0.2:
resolved "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz"
integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==
require-main-filename@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz"
integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==
requires-port@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz"
@@ -15107,14 +14738,6 @@ resolve-cwd@^3.0.0:
dependencies:
resolve-from "^5.0.0"
resolve-dir@^0.1.0:
version "0.1.1"
resolved "https://registry.npmjs.org/resolve-dir/-/resolve-dir-0.1.1.tgz"
integrity sha512-QxMPqI6le2u0dCLyiGzgy92kjkkL6zO0XyvHzjdTNH3zM6e5Hz3BwG6+aEyNgiQ5Xz6PwTwgQEj3U50dByPKIA==
dependencies:
expand-tilde "^1.2.2"
global-modules "^0.2.3"
resolve-from@5.0.0, resolve-from@^5.0.0:
version "5.0.0"
resolved "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz"
@@ -15250,13 +14873,6 @@ rxjs@7.8.0:
dependencies:
tslib "^2.1.0"
rxjs@^6.6.3:
version "6.6.7"
resolved "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz"
integrity sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==
dependencies:
tslib "^1.9.0"
rxjs@^7.5.5:
version "7.8.1"
resolved "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz"
@@ -15483,11 +15099,6 @@ serve-static@1.15.0:
parseurl "~1.3.3"
send "0.18.0"
set-blocking@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz"
integrity sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==
set-cookie-parser@^2.4.6:
version "2.6.0"
resolved "https://registry.yarnpkg.com/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz#131921e50f62ff1a66a461d7d62d7b21d5d15a51"
@@ -15722,28 +15333,6 @@ space-separated-tokens@^2.0.0:
resolved "https://registry.yarnpkg.com/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz#1ecd9d2350a3844572c3f4a312bceb018348859f"
integrity sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==
spawn-wrap@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-2.0.0.tgz"
integrity sha512-EeajNjfN9zMnULLwhZZQU3GWBoFNkbngTUPfaawT4RkMiviTxcX0qfhVbGey39mfctfDHkWtuecgQ8NJcyQWHg==
dependencies:
foreground-child "^2.0.0"
is-windows "^1.0.2"
make-dir "^3.0.0"
rimraf "^3.0.0"
signal-exit "^3.0.2"
which "^2.0.1"
spawnd@^5.0.0:
version "5.0.0"
resolved "https://registry.npmjs.org/spawnd/-/spawnd-5.0.0.tgz"
integrity sha512-28+AJr82moMVWolQvlAIv3JcYDkjkFTEmfDc503wxrF5l2rQ3dFz6DpbXp3kD4zmgGGldfM4xM4v1sFj/ZaIOA==
dependencies:
exit "^0.1.2"
signal-exit "^3.0.3"
tree-kill "^1.2.2"
wait-port "^0.2.9"
spdx-correct@^3.0.0:
version "3.2.0"
resolved "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz"
@@ -16450,11 +16039,6 @@ tr46@~0.0.3:
resolved "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz"
integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==
tree-kill@^1.2.2:
version "1.2.2"
resolved "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz"
integrity sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==
trim-lines@^3.0.0:
version "3.0.1"
resolved "https://registry.yarnpkg.com/trim-lines/-/trim-lines-3.0.1.tgz#d802e332a07df861c48802c04321017b1bd87338"
@@ -16541,7 +16125,7 @@ tslib@2.5.0, tslib@^2.0.3, tslib@^2.1.0, tslib@^2.3.0:
resolved "https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz"
integrity sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg==
tslib@^1.8.1, tslib@^1.9.0:
tslib@^1.8.1:
version "1.14.1"
resolved "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz"
integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==
@@ -16614,7 +16198,7 @@ type-fest@^0.6.0:
resolved "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz"
integrity sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==
type-fest@^0.8.0, type-fest@^0.8.1:
type-fest@^0.8.1:
version "0.8.1"
resolved "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz"
integrity sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==
@@ -17161,26 +16745,6 @@ w3c-xmlserializer@^2.0.0:
dependencies:
xml-name-validator "^3.0.0"
wait-on@^5.3.0:
version "5.3.0"
resolved "https://registry.npmjs.org/wait-on/-/wait-on-5.3.0.tgz"
integrity sha512-DwrHrnTK+/0QFaB9a8Ol5Lna3k7WvUR4jzSKmz0YaPBpuN2sACyiPVKVfj6ejnjcajAcvn3wlbTyMIn9AZouOg==
dependencies:
axios "^0.21.1"
joi "^17.3.0"
lodash "^4.17.21"
minimist "^1.2.5"
rxjs "^6.6.3"
wait-port@^0.2.9:
version "0.2.14"
resolved "https://registry.npmjs.org/wait-port/-/wait-port-0.2.14.tgz"
integrity sha512-kIzjWcr6ykl7WFbZd0TMae8xovwqcqbx6FM9l+7agOgUByhzdjfzZBPK2CPufldTOMxbUivss//Sh9MFawmPRQ==
dependencies:
chalk "^2.4.2"
commander "^3.0.2"
debug "^4.1.1"
walker@^1.0.7, walker@^1.0.8:
version "1.0.8"
resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f"
@@ -17549,11 +17113,6 @@ which-collection@^1.0.1:
is-weakmap "^2.0.1"
is-weakset "^2.0.1"
which-module@^2.0.0:
version "2.0.1"
resolved "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz"
integrity sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==
which-typed-array@^1.1.10, which-typed-array@^1.1.11, which-typed-array@^1.1.2:
version "1.1.11"
resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.11.tgz#99d691f23c72aab6768680805a271b69761ed61a"
@@ -17577,7 +17136,7 @@ which-typed-array@^1.1.9:
has-tostringtag "^1.0.0"
is-typed-array "^1.1.10"
which@^1.2.12, which@^1.2.9:
which@^1.2.9:
version "1.3.1"
resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a"
integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==
@@ -17731,11 +17290,6 @@ xtend@^4.0.0:
resolved "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz"
integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==
y18n@^4.0.0:
version "4.0.3"
resolved "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz"
integrity sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==
y18n@^5.0.5:
version "5.0.8"
resolved "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz"
@@ -17761,36 +17315,11 @@ yargs-parser@20.x, yargs-parser@^20.2.2, yargs-parser@^20.2.3:
resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz"
integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==
yargs-parser@^18.1.2:
version "18.1.3"
resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz"
integrity sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==
dependencies:
camelcase "^5.0.0"
decamelize "^1.2.0"
yargs-parser@^21.1.1:
version "21.1.1"
resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz"
integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==
yargs@^15.0.2:
version "15.4.1"
resolved "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz"
integrity sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==
dependencies:
cliui "^6.0.0"
decamelize "^1.2.0"
find-up "^4.1.0"
get-caller-file "^2.0.1"
require-directory "^2.1.1"
require-main-filename "^2.0.0"
set-blocking "^2.0.0"
string-width "^4.2.0"
which-module "^2.0.0"
y18n "^4.0.0"
yargs-parser "^18.1.2"
yargs@^16.2.0:
version "16.2.0"
resolved "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz"

View File

@@ -40,6 +40,11 @@ const (
defaultWriteBatchDelay time.Duration = 5 * time.Second
defaultWriteBatchSize int = 10000
defaultEncoding Encoding = EncodingJSON
defaultLogsLocalTableV2 string = "logs_v2"
defaultLogsTableV2 string = "distributed_logs_v2"
defaultLogsResourceLocalTableV2 string = "logs_v2_resource"
defaultLogsResourceTableV2 string = "distributed_logs_v2_resource"
)
// NamespaceConfig is Clickhouse's internal configuration data
@@ -72,6 +77,11 @@ type namespaceConfig struct {
WriteBatchSize int
Encoding Encoding
Connector Connector
LogsLocalTableV2 string
LogsTableV2 string
LogsResourceLocalTableV2 string
LogsResourceTableV2 string
}
// Connector defines how to connect to the database
@@ -159,6 +169,11 @@ func NewOptions(
WriteBatchSize: defaultWriteBatchSize,
Encoding: defaultEncoding,
Connector: defaultConnector,
LogsTableV2: defaultLogsTableV2,
LogsLocalTableV2: defaultLogsLocalTableV2,
LogsResourceTableV2: defaultLogsResourceTableV2,
LogsResourceLocalTableV2: defaultLogsResourceLocalTableV2,
},
others: make(map[string]*namespaceConfig, len(otherNamespaces)),
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/google/uuid"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.uber.org/zap"
"golang.org/x/exp/maps"
)
@@ -52,7 +51,7 @@ func (tracker *inMemoryQueryProgressTracker) ReportQueryProgress(
func (tracker *inMemoryQueryProgressTracker) SubscribeToQueryProgress(
queryId string,
) (<-chan v3.QueryProgress, func(), *model.ApiError) {
) (<-chan model.QueryProgress, func(), *model.ApiError) {
queryTracker, err := tracker.getQueryTracker(queryId)
if err != nil {
return nil, nil, err
@@ -97,7 +96,7 @@ type queryTracker struct {
queryId string
isFinished bool
progress *v3.QueryProgress
progress *model.QueryProgress
subscriptions map[string]*queryProgressSubscription
lock sync.Mutex
@@ -124,7 +123,7 @@ func (qt *queryTracker) handleProgressUpdate(p *clickhouse.Progress) {
if qt.progress == nil {
// This is the first update
qt.progress = &v3.QueryProgress{}
qt.progress = &model.QueryProgress{}
}
updateQueryProgress(qt.progress, p)
@@ -135,7 +134,7 @@ func (qt *queryTracker) handleProgressUpdate(p *clickhouse.Progress) {
}
func (qt *queryTracker) subscribe() (
<-chan v3.QueryProgress, func(), *model.ApiError,
<-chan model.QueryProgress, func(), *model.ApiError,
) {
qt.lock.Lock()
defer qt.lock.Unlock()
@@ -200,20 +199,20 @@ func (qt *queryTracker) onFinished() {
}
type queryProgressSubscription struct {
ch chan v3.QueryProgress
ch chan model.QueryProgress
isClosed bool
lock sync.Mutex
}
func newQueryProgressSubscription() *queryProgressSubscription {
ch := make(chan v3.QueryProgress, 1000)
ch := make(chan model.QueryProgress, 1000)
return &queryProgressSubscription{
ch: ch,
}
}
// Must not block or panic in any scenario
func (ch *queryProgressSubscription) send(progress v3.QueryProgress) {
func (ch *queryProgressSubscription) send(progress model.QueryProgress) {
ch.lock.Lock()
defer ch.lock.Unlock()
@@ -248,7 +247,7 @@ func (ch *queryProgressSubscription) close() {
}
}
func updateQueryProgress(qp *v3.QueryProgress, chProgress *clickhouse.Progress) {
func updateQueryProgress(qp *model.QueryProgress, chProgress *clickhouse.Progress) {
qp.ReadRows += chProgress.Rows
qp.ReadBytes += chProgress.Bytes
qp.ElapsedMs += uint64(chProgress.Elapsed.Milliseconds())

View File

@@ -3,7 +3,6 @@ package queryprogress
import (
"github.com/ClickHouse/clickhouse-go/v2"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
type QueryProgressTracker interface {
@@ -19,7 +18,7 @@ type QueryProgressTracker interface {
// The returned channel will produce `QueryProgress` instances representing
// the latest state of query progress stats. Also returns a function that
// can be called to unsubscribe before the query finishes, if needed.
SubscribeToQueryProgress(queryId string) (ch <-chan v3.QueryProgress, unsubscribe func(), err *model.ApiError)
SubscribeToQueryProgress(queryId string) (ch <-chan model.QueryProgress, unsubscribe func(), err *model.ApiError)
}
func NewQueryProgressTracker() QueryProgressTracker {

View File

@@ -7,7 +7,6 @@ import (
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
func TestQueryProgressTracking(t *testing.T) {
@@ -45,7 +44,7 @@ func TestQueryProgressTracking(t *testing.T) {
require.NotNil(ch)
require.NotNil(unsubscribe)
expectedProgress := v3.QueryProgress{}
expectedProgress := model.QueryProgress{}
updateQueryProgress(&expectedProgress, testProgress1)
require.Equal(expectedProgress.ReadRows, testProgress1.Rows)
select {

File diff suppressed because it is too large

View File

@@ -5,19 +5,18 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/gosimple/slug"
"github.com/jmoiron/sqlx"
"github.com/mitchellh/mapstructure"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.uber.org/zap"
)
@@ -152,6 +151,8 @@ func InitDB(dataSourceName string) (*sqlx.DB, error) {
return nil, fmt.Errorf("error in adding column locked to dashboards table: %s", err.Error())
}
telemetry.GetInstance().SetDashboardsInfoCallback(GetDashboardsInfo)
return db, nil
}
@@ -216,14 +217,6 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
newCount, _ := countTraceAndLogsPanel(data)
if newCount > 0 {
fErr := checkFeatureUsage(fm, newCount)
if fErr != nil {
return nil, fErr
}
}
result, err := db.Exec("INSERT INTO dashboards (uuid, created_at, created_by, updated_at, updated_by, data) VALUES ($1, $2, $3, $4, $5, $6)",
dash.Uuid, dash.CreatedAt, userEmail, dash.UpdatedAt, userEmail, mapData)
@@ -237,11 +230,6 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf
}
dash.Id = int(lastInsertId)
traceAndLogsPanelUsage, _ := countTraceAndLogsPanel(data)
if traceAndLogsPanelUsage > 0 {
updateFeatureUsage(fm, traceAndLogsPanelUsage)
}
return dash, nil
}
@@ -287,11 +275,6 @@ func DeleteDashboard(ctx context.Context, uuid string, fm interfaces.FeatureLook
return &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no dashboard found with uuid: %s", uuid)}
}
traceAndLogsPanelUsage, _ := countTraceAndLogsPanel(dashboard.Data)
if traceAndLogsPanelUsage > 0 {
updateFeatureUsage(fm, -traceAndLogsPanelUsage)
}
return nil
}
@@ -329,28 +312,15 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
}
}
// check if the count of trace and logs QB panel has changed, if yes, then check feature flag count
existingCount, existingTotal := countTraceAndLogsPanel(dashboard.Data)
newCount, newTotal := countTraceAndLogsPanel(data)
if newCount > existingCount {
err := checkFeatureUsage(fm, newCount-existingCount)
if err != nil {
return nil, err
}
}
// if the total count of panels has reduced by more than 1,
// return error
existingIds := getWidgetIds(dashboard.Data)
newIds := getWidgetIds(data)
if existingTotal > newTotal && existingTotal-newTotal > 1 {
// if the total count of panels has reduced by more than 1,
// return error
existingIds := getWidgetIds(dashboard.Data)
newIds := getWidgetIds(data)
differenceIds := getIdDifference(existingIds, newIds)
if len(differenceIds) > 1 {
return nil, model.BadRequest(fmt.Errorf("deleting more than one panel is not supported"))
}
differenceIds := getIdDifference(existingIds, newIds)
if len(differenceIds) > 1 {
return nil, model.BadRequest(fmt.Errorf("deleting more than one panel is not supported"))
}
dashboard.UpdatedAt = time.Now()
@@ -364,10 +334,6 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
zap.L().Error("Error in inserting dashboard data", zap.Any("data", data), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
if existingCount != newCount {
// if the count of trace and logs panel has changed, we need to update feature flag count as well
updateFeatureUsage(fm, newCount-existingCount)
}
return dashboard, nil
}
@@ -389,51 +355,6 @@ func LockUnlockDashboard(ctx context.Context, uuid string, lock bool) *model.Api
return nil
}
func updateFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiError {
feature, err := fm.GetFeatureFlag(model.QueryBuilderPanels)
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
default:
zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
}
}
feature.Usage += usage
if feature.Usage >= feature.UsageLimit && feature.UsageLimit != -1 {
feature.Active = false
}
if feature.Usage < feature.UsageLimit || feature.UsageLimit == -1 {
feature.Active = true
}
err = fm.UpdateFeatureFlag(feature)
if err != nil {
return model.BadRequest(err)
}
return nil
}
func checkFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiError {
feature, err := fm.GetFeatureFlag(model.QueryBuilderPanels)
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
default:
zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
}
}
if feature.UsageLimit-(feature.Usage+usage) < 0 && feature.UsageLimit != -1 {
return model.BadRequest(fmt.Errorf("feature usage exceeded"))
}
return nil
}
// UpdateSlug updates the slug
func (d *Dashboard) UpdateSlug() {
var title string
@@ -469,276 +390,6 @@ func SlugifyTitle(title string) string {
return s
}
func widgetFromPanel(panel model.Panels, idx int, variables map[string]model.Variable) *model.Widget {
widget := model.Widget{
Description: panel.Description,
ID: strconv.Itoa(idx),
IsStacked: false,
NullZeroValues: "zero",
Opacity: "1",
PanelTypes: "TIME_SERIES", // TODO: Need to figure out how to get this
Query: model.Query{
ClickHouse: []model.ClickHouseQueryDashboard{
{
Disabled: false,
Legend: "",
Name: "A",
Query: "",
},
},
MetricsBuilder: model.MetricsBuilder{
Formulas: []string{},
QueryBuilder: []model.QueryBuilder{
{
AggregateOperator: 1,
Disabled: false,
GroupBy: []string{},
Legend: "",
MetricName: "",
Name: "A",
ReduceTo: 1,
},
},
},
PromQL: []model.PromQueryDashboard{},
QueryType: int(model.PROM),
},
QueryData: model.QueryDataDashboard{
Data: model.Data{
QueryData: []interface{}{},
},
},
Title: panel.Title,
YAxisUnit: panel.FieldConfig.Defaults.Unit,
QueryType: int(model.PROM), // TODO: Support for multiple query types
}
for _, target := range panel.Targets {
if target.Expr != "" {
for name := range variables {
target.Expr = strings.ReplaceAll(target.Expr, "$"+name, "{{"+"."+name+"}}")
target.Expr = strings.ReplaceAll(target.Expr, "$"+"__rate_interval", "5m")
}
// prometheus receiver in collector maps job,instance as service_name,service_instance_id
target.Expr = instanceEQRE.ReplaceAllString(target.Expr, "service_instance_id=\"{{.instance}}\"")
target.Expr = nodeEQRE.ReplaceAllString(target.Expr, "service_instance_id=\"{{.node}}\"")
target.Expr = jobEQRE.ReplaceAllString(target.Expr, "service_name=\"{{.job}}\"")
target.Expr = instanceRERE.ReplaceAllString(target.Expr, "service_instance_id=~\"{{.instance}}\"")
target.Expr = nodeRERE.ReplaceAllString(target.Expr, "service_instance_id=~\"{{.node}}\"")
target.Expr = jobRERE.ReplaceAllString(target.Expr, "service_name=~\"{{.job}}\"")
widget.Query.PromQL = append(
widget.Query.PromQL,
model.PromQueryDashboard{
Disabled: false,
Legend: target.LegendFormat,
Name: target.RefID,
Query: target.Expr,
},
)
}
}
return &widget
}
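For illustration, the variable-substitution loop above rewrites Grafana-style $var references into Go-template placeholders before the label-mapping regexes run; a small sketch with a made-up expression:

	variables := map[string]model.Variable{"namespace": {}}
	expr := `sum(rate(http_requests_total{namespace="$namespace"}[$__rate_interval]))`
	for name := range variables {
		expr = strings.ReplaceAll(expr, "$"+name, "{{."+name+"}}")
		expr = strings.ReplaceAll(expr, "$"+"__rate_interval", "5m")
	}
	// expr is now: sum(rate(http_requests_total{namespace="{{.namespace}}"}[5m]))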
func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.DashboardData {
var toReturn model.DashboardData
toReturn.Title = grafanaJSON.Title
toReturn.Tags = grafanaJSON.Tags
toReturn.Variables = make(map[string]model.Variable)
for templateIdx, template := range grafanaJSON.Templating.List {
var sort, typ, textboxValue, customValue, queryValue string
if template.Sort == 1 {
sort = "ASC"
} else if template.Sort == 2 {
sort = "DESC"
} else {
sort = "DISABLED"
}
if template.Type == "query" {
if template.Datasource == nil {
zap.L().Warn("Skipping panel as it has no datasource", zap.Int("templateIdx", templateIdx))
continue
}
// Skip if the source is not prometheus
source, stringOk := template.Datasource.(string)
if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") {
zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx))
continue
}
var result model.Datasource
var structOk bool
if reflect.TypeOf(template.Datasource).Kind() == reflect.Map {
err := mapstructure.Decode(template.Datasource, &result)
if err == nil {
structOk = true
}
}
if result.Type != "prometheus" && result.Type != "" {
zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx))
continue
}
if !stringOk && !structOk {
zap.L().Warn("Didn't recognize source, skipping")
continue
}
typ = "QUERY"
} else if template.Type == "custom" {
typ = "CUSTOM"
} else if template.Type == "textbox" {
typ = "TEXTBOX"
text, ok := template.Current.Text.(string)
if ok {
textboxValue = text
}
array, ok := template.Current.Text.([]string)
if ok {
textboxValue = strings.Join(array, ",")
}
} else {
continue
}
var selectedValue string
text, ok := template.Current.Value.(string)
if ok {
selectedValue = text
}
array, ok := template.Current.Value.([]string)
if ok {
selectedValue = strings.Join(array, ",")
}
toReturn.Variables[template.Name] = model.Variable{
AllSelected: false,
CustomValue: customValue,
Description: template.Label,
MultiSelect: template.Multi,
QueryValue: queryValue,
SelectedValue: selectedValue,
ShowALLOption: template.IncludeAll,
Sort: sort,
TextboxValue: textboxValue,
Type: typ,
}
}
row := 0
idx := 0
for _, panel := range grafanaJSON.Panels {
if panel.Type == "row" {
if panel.Panels != nil && len(panel.Panels) > 0 {
for _, innerPanel := range panel.Panels {
if idx%3 == 0 {
row++
}
toReturn.Layout = append(
toReturn.Layout,
model.Layout{
X: idx % 3 * 4,
Y: row * 3,
W: 4,
H: 3,
I: strconv.Itoa(idx),
},
)
toReturn.Widgets = append(toReturn.Widgets, *widgetFromPanel(innerPanel, idx, toReturn.Variables))
idx++
}
}
continue
}
if panel.Datasource == nil {
zap.L().Warn("Skipping panel as it has no datasource", zap.Int("idx", idx))
continue
}
// Skip if the datasource is not prometheus
source, stringOk := panel.Datasource.(string)
if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") {
zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx))
continue
}
var result model.Datasource
var structOk bool
if reflect.TypeOf(panel.Datasource).Kind() == reflect.Map {
err := mapstructure.Decode(panel.Datasource, &result)
if err == nil {
structOk = true
}
}
if result.Type != "prometheus" && result.Type != "" {
zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx))
continue
}
if !stringOk && !structOk {
zap.L().Warn("Didn't recognize source, skipping")
continue
}
// Create a panel from "gridPos"
if idx%3 == 0 {
row++
}
toReturn.Layout = append(
toReturn.Layout,
model.Layout{
X: idx % 3 * 4,
Y: row * 3,
W: 4,
H: 3,
I: strconv.Itoa(idx),
},
)
toReturn.Widgets = append(toReturn.Widgets, *widgetFromPanel(panel, idx, toReturn.Variables))
idx++
}
return toReturn
}
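To make the grid placement above concrete: row starts at 0 and is incremented before each group of three panels, so the first few panels land at

	// idx 0 -> X=0, Y=3    idx 1 -> X=4, Y=3    idx 2 -> X=8, Y=3
	// idx 3 -> X=0, Y=6    idx 4 -> X=4, Y=6    idx 5 -> X=8, Y=6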
func countTraceAndLogsPanel(data map[string]interface{}) (int64, int64) {
count := int64(0)
totalPanels := int64(0)
if data != nil && data["widgets"] != nil {
widgets, ok := data["widgets"]
if ok {
data, ok := widgets.([]interface{})
if ok {
for _, widget := range data {
sData, ok := widget.(map[string]interface{})
if ok && sData["query"] != nil {
totalPanels++
query, ok := sData["query"].(map[string]interface{})
if ok && query["queryType"] == "builder" && query["builder"] != nil {
builderData, ok := query["builder"].(map[string]interface{})
if ok && builderData["queryData"] != nil {
builderQueryData, ok := builderData["queryData"].([]interface{})
if ok {
for _, queryData := range builderQueryData {
data, ok := queryData.(map[string]interface{})
if ok {
if data["dataSource"] == "traces" || data["dataSource"] == "logs" {
count++
}
}
}
}
}
}
}
}
}
}
}
return count, totalPanels
}
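A worked example of the widget shape this function walks (hypothetical dashboard data; only builder queries with a logs or traces dataSource are counted):

	data := map[string]interface{}{
		"widgets": []interface{}{
			map[string]interface{}{
				"query": map[string]interface{}{
					"queryType": "builder",
					"builder": map[string]interface{}{
						"queryData": []interface{}{
							map[string]interface{}{"dataSource": "logs"},
							map[string]interface{}{"dataSource": "metrics"},
						},
					},
				},
			},
		},
	}
	count, total := countTraceAndLogsPanel(data) // count == 1 (the logs query), total == 1 (one widget with a query)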
func getWidgetIds(data map[string]interface{}) []string {
widgetIds := []string{}
if data != nil && data["widgets"] != nil {
@@ -787,3 +438,141 @@ func getIdDifference(existingIds []string, newIds []string) []string {
return difference
}
// GetDashboardsInfo returns analytics data for dashboards
func GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) {
dashboardsInfo := model.DashboardsInfo{}
// fetch dashboards from dashboard db
query := "SELECT data FROM dashboards"
var dashboardsData []Dashboard
err := db.Select(&dashboardsData, query)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return &dashboardsInfo, err
}
totalDashboardsWithPanelAndName := 0
var dashboardNames []string
count := 0
logChQueriesCount := 0
for _, dashboard := range dashboardsData {
if isDashboardWithPanelAndName(dashboard.Data) {
totalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName + 1
}
dashboardName := extractDashboardName(dashboard.Data)
if dashboardName != "" {
dashboardNames = append(dashboardNames, dashboardName)
}
dashboardInfo := countPanelsInDashboard(dashboard.Data)
dashboardsInfo.LogsBasedPanels += dashboardInfo.LogsBasedPanels
dashboardsInfo.TracesBasedPanels += dashboardInfo.TracesBasedPanels
dashboardsInfo.MetricBasedPanels += dashboardInfo.MetricBasedPanels
if isDashboardWithTSV2(dashboard.Data) {
count = count + 1
}
if isDashboardWithLogsClickhouseQuery(dashboard.Data) {
logChQueriesCount = logChQueriesCount + 1
}
}
dashboardsInfo.DashboardNames = dashboardNames
dashboardsInfo.TotalDashboards = len(dashboardsData)
dashboardsInfo.TotalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName
dashboardsInfo.QueriesWithTSV2 = count
dashboardsInfo.DashboardsWithLogsChQuery = logChQueriesCount
return &dashboardsInfo, nil
}
func isDashboardWithTSV2(data map[string]interface{}) bool {
jsonData, err := json.Marshal(data)
if err != nil {
return false
}
return strings.Contains(string(jsonData), "time_series_v2")
}
func isDashboardWithLogsClickhouseQuery(data map[string]interface{}) bool {
jsonData, err := json.Marshal(data)
if err != nil {
return false
}
result := strings.Contains(string(jsonData), "signoz_logs.distributed_logs ") ||
strings.Contains(string(jsonData), "signoz_logs.logs ")
return result
}
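// Note: the trailing space in both match strings appears deliberate; it keeps longer
// table names (e.g. a hypothetical "signoz_logs.distributed_logs_v2") from matching,
// since this is a plain substring check on the marshalled dashboard JSON.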
func isDashboardWithPanelAndName(data map[string]interface{}) bool {
isDashboardName := false
isDashboardWithPanelAndName := false
if data != nil && data["title"] != nil && data["widgets"] != nil {
title, ok := data["title"].(string)
if ok && title != "Sample Title" {
isDashboardName = true
}
widgets, ok := data["widgets"]
if ok && isDashboardName {
data, ok := widgets.([]interface{})
if ok && len(data) > 0 {
isDashboardWithPanelAndName = true
}
}
}
return isDashboardWithPanelAndName
}
func extractDashboardName(data map[string]interface{}) string {
if data != nil && data["title"] != nil {
title, ok := data["title"].(string)
if ok {
return title
}
}
return ""
}
func countPanelsInDashboard(data map[string]interface{}) model.DashboardsInfo {
var logsPanelCount, tracesPanelCount, metricsPanelCount int
// totalPanels := 0
if data != nil && data["widgets"] != nil {
widgets, ok := data["widgets"]
if ok {
data, ok := widgets.([]interface{})
if ok {
for _, widget := range data {
sData, ok := widget.(map[string]interface{})
if ok && sData["query"] != nil {
// totalPanels++
query, ok := sData["query"].(map[string]interface{})
if ok && query["queryType"] == "builder" && query["builder"] != nil {
builderData, ok := query["builder"].(map[string]interface{})
if ok && builderData["queryData"] != nil {
builderQueryData, ok := builderData["queryData"].([]interface{})
if ok {
for _, queryData := range builderQueryData {
data, ok := queryData.(map[string]interface{})
if ok {
if data["dataSource"] == "traces" {
tracesPanelCount++
} else if data["dataSource"] == "metrics" {
metricsPanelCount++
} else if data["dataSource"] == "logs" {
logsPanelCount++
}
}
}
}
}
}
}
}
}
}
}
return model.DashboardsInfo{
LogsBasedPanels: logsPanelCount,
TracesBasedPanels: tracesPanelCount,
MetricBasedPanels: metricsPanelCount,
}
}

View File

@@ -10,7 +10,10 @@ import (
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.uber.org/zap"
)
var db *sqlx.DB
@@ -57,6 +60,8 @@ func InitWithDSN(dataSourceName string) (*sqlx.DB, error) {
return nil, fmt.Errorf("error in creating saved views table: %s", err.Error())
}
telemetry.GetInstance().SetSavedViewsInfoCallback(GetSavedViewsInfo)
return db, nil
}
@@ -228,3 +233,21 @@ func DeleteView(uuid_ string) error {
}
return nil
}
func GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error) {
savedViewsInfo := model.SavedViewsInfo{}
savedViews, err := GetViews()
if err != nil {
zap.S().Debug("Error in fetching saved views info: ", err)
return &savedViewsInfo, err
}
savedViewsInfo.TotalSavedViews = len(savedViews)
for _, view := range savedViews {
if view.SourcePage == "traces" {
savedViewsInfo.TracesSavedViews += 1
} else if view.SourcePage == "logs" {
savedViewsInfo.LogsSavedViews += 1
}
}
return &savedViewsInfo, nil
}

View File

@@ -29,6 +29,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logs"
logsv3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsv4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
"go.signoz.io/signoz/pkg/query-service/app/metrics"
metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
"go.signoz.io/signoz/pkg/query-service/app/preferences"
@@ -40,6 +41,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/contextlinks"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
@@ -143,14 +145,14 @@ type APIHandlerOpts struct {
// Querier Influx Interval
FluxInterval time.Duration
// Use new schema
// Use Logs New schema
UseLogsNewSchema bool
}
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
alertManager, err := am.New("")
alertManager, err := am.New()
if err != nil {
return nil, err
}
@@ -195,10 +197,15 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
UseLogsNewSchema: opts.UseLogsNewSchema,
}
logsQueryBuilder := logsv3.PrepareLogsQuery
if opts.UseLogsNewSchema {
logsQueryBuilder = logsv4.PrepareLogsQuery
}
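// With this switch, every builder-backed logs query prepared by this handler goes
// through the v4 query builder whenever the new logs schema is enabled; logsv3
// remains the default.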
builderOpts := queryBuilder.QueryBuilderOptions{
BuildMetricQuery: metricsv3.PrepareMetricQuery,
BuildTraceQuery: tracesV3.PrepareTracesQuery,
BuildLogQuery: logsv3.PrepareLogsQuery,
BuildLogQuery: logsQueryBuilder,
}
aH.queryBuilder = queryBuilder.NewQueryBuilder(builderOpts, aH.featureFlags)
@@ -390,11 +397,9 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
router.HandleFunc("/api/v1/dashboards", am.ViewAccess(aH.getDashboards)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/dashboards", am.EditAccess(aH.createDashboards)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/dashboards/grafana", am.EditAccess(aH.createDashboardsTransform)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/dashboards/{uuid}", am.ViewAccess(aH.getDashboard)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/dashboards/{uuid}", am.EditAccess(aH.updateDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{uuid}", am.EditAccess(aH.deleteDashboard)).Methods(http.MethodDelete)
router.HandleFunc("/api/v1/variables/query", am.ViewAccess(aH.queryDashboardVars)).Methods(http.MethodGet)
router.HandleFunc("/api/v2/variables/query", am.ViewAccess(aH.queryDashboardVarsV2)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/explorer/views", am.ViewAccess(aH.getSavedViews)).Methods(http.MethodGet)
@@ -665,7 +670,7 @@ func (aH *APIHandler) deleteDowntimeSchedule(w http.ResponseWriter, r *http.Requ
func (aH *APIHandler) getRuleStats(w http.ResponseWriter, r *http.Request) {
ruleID := mux.Vars(r)["id"]
params := v3.QueryRuleStateHistory{}
params := model.QueryRuleStateHistory{}
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
@@ -731,7 +736,7 @@ func (aH *APIHandler) getRuleStats(w http.ResponseWriter, r *http.Request) {
pastAvgResolutionTime = 0
}
stats := v3.Stats{
stats := model.Stats{
TotalCurrentTriggers: totalCurrentTriggers,
TotalPastTriggers: totalPastTriggers,
CurrentTriggersSeries: currentTriggersSeries,
@@ -747,7 +752,7 @@ func (aH *APIHandler) getRuleStats(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) getOverallStateTransitions(w http.ResponseWriter, r *http.Request) {
ruleID := mux.Vars(r)["id"]
params := v3.QueryRuleStateHistory{}
params := model.QueryRuleStateHistory{}
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
@@ -763,9 +768,51 @@ func (aH *APIHandler) getOverallStateTransitions(w http.ResponseWriter, r *http.
aH.Respond(w, stateItems)
}
func (aH *APIHandler) metaForLinks(ctx context.Context, rule *rules.GettableRule) ([]v3.FilterItem, []v3.AttributeKey, map[string]v3.AttributeKey) {
filterItems := []v3.FilterItem{}
groupBy := []v3.AttributeKey{}
keys := make(map[string]v3.AttributeKey)
if rule.AlertType == rules.AlertTypeLogs {
logFields, err := aH.reader.GetLogFields(ctx)
if err == nil {
params := &v3.QueryRangeParamsV3{
CompositeQuery: rule.RuleCondition.CompositeQuery,
}
keys = model.GetLogFieldsV3(ctx, params, logFields)
} else {
zap.L().Error("failed to get log fields using empty keys; the link might not work as expected", zap.Error(err))
}
} else if rule.AlertType == rules.AlertTypeTraces {
traceFields, err := aH.reader.GetSpanAttributeKeys(ctx)
if err == nil {
keys = traceFields
} else {
zap.L().Error("failed to get span attributes using empty keys; the link might not work as expected", zap.Error(err))
}
}
if rule.AlertType == rules.AlertTypeLogs || rule.AlertType == rules.AlertTypeTraces {
if rule.RuleCondition.CompositeQuery != nil {
if rule.RuleCondition.QueryType() == v3.QueryTypeBuilder {
selectedQuery := rule.RuleCondition.GetSelectedQueryName()
if rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery] != nil &&
rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery].Filters != nil {
filterItems = rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery].Filters.Items
}
if rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery] != nil &&
rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery].GroupBy != nil {
groupBy = rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery].GroupBy
}
}
}
}
return filterItems, groupBy, keys
}
func (aH *APIHandler) getRuleStateHistory(w http.ResponseWriter, r *http.Request) {
ruleID := mux.Vars(r)["id"]
params := v3.QueryRuleStateHistory{}
params := model.QueryRuleStateHistory{}
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
@@ -790,24 +837,18 @@ func (aH *APIHandler) getRuleStateHistory(w http.ResponseWriter, r *http.Request
if err != nil {
continue
}
filterItems := []v3.FilterItem{}
if rule.AlertType == rules.AlertTypeLogs || rule.AlertType == rules.AlertTypeTraces {
if rule.RuleCondition.CompositeQuery != nil {
if rule.RuleCondition.QueryType() == v3.QueryTypeBuilder {
for _, query := range rule.RuleCondition.CompositeQuery.BuilderQueries {
if query.Filters != nil && len(query.Filters.Items) > 0 {
filterItems = append(filterItems, query.Filters.Items...)
}
}
}
}
}
newFilters := common.PrepareFilters(lbls, filterItems)
ts := time.Unix(res.Items[idx].UnixMilli/1000, 0)
filterItems, groupBy, keys := aH.metaForLinks(r.Context(), rule)
newFilters := contextlinks.PrepareFilters(lbls, filterItems, groupBy, keys)
end := time.Unix(res.Items[idx].UnixMilli/1000, 0)
// why are we subtracting 3 minutes?
// the query range is calculated based on the rule's evalWindow and evalDelay
// alerts have 2 minutes delay built in, so we need to subtract that from the start time
// to get the correct query range
start := end.Add(-time.Duration(rule.EvalWindow)).Add(-3 * time.Minute)
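// e.g. with EvalWindow = 5m and an item at 12:00:00, the link window becomes
// [11:52:00, 12:00:00]: 5m of evalWindow plus the 3m alert-delay buffer subtracted above.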
if rule.AlertType == rules.AlertTypeLogs {
res.Items[idx].RelatedLogsLink = common.PrepareLinksToLogs(ts, newFilters)
res.Items[idx].RelatedLogsLink = contextlinks.PrepareLinksToLogs(start, end, newFilters)
} else if rule.AlertType == rules.AlertTypeTraces {
res.Items[idx].RelatedTracesLink = common.PrepareLinksToTraces(ts, newFilters)
res.Items[idx].RelatedTracesLink = contextlinks.PrepareLinksToTraces(start, end, newFilters)
}
}
}
@@ -817,7 +858,7 @@ func (aH *APIHandler) getRuleStateHistory(w http.ResponseWriter, r *http.Request
func (aH *APIHandler) getRuleStateHistoryTopContributors(w http.ResponseWriter, r *http.Request) {
ruleID := mux.Vars(r)["id"]
params := v3.QueryRuleStateHistory{}
params := model.QueryRuleStateHistory{}
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
@@ -838,12 +879,14 @@ func (aH *APIHandler) getRuleStateHistoryTopContributors(w http.ResponseWriter,
if err != nil {
continue
}
ts := time.Unix(params.End/1000, 0)
filters := common.PrepareFilters(lbls, nil)
filterItems, groupBy, keys := aH.metaForLinks(r.Context(), rule)
newFilters := contextlinks.PrepareFilters(lbls, filterItems, groupBy, keys)
end := time.Unix(params.End/1000, 0)
start := time.Unix(params.Start/1000, 0)
if rule.AlertType == rules.AlertTypeLogs {
res[idx].RelatedLogsLink = common.PrepareLinksToLogs(ts, filters)
res[idx].RelatedLogsLink = contextlinks.PrepareLinksToLogs(start, end, newFilters)
} else if rule.AlertType == rules.AlertTypeTraces {
res[idx].RelatedTracesLink = common.PrepareLinksToTraces(ts, filters)
res[idx].RelatedTracesLink = contextlinks.PrepareLinksToTraces(start, end, newFilters)
}
}
}
@@ -935,25 +978,6 @@ func (aH *APIHandler) deleteDashboard(w http.ResponseWriter, r *http.Request) {
}
func (aH *APIHandler) queryDashboardVars(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query().Get("query")
if query == "" {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("query is required")}, nil)
return
}
if strings.Contains(strings.ToLower(query), "alter table") {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("query shouldn't alter data")}, nil)
return
}
dashboardVars, err := aH.reader.QueryDashboardVars(r.Context(), query)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
aH.Respond(w, dashboardVars)
}
func prepareQuery(r *http.Request) (string, error) {
var postData *model.DashboardVars
@@ -1066,44 +1090,6 @@ func (aH *APIHandler) getDashboard(w http.ResponseWriter, r *http.Request) {
}
func (aH *APIHandler) saveAndReturn(w http.ResponseWriter, r *http.Request, signozDashboard model.DashboardData) {
toSave := make(map[string]interface{})
toSave["title"] = signozDashboard.Title
toSave["description"] = signozDashboard.Description
toSave["tags"] = signozDashboard.Tags
toSave["layout"] = signozDashboard.Layout
toSave["widgets"] = signozDashboard.Widgets
toSave["variables"] = signozDashboard.Variables
dashboard, apiError := dashboards.CreateDashboard(r.Context(), toSave, aH.featureFlags)
if apiError != nil {
RespondError(w, apiError, nil)
return
}
aH.Respond(w, dashboard)
}
func (aH *APIHandler) createDashboardsTransform(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
b, err := io.ReadAll(r.Body)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading request body")
return
}
var importData model.GrafanaJSON
err = json.Unmarshal(b, &importData)
if err == nil {
signozDashboard := dashboards.TransformGrafanaJSONToSignoz(importData)
aH.saveAndReturn(w, r, signozDashboard)
return
}
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, "Error while creating dashboard from grafana json")
}
func (aH *APIHandler) createDashboards(w http.ResponseWriter, r *http.Request) {
var postData map[string]interface{}
@@ -1218,7 +1204,7 @@ func (aH *APIHandler) editRule(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) getChannel(w http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["id"]
channel, apiErrorObj := aH.reader.GetChannel(id)
channel, apiErrorObj := aH.ruleManager.RuleDB().GetChannel(id)
if apiErrorObj != nil {
RespondError(w, apiErrorObj, nil)
return
@@ -1228,7 +1214,7 @@ func (aH *APIHandler) getChannel(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) deleteChannel(w http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["id"]
apiErrorObj := aH.reader.DeleteChannel(id)
apiErrorObj := aH.ruleManager.RuleDB().DeleteChannel(id)
if apiErrorObj != nil {
RespondError(w, apiErrorObj, nil)
return
@@ -1237,7 +1223,7 @@ func (aH *APIHandler) deleteChannel(w http.ResponseWriter, r *http.Request) {
}
func (aH *APIHandler) listChannels(w http.ResponseWriter, r *http.Request) {
channels, apiErrorObj := aH.reader.GetChannels()
channels, apiErrorObj := aH.ruleManager.RuleDB().GetChannels()
if apiErrorObj != nil {
RespondError(w, apiErrorObj, nil)
return
@@ -1290,7 +1276,7 @@ func (aH *APIHandler) editChannel(w http.ResponseWriter, r *http.Request) {
return
}
_, apiErrorObj := aH.reader.EditChannel(receiver, id)
_, apiErrorObj := aH.ruleManager.RuleDB().EditChannel(receiver, id)
if apiErrorObj != nil {
RespondError(w, apiErrorObj, nil)
@@ -1318,7 +1304,7 @@ func (aH *APIHandler) createChannel(w http.ResponseWriter, r *http.Request) {
return
}
_, apiErrorObj := aH.reader.CreateChannel(receiver)
_, apiErrorObj := aH.ruleManager.RuleDB().CreateChannel(receiver)
if apiErrorObj != nil {
RespondError(w, apiErrorObj, nil)
@@ -2521,7 +2507,7 @@ func (aH *APIHandler) getNetworkData(
var result []*v3.Result
var errQueriesByName map[string]error
result, errQueriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil)
result, errQueriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQueriesByName)
@@ -2556,7 +2542,7 @@ func (aH *APIHandler) getNetworkData(
return
}
resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil)
resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQueriesByNameFetchLatency)
@@ -2617,7 +2603,7 @@ func (aH *APIHandler) getProducerData(
var result []*v3.Result
var errQuriesByName map[string]error
result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil)
result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
@@ -2658,7 +2644,7 @@ func (aH *APIHandler) getConsumerData(
var result []*v3.Result
var errQuriesByName map[string]error
result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil)
result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
@@ -3020,7 +3006,7 @@ func (aH *APIHandler) calculateLogsConnectionStatus(
},
}
queryRes, _, err := aH.querier.QueryRange(
ctx, qrParams, map[string]v3.AttributeKey{},
ctx, qrParams,
)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
@@ -3563,55 +3549,6 @@ func (aH *APIHandler) autoCompleteAttributeValues(w http.ResponseWriter, r *http
aH.Respond(w, response)
}
func (aH *APIHandler) getLogFieldsV3(ctx context.Context, queryRangeParams *v3.QueryRangeParamsV3) (map[string]v3.AttributeKey, error) {
data := map[string]v3.AttributeKey{}
for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
if query.DataSource == v3.DataSourceLogs {
fields, apiError := aH.reader.GetLogFields(ctx)
if apiError != nil {
return nil, apiError.Err
}
// top level fields meta will always be present in the frontend. (support for that can be added as an enhancement)
getType := func(t string) (v3.AttributeKeyType, bool) {
if t == "attributes" {
return v3.AttributeKeyTypeTag, false
} else if t == "resources" {
return v3.AttributeKeyTypeResource, false
}
return "", true
}
for _, selectedField := range fields.Selected {
fieldType, pass := getType(selectedField.Type)
if pass {
continue
}
data[selectedField.Name] = v3.AttributeKey{
Key: selectedField.Name,
Type: fieldType,
DataType: v3.AttributeKeyDataType(strings.ToLower(selectedField.DataType)),
IsColumn: true,
}
}
for _, interestingField := range fields.Interesting {
fieldType, pass := getType(interestingField.Type)
if pass {
continue
}
data[interestingField.Name] = v3.AttributeKey{
Key: interestingField.Name,
Type: fieldType,
DataType: v3.AttributeKeyDataType(strings.ToLower(interestingField.DataType)),
IsColumn: false,
}
}
break
}
}
return data, nil
}
func (aH *APIHandler) getSpanKeysV3(ctx context.Context, queryRangeParams *v3.QueryRangeParamsV3) (map[string]v3.AttributeKey, error) {
data := map[string]v3.AttributeKey{}
for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
@@ -3653,14 +3590,14 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(queryRangeParams) {
// get the fields if any logs query is present
var fields map[string]v3.AttributeKey
fields, err = aH.getLogFieldsV3(ctx, queryRangeParams)
logsFields, err := aH.reader.GetLogFields(ctx)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
return
}
// get the fields if any logs query is present
fields := model.GetLogFieldsV3(ctx, queryRangeParams, logsFields)
logsv3.Enrich(queryRangeParams, fields)
}
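// Enrichment resolves every attribute referenced in the builder query to its full
// key metadata (type, data type, and whether it is materialized as a column), so
// the query builder can emit the correct column or map access for it.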
@@ -3670,13 +3607,14 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
RespondError(w, apiErrObj, errQuriesByName)
return
}
tracesV3.Enrich(queryRangeParams, spanKeys)
}
// WARN: Only works for AND operator in traces query
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
// check if traceID is used as a filter (with an equal/similar operator) in the traces query; if yes, add a timestamp filter to the queryRange params
isUsed, traceIDs := tracesV3.TraceIdFilterUsedWithEqual(queryRangeParams)
if isUsed == true && len(traceIDs) > 0 {
if isUsed && len(traceIDs) > 0 {
zap.L().Debug("traceID used as filter in traces query")
// query signoz_spans table with traceID to get min and max timestamp
min, max, err := aH.reader.GetMinAndMaxTimestampForTraceID(ctx, traceIDs)
@@ -3691,26 +3629,34 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
// Hook up query progress tracking if requested
queryIdHeader := r.Header.Get("X-SIGNOZ-QUERY-ID")
if len(queryIdHeader) > 0 {
ctx = context.WithValue(ctx, "queryId", queryIdHeader)
onQueryFinished, err := aH.reader.ReportQueryStartForProgressTracking(queryIdHeader)
if err != nil {
zap.L().Error(
"couldn't report query start for progress tracking",
zap.String("queryId", queryIdHeader), zap.Error(err),
)
} else {
// Adding queryId to the context signals clickhouse queries to report progress
//lint:ignore SA1029 ignore for now
ctx = context.WithValue(ctx, "queryId", queryIdHeader)
defer func() {
go onQueryFinished()
}()
}
}
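// Sketch of the flow: a client opts in by sending an X-SIGNOZ-QUERY-ID header; the
// queryId placed on the context tells the ClickHouse layer to report progress for
// this query, and onQueryFinished (run after the handler returns) ends tracking,
// presumably closing the progress stream for any subscriber of that queryId.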
result, errQuriesByName, err = aH.querier.QueryRange(ctx, queryRangeParams, spanKeys)
result, errQuriesByName, err = aH.querier.QueryRange(ctx, queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
queryErrors := map[string]string{}
for name, err := range errQuriesByName {
queryErrors[fmt.Sprintf("Query-%s", name)] = err.Error()
}
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, queryErrors)
return
}
@@ -3926,7 +3872,7 @@ func (aH *APIHandler) GetQueryProgressUpdates(w http.ResponseWriter, r *http.Req
}
}
func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) liveTailLogsV2(w http.ResponseWriter, r *http.Request) {
// get the param from url and add it to body
stringReader := strings.NewReader(r.URL.Query().Get("q"))
@@ -3946,13 +3892,99 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(queryRangeParams) {
// get the fields if any logs query is present
var fields map[string]v3.AttributeKey
fields, err = aH.getLogFieldsV3(r.Context(), queryRangeParams)
logsFields, err := aH.reader.GetLogFields(r.Context())
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, nil)
return
}
fields := model.GetLogFieldsV3(r.Context(), queryRangeParams, logsFields)
logsv3.Enrich(queryRangeParams, fields)
}
queryString, err = aH.queryBuilder.PrepareLiveTailQuery(queryRangeParams)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
default:
err = fmt.Errorf("invalid query type")
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
w.Header().Set("Connection", "keep-alive")
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(200)
flusher, ok := w.(http.Flusher)
if !ok {
err := model.ApiError{Typ: model.ErrorStreamingNotSupported, Err: nil}
RespondError(w, &err, "streaming is not supported")
return
}
// flush the headers
flusher.Flush()
// create the client
client := &model.LogsLiveTailClientV2{Name: r.RemoteAddr, Logs: make(chan *model.SignozLogV2, 1000), Done: make(chan *bool), Error: make(chan error)}
go aH.reader.LiveTailLogsV4(r.Context(), queryString, uint64(queryRangeParams.Start), "", client)
for {
select {
case log := <-client.Logs:
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
enc.Encode(log)
fmt.Fprintf(w, "data: %v\n\n", buf.String())
flusher.Flush()
case <-client.Done:
zap.L().Debug("done!")
return
case err := <-client.Error:
zap.L().Error("error occurred", zap.Error(err))
fmt.Fprintf(w, "event: error\ndata: %v\n\n", err.Error())
flusher.Flush()
return
}
}
}
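A minimal sketch of consuming this stream from Go. The endpoint path and query payload here are assumptions; what is grounded in the handler above is that it emits "data: <json>\n\n" frames and an "event: error" frame on failure:

	package main

	import (
		"bufio"
		"fmt"
		"log"
		"net/http"
		"net/url"
		"strings"
	)

	func main() {
		q := `{"compositeQuery": {}}` // placeholder query-range params
		// Hypothetical endpoint path; the actual route is registered elsewhere.
		resp, err := http.Get("http://localhost:8080/api/v3/logs/livetail?q=" + url.QueryEscape(q))
		if err != nil {
			log.Fatal(err)
		}
		defer resp.Body.Close()
		sc := bufio.NewScanner(resp.Body)
		for sc.Scan() {
			line := sc.Text()
			if strings.HasPrefix(line, "data: ") {
				fmt.Println(strings.TrimPrefix(line, "data: ")) // one JSON-encoded log line
			}
		}
	}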
func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
if aH.UseLogsNewSchema {
aH.liveTailLogsV2(w, r)
return
}
// get the param from url and add it to body
stringReader := strings.NewReader(r.URL.Query().Get("q"))
r.Body = io.NopCloser(stringReader)
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
zap.L().Error(apiErrorObj.Err.Error())
RespondError(w, apiErrorObj, nil)
return
}
var err error
var queryString string
switch queryRangeParams.CompositeQuery.QueryType {
case v3.QueryTypeBuilder:
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(queryRangeParams) {
logsFields, err := aH.reader.GetLogFields(r.Context())
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, nil)
return
}
// get the fields if any logs query is present
fields := model.GetLogFieldsV3(r.Context(), queryRangeParams, logsFields)
logsv3.Enrich(queryRangeParams, fields)
}
@@ -3969,7 +4001,7 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
}
// create the client
client := &v3.LogsLiveTailClient{Name: r.RemoteAddr, Logs: make(chan *model.SignozLog, 1000), Done: make(chan *bool), Error: make(chan error)}
client := &model.LogsLiveTailClient{Name: r.RemoteAddr, Logs: make(chan *model.SignozLog, 1000), Done: make(chan *bool), Error: make(chan error)}
go aH.reader.LiveTailLogsV3(r.Context(), queryString, uint64(queryRangeParams.Start), "", client)
w.Header().Set("Connection", "keep-alive")
@@ -4004,6 +4036,7 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
return
}
}
}
func (aH *APIHandler) getMetricMetadata(w http.ResponseWriter, r *http.Request) {
@@ -4028,13 +4061,13 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(queryRangeParams) {
// get the fields if any logs query is present
var fields map[string]v3.AttributeKey
fields, err = aH.getLogFieldsV3(ctx, queryRangeParams)
logsFields, err := aH.reader.GetLogFields(r.Context())
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
RespondError(w, apiErrObj, nil)
return
}
fields := model.GetLogFieldsV3(r.Context(), queryRangeParams, logsFields)
logsv3.Enrich(queryRangeParams, fields)
}
@@ -4044,13 +4077,14 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
RespondError(w, apiErrObj, errQuriesByName)
return
}
tracesV3.Enrich(queryRangeParams, spanKeys)
}
// WARN: Only works for AND operator in traces query
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
// check if traceID is used as a filter (with an equal/similar operator) in the traces query; if yes, add a timestamp filter to the queryRange params
isUsed, traceIDs := tracesV3.TraceIdFilterUsedWithEqual(queryRangeParams)
if isUsed == true && len(traceIDs) > 0 {
if isUsed && len(traceIDs) > 0 {
zap.L().Debug("traceID used as filter in traces query")
// query signoz_spans table with traceID to get min and max timestamp
min, max, err := aH.reader.GetMinAndMaxTimestampForTraceID(ctx, traceIDs)
@@ -4062,11 +4096,15 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
}
}
result, errQuriesByName, err = aH.querierV2.QueryRange(ctx, queryRangeParams, spanKeys)
result, errQuriesByName, err = aH.querierV2.QueryRange(ctx, queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
queryErrors := map[string]string{}
for name, err := range errQuriesByName {
queryErrors[fmt.Sprintf("Query-%s", name)] = err.Error()
}
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, queryErrors)
return
}

View File

@@ -94,11 +94,11 @@ func Enrich(params *v3.QueryRangeParamsV3, fields map[string]v3.AttributeKey) {
if query.Expression != queryName && query.DataSource != v3.DataSourceLogs {
continue
}
enrichLogsQuery(query, fields)
EnrichLogsQuery(query, fields)
}
}
func enrichLogsQuery(query *v3.BuilderQuery, fields map[string]v3.AttributeKey) error {
func EnrichLogsQuery(query *v3.BuilderQuery, fields map[string]v3.AttributeKey) error {
// enrich aggregation attribute
if query.AggregateAttribute.Key != "" {
query.AggregateAttribute = enrichFieldWithMetadata(query.AggregateAttribute, fields)

View File

@@ -486,12 +486,6 @@ func AddOffsetToQuery(query string, offset uint64) string {
return fmt.Sprintf("%s OFFSET %d", query, offset)
}
type Options struct {
GraphLimitQtype string
IsLivetailQuery bool
PreferRPM bool
}
func IsOrderByTs(orderBy []v3.OrderBy) bool {
if len(orderBy) == 1 && (orderBy[0].Key == constants.TIMESTAMP || orderBy[0].ColumnName == constants.TIMESTAMP) {
return true
@@ -502,7 +496,7 @@ func IsOrderByTs(orderBy []v3.OrderBy) bool {
// PrepareLogsQuery prepares the query for logs
// start and end are in epoch millisecond
// step is in seconds
func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options Options) (string, error) {
func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.LogQBOptions) (string, error) {
// adjust the start and end time to the step interval
// NOTE: Disabling this as it's creating confusion between charts and actual data
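A sketch of the new call shape after this signature change, with timestamps in epoch milliseconds and options mirroring the test data below (builderQuery is assumed to be a *v3.BuilderQuery prepared by the caller):

	query, err := PrepareLogsQuery(
		1680066360726, 1680066458000,
		v3.QueryTypeBuilder, v3.PanelTypeGraph,
		builderQuery,
		v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
	)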

View File

@@ -1201,7 +1201,7 @@ var testPrepLogsQueryData = []struct {
TableName string
AggregateOperator v3.AggregateOperator
ExpectedQuery string
Options Options
Options v3.LogQBOptions
}{
{
Name: "Test TS with limit- first",
@@ -1223,7 +1223,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by value DESC) LIMIT 10",
Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
},
{
Name: "Test TS with limit- first - with order by value",
@@ -1246,7 +1246,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by value ASC) LIMIT 10",
Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
},
{
Name: "Test TS with limit- first - with order by attribute",
@@ -1269,7 +1269,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by `method` ASC) LIMIT 10",
Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
},
{
Name: "Test TS with limit- second",
@@ -1291,7 +1291,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') AND (`method`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `method`,ts order by value DESC",
Options: Options{GraphLimitQtype: constants.SecondQueryGraphLimit},
Options: v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit},
},
{
Name: "Test TS with limit- second - with order by",
@@ -1314,7 +1314,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') AND (`method`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `method`,ts order by `method` ASC",
Options: Options{GraphLimitQtype: constants.SecondQueryGraphLimit},
Options: v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit},
},
// Live tail
{
@@ -1334,7 +1334,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND ",
Options: Options{IsLivetailQuery: true},
Options: v3.LogQBOptions{IsLivetailQuery: true},
},
{
Name: "Live Tail Query with contains",
@@ -1353,7 +1353,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where attributes_string_value[indexOf(attributes_string_key, 'method')] ILIKE '%GET%' AND ",
Options: Options{IsLivetailQuery: true},
Options: v3.LogQBOptions{IsLivetailQuery: true},
},
{
Name: "Live Tail Query W/O filter",
@@ -1369,7 +1369,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where ",
Options: Options{IsLivetailQuery: true},
Options: v3.LogQBOptions{IsLivetailQuery: true},
},
{
Name: "Table query w/o limit",
@@ -1385,7 +1385,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by value DESC",
Options: Options{},
Options: v3.LogQBOptions{},
},
{
Name: "Table query with limit",
@@ -1402,7 +1402,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by value DESC LIMIT 10",
Options: Options{},
Options: v3.LogQBOptions{},
},
{
Name: "Ignore offset if order by is timestamp in list queries",
@@ -1488,7 +1488,7 @@ var testPrepLogsQueryLimitOffsetData = []struct {
TableName string
AggregateOperator v3.AggregateOperator
ExpectedQuery string
Options Options
Options v3.LogQBOptions
}{
{
Name: "Test limit less than pageSize - order by ts",

View File

@@ -451,7 +451,11 @@ func buildLogsLiveTailQuery(mq *v3.BuilderQuery) (string, error) {
}
// join both the filter clauses
if resourceSubQuery != "" {
filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery
if filterSubQuery != "" {
filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery
} else {
filterSubQuery = "(resource_fingerprint GLOBAL IN " + resourceSubQuery
}
}
// the reader will add the timestamp and id filters
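// Example of the fix above: with no attribute filters, filterSubQuery starts out empty,
// so the joined clause is now "(resource_fingerprint GLOBAL IN <subquery>" instead of
// the previously malformed " AND (resource_fingerprint GLOBAL IN <subquery>".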

View File

@@ -9,6 +9,7 @@ import (
"time"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/cache/status"
@@ -19,6 +20,7 @@ import (
)
func prepareLogsQuery(_ context.Context,
useLogsNewSchema bool,
start,
end int64,
builderQuery *v3.BuilderQuery,
@@ -27,30 +29,35 @@ func prepareLogsQuery(_ context.Context,
) (string, error) {
query := ""
logsQueryBuilder := logsV3.PrepareLogsQuery
if useLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
if params == nil || builderQuery == nil {
return query, fmt.Errorf("params and builderQuery cannot be nil")
}
// for a ts query with limit and group by, replace it here as it is already formed (built in two steps: a limit query and a placeholder query)
if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
limitQuery, err := logsV3.PrepareLogsQuery(
limitQuery, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
return query, err
}
placeholderQuery, err := logsV3.PrepareLogsQuery(
placeholderQuery, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
return query, err
@@ -59,13 +66,13 @@ func prepareLogsQuery(_ context.Context,
return query, err
}
query, err := logsV3.PrepareLogsQuery(
query, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{PreferRPM: preferRPM},
v3.LogQBOptions{PreferRPM: preferRPM},
)
if err != nil {
return query, err
@@ -77,7 +84,6 @@ func (q *querier) runBuilderQuery(
ctx context.Context,
builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3,
keys map[string]v3.AttributeKey,
cacheKeys map[string]string,
ch chan channelResult,
wg *sync.WaitGroup,
@@ -102,7 +108,7 @@ func (q *querier) runBuilderQuery(
var query string
var err error
if _, ok := cacheKeys[queryName]; !ok {
query, err = prepareLogsQuery(ctx, start, end, builderQuery, params, preferRPM)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params, preferRPM)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -126,7 +132,7 @@ func (q *querier) runBuilderQuery(
missedSeries := make([]*v3.Series, 0)
cachedSeries := make([]*v3.Series, 0)
for _, miss := range misses {
query, err = prepareLogsQuery(ctx, miss.start, miss.end, builderQuery, params, preferRPM)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.start, miss.end, builderQuery, params, preferRPM)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -196,7 +202,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
@@ -208,7 +213,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
@@ -222,7 +226,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{PreferRPM: preferRPM},
)
if err != nil {
@@ -333,7 +336,6 @@ func (q *querier) runBuilderExpression(
ctx context.Context,
builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3,
keys map[string]v3.AttributeKey,
cacheKeys map[string]string,
ch chan channelResult,
wg *sync.WaitGroup,
@@ -342,7 +344,7 @@ func (q *querier) runBuilderExpression(
queryName := builderQuery.QueryName
queries, err := q.builder.PrepareQueries(params, keys)
queries, err := q.builder.PrepareQueries(params)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: "", Series: nil}
return
@@ -377,7 +379,7 @@ func (q *querier) runBuilderExpression(
NoCache: params.NoCache,
CompositeQuery: params.CompositeQuery,
Variables: params.Variables,
}, keys)
})
query := missQueries[queryName]
series, err := q.execClickHouseQuery(ctx, query)
if err != nil {

View File

@@ -11,10 +11,12 @@ import (
"time"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
chErrors "go.signoz.io/signoz/pkg/query-service/errors"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/interfaces"
@@ -73,6 +75,11 @@ type QuerierOptions struct {
}
func NewQuerier(opts QuerierOptions) interfaces.Querier {
logsQueryBuilder := logsV3.PrepareLogsQuery
if opts.UseLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
return &querier{
cache: opts.Cache,
reader: opts.Reader,
@@ -81,14 +88,15 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
builder: queryBuilder.NewQueryBuilder(queryBuilder.QueryBuilderOptions{
BuildTraceQuery: tracesV3.PrepareTracesQuery,
BuildLogQuery: logsV3.PrepareLogsQuery,
BuildLogQuery: logsQueryBuilder,
BuildMetricQuery: metricsV3.PrepareMetricQuery,
}, opts.FeatureLookup),
featureLookUp: opts.FeatureLookup,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
UseLogsNewSchema: opts.UseLogsNewSchema,
}
}
@@ -296,7 +304,7 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
return mergedSeries
}
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
cacheKeys := q.keyGenerator.GenerateKeys(params)
@@ -309,9 +317,9 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
}
wg.Add(1)
if queryName == builderQuery.Expression {
go q.runBuilderQuery(ctx, builderQuery, params, keys, cacheKeys, ch, &wg)
go q.runBuilderQuery(ctx, builderQuery, params, cacheKeys, ch, &wg)
} else {
go q.runBuilderExpression(ctx, builderQuery, params, keys, cacheKeys, ch, &wg)
go q.runBuilderExpression(ctx, builderQuery, params, cacheKeys, ch, &wg)
}
}
@@ -469,9 +477,80 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang
return results, errQueriesByName, err
}
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) {
res := make([]*v3.Result, 0)
qName := ""
pageSize := uint64(0)
queries, err := q.builder.PrepareQueries(params, keys)
// since we are considering only one query
for name, v := range params.CompositeQuery.BuilderQueries {
qName = name
pageSize = v.PageSize
}
data := []*v3.Row{}
for _, v := range tsRanges {
params.Start = v.Start
params.End = v.End
params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data))
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
}
// this will run only once
for name, query := range queries {
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
errs := []error{err}
errQueriesByName := map[string]error{
name: err,
}
return nil, errQueriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
}
data = append(data, rowList...)
}
// append an `id <` filter so the next window resumes after the last returned row
if len(data) > 0 {
params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{
Key: "id",
IsColumn: true,
DataType: "string",
},
Operator: v3.FilterOperatorLessThan,
Value: data[len(data)-1].Data["id"],
})
}
if uint64(len(data)) >= pageSize {
break
}
}
res = append(res, &v3.Result{
QueryName: qName,
List: data,
})
return res, nil, nil
}
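runLogsListQuery walks a series of time windows produced by utils.GetLogsListTsRanges, whose body is not part of this diff. A plausible sketch of such a helper, assuming exponentially growing windows walked newest-first so the freshest logs fill the page quickly (illustrative only, not the shipped implementation):

package utils

import "time"

// LogsListTsRange is one window scanned by the paginated list query.
// Field names match the usage above (params.Start = v.Start, params.End = v.End).
type LogsListTsRange struct {
	Start int64 // epoch ms
	End   int64 // epoch ms
}

// GetLogsListTsRanges (sketch): walk backwards from end, doubling the window
// each step, so the most recent window is queried first and older data is
// touched only if the page is still short.
func GetLogsListTsRanges(start, end int64) []LogsListTsRange {
	window := int64(time.Hour / time.Millisecond) // 1h in ms, assumed seed size
	ranges := []LogsListTsRange{}
	for cur := end; cur > start; window *= 2 {
		from := cur - window
		if from < start {
			from = start
		}
		ranges = append(ranges, LogsListTsRange{Start: from, End: cur})
		cur = from
	}
	return ranges
}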
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
// the list view supports only one builder query
if q.UseLogsNewSchema && params.CompositeQuery != nil && len(params.CompositeQuery.BuilderQueries) == 1 {
for _, v := range params.CompositeQuery.BuilderQueries {
// only allow logs queries ordered by timestamp desc
if v.DataSource == v3.DataSourceLogs && len(v.OrderBy) == 1 && v.OrderBy[0].ColumnName == "timestamp" && v.OrderBy[0].Order == "desc" {
startEndArr := utils.GetLogsListTsRanges(params.Start, params.End)
if len(startEndArr) > 0 {
return q.runLogsListQuery(ctx, params, startEndArr)
}
}
}
}
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
@@ -487,7 +566,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query}
ch <- channelResult{Err: err, Name: name, Query: query}
return
}
ch <- channelResult{List: rowList, Name: name, Query: query}
@@ -518,7 +597,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
return res, nil, nil
}
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
var results []*v3.Result
var err error
var errQueriesByName map[string]error
@@ -526,9 +605,9 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3,
switch params.CompositeQuery.QueryType {
case v3.QueryTypeBuilder:
if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params, keys)
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
} else {
results, errQueriesByName, err = q.runBuilderQueries(ctx, params, keys)
results, errQueriesByName, err = q.runBuilderQueries(ctx, params)
}
// in builder queries, the only errors we expose are the ones that exceed the resource limits;
// everything else is an internal error, as it is not actionable by the user
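The tests further down show the resulting contract change at every call site: attribute metadata is applied once up front, and QueryRange drops the keys argument. Distilled into a sketch (the querier value and params are assumed to come from the surrounding application):

package main

import (
	"context"

	tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

// runRange enriches the request once (tracesV3.Enrich mutates params in place)
// and then calls the slimmed-down QueryRange signature.
func runRange(ctx context.Context, q interfaces.Querier, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
	tracesV3.Enrich(params, keys)
	return q.QueryRange(ctx, params)
}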

View File

@@ -8,6 +8,7 @@ import (
"time"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/cache/inmemory"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
@@ -584,7 +585,8 @@ func TestQueryRange(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -693,7 +695,8 @@ func TestQueryRangeValueType(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -746,7 +749,8 @@ func TestQueryRangeTimeShift(t *testing.T) {
expectedTimeRangeInQueryString := fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000)
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -844,7 +848,8 @@ func TestQueryRangeTimeShiftWithCache(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -944,7 +949,8 @@ func TestQueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -1033,7 +1039,8 @@ func TestQueryRangeValueTypePromQL(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}

View File

@@ -9,6 +9,7 @@ import (
"time"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
@@ -19,12 +20,17 @@ import (
)
func prepareLogsQuery(_ context.Context,
useLogsNewSchema bool,
start,
end int64,
builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3,
preferRPM bool,
) (string, error) {
logsQueryBuilder := logsV3.PrepareLogsQuery
if useLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
query := ""
if params == nil || builderQuery == nil {
@@ -33,24 +39,24 @@ func prepareLogsQuery(_ context.Context,
// for a ts query with limit and group by, build the limit and placeholder queries and combine them
if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
limitQuery, err := logsV3.PrepareLogsQuery(
limitQuery, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
return query, err
}
placeholderQuery, err := logsV3.PrepareLogsQuery(
placeholderQuery, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
return query, err
@@ -59,13 +65,13 @@ func prepareLogsQuery(_ context.Context,
return query, err
}
query, err := logsV3.PrepareLogsQuery(
query, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{PreferRPM: preferRPM},
v3.LogQBOptions{PreferRPM: preferRPM},
)
if err != nil {
return query, err
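prepareLogsQuery picks its builder from the flag on every call, while NewQuerier (above and below) makes the same choice once at construction; since logsV3 and logsV4 now share the v3.LogQBOptions type, the two builders are drop-in replacements for each other. The selection pattern, distilled:

package main

import (
	logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
	logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

// logsQueryBuilderFunc matches the prepareLogsQueryFunc shape used by the
// query builder below.
type logsQueryBuilderFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType,
	bq *v3.BuilderQuery, options v3.LogQBOptions) (string, error)

// pickLogsQueryBuilder is the one decision point the feature flag controls.
func pickLogsQueryBuilder(useLogsNewSchema bool) logsQueryBuilderFunc {
	if useLogsNewSchema {
		return logsV4.PrepareLogsQuery
	}
	return logsV3.PrepareLogsQuery
}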
@@ -77,7 +83,6 @@ func (q *querier) runBuilderQuery(
ctx context.Context,
builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3,
keys map[string]v3.AttributeKey,
cacheKeys map[string]string,
ch chan channelResult,
wg *sync.WaitGroup,
@@ -104,7 +109,7 @@ func (q *querier) runBuilderQuery(
var query string
var err error
if _, ok := cacheKeys[queryName]; !ok {
query, err = prepareLogsQuery(ctx, start, end, builderQuery, params, preferRPM)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params, preferRPM)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -127,7 +132,7 @@ func (q *querier) runBuilderQuery(
missedSeries := make([]*v3.Series, 0)
cachedSeries := make([]*v3.Series, 0)
for _, miss := range misses {
query, err = prepareLogsQuery(ctx, miss.start, miss.end, builderQuery, params, preferRPM)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.start, miss.end, builderQuery, params, preferRPM)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -195,7 +200,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
@@ -207,7 +211,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
@@ -221,7 +224,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{PreferRPM: preferRPM},
)
if err != nil {

View File

@@ -11,10 +11,12 @@ import (
"time"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
chErrors "go.signoz.io/signoz/pkg/query-service/errors"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/interfaces"
@@ -51,10 +53,9 @@ type querier struct {
testingMode bool
queriesExecuted []string
// tuple of start and end time in milliseconds
timeRanges [][]int
returnedSeries []*v3.Series
returnedErr error
timeRanges [][]int
returnedSeries []*v3.Series
returnedErr error
UseLogsNewSchema bool
}
@@ -73,6 +74,11 @@ type QuerierOptions struct {
}
func NewQuerier(opts QuerierOptions) interfaces.Querier {
logsQueryBuilder := logsV3.PrepareLogsQuery
if opts.UseLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
return &querier{
cache: opts.Cache,
reader: opts.Reader,
@@ -81,14 +87,15 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
builder: queryBuilder.NewQueryBuilder(queryBuilder.QueryBuilderOptions{
BuildTraceQuery: tracesV3.PrepareTracesQuery,
BuildLogQuery: logsV3.PrepareLogsQuery,
BuildLogQuery: logsQueryBuilder,
BuildMetricQuery: metricsV4.PrepareMetricQuery,
}, opts.FeatureLookup),
featureLookUp: opts.FeatureLookup,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
UseLogsNewSchema: opts.UseLogsNewSchema,
}
}
@@ -311,7 +318,7 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
return mergedSeries
}
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
cacheKeys := q.keyGenerator.GenerateKeys(params)
@@ -321,7 +328,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
for queryName, builderQuery := range params.CompositeQuery.BuilderQueries {
if queryName == builderQuery.Expression {
wg.Add(1)
go q.runBuilderQuery(ctx, builderQuery, params, keys, cacheKeys, ch, &wg)
go q.runBuilderQuery(ctx, builderQuery, params, cacheKeys, ch, &wg)
}
}
@@ -478,9 +485,80 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang
return results, errQueriesByName, err
}
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) {
res := make([]*v3.Result, 0)
qName := ""
pageSize := uint64(0)
queries, err := q.builder.PrepareQueries(params, keys)
// since we are considering only one query
for name, v := range params.CompositeQuery.BuilderQueries {
qName = name
pageSize = v.PageSize
}
data := []*v3.Row{}
for _, v := range tsRanges {
params.Start = v.Start
params.End = v.End
params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data))
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
}
// this will run only once
for name, query := range queries {
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
errs := []error{err}
errQueriesByName := map[string]error{
name: err,
}
return nil, errQueriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
}
data = append(data, rowList...)
}
// append an `id <` filter so the next window resumes after the last returned row
if len(data) > 0 {
params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{
Key: "id",
IsColumn: true,
DataType: "string",
},
Operator: v3.FilterOperatorLessThan,
Value: data[len(data)-1].Data["id"],
})
}
if uint64(len(data)) >= pageSize {
break
}
}
res = append(res, &v3.Result{
QueryName: qName,
List: data,
})
return res, nil, nil
}
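The `id <` filter appended after each window is keyset pagination: assuming `id` is a lexicographically sortable identifier (a ULID-style id would satisfy the `<` comparison, though this diff does not show how it is generated), the last returned row becomes an exclusive upper bound and no row is returned twice. The step in isolation:

package main

import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

// nextPageFilter reproduces the pagination step above: resume strictly after
// the last row already sent to the client.
func nextPageFilter(lastSeenID interface{}) v3.FilterItem {
	return v3.FilterItem{
		Key:      v3.AttributeKey{Key: "id", IsColumn: true, DataType: "string"},
		Operator: v3.FilterOperatorLessThan,
		Value:    lastSeenID,
	}
}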
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
// the list view supports only one builder query
if q.UseLogsNewSchema && params.CompositeQuery != nil && len(params.CompositeQuery.BuilderQueries) == 1 {
for _, v := range params.CompositeQuery.BuilderQueries {
// only allow logs queries ordered by timestamp desc
if v.DataSource == v3.DataSourceLogs && len(v.OrderBy) == 1 && v.OrderBy[0].ColumnName == "timestamp" && v.OrderBy[0].Order == "desc" {
startEndArr := utils.GetLogsListTsRanges(params.Start, params.End)
if len(startEndArr) > 0 {
return q.runLogsListQuery(ctx, params, startEndArr)
}
}
}
}
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
@@ -496,7 +574,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query}
ch <- channelResult{Err: err, Name: name, Query: query}
return
}
ch <- channelResult{List: rowList, Name: name, Query: query}
@@ -529,7 +607,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
// QueryRange is the main function that runs the queries
// and returns the results
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
var results []*v3.Result
var err error
var errQueriesByName map[string]error
@@ -537,9 +615,9 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3,
switch params.CompositeQuery.QueryType {
case v3.QueryTypeBuilder:
if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params, keys)
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
} else {
results, errQueriesByName, err = q.runBuilderQueries(ctx, params, keys)
results, errQueriesByName, err = q.runBuilderQueries(ctx, params)
}
// in builder queries, the only errors we expose are the ones that exceed the resource limits;
// everything else is an internal error, as it is not actionable by the user

View File

@@ -8,6 +8,7 @@ import (
"time"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/cache/inmemory"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
@@ -593,7 +594,8 @@ func TestV2QueryRangePanelGraph(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -738,7 +740,8 @@ func TestV2QueryRangeValueType(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -792,7 +795,8 @@ func TestV2QueryRangeTimeShift(t *testing.T) {
expectedTimeRangeInQueryString := fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000)
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -892,7 +896,8 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -994,7 +999,8 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -1085,7 +1091,8 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}

View File

@@ -5,7 +5,6 @@ import (
"strings"
"github.com/SigNoz/govaluate"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/cache"
@@ -43,8 +42,8 @@ var SupportedFunctions = []string{
var EvalFuncs = map[string]govaluate.ExpressionFunction{}
type prepareTracesQueryFunc func(start, end int64, panelType v3.PanelType, bq *v3.BuilderQuery, keys map[string]v3.AttributeKey, options tracesV3.Options) (string, error)
type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options logsV3.Options) (string, error)
type prepareTracesQueryFunc func(start, end int64, panelType v3.PanelType, bq *v3.BuilderQuery, options tracesV3.Options) (string, error)
type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options v3.LogQBOptions) (string, error)
type prepareMetricQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options metricsV3.Options) (string, error)
type QueryBuilder struct {
@@ -162,7 +161,7 @@ func (qb *QueryBuilder) PrepareLiveTailQuery(params *v3.QueryRangeParamsV3) (str
}
for queryName, query := range compositeQuery.BuilderQueries {
if query.Expression == queryName {
queryStr, err = qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{IsLivetailQuery: true})
queryStr, err = qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{IsLivetailQuery: true})
if err != nil {
return "", err
}
@@ -173,7 +172,7 @@ func (qb *QueryBuilder) PrepareLiveTailQuery(params *v3.QueryRangeParamsV3) (str
return queryStr, nil
}
func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...interface{}) (map[string]string, error) {
func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) (map[string]string, error) {
queries := make(map[string]string)
compositeQuery := params.CompositeQuery
@@ -193,19 +192,15 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
if query.Expression == queryName {
switch query.DataSource {
case v3.DataSourceTraces:
keys := map[string]v3.AttributeKey{}
if len(args) > 0 {
keys = args[0].(map[string]v3.AttributeKey)
}
// for a ts query with group by and limit, form two queries
if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
limitQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, query,
keys, tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
placeholderQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType,
query, keys, tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
query, tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
@@ -213,7 +208,7 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
queries[queryName] = query
} else {
queryString, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType,
query, keys, tracesV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
query, tracesV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
if err != nil {
return nil, err
}
@@ -222,18 +217,18 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
case v3.DataSourceLogs:
// for a ts query with limit and group by, build the limit and placeholder queries and combine them
if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
query := fmt.Sprintf(placeholderQuery, limitQuery)
queries[queryName] = query
} else {
queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
if err != nil {
return nil, err
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/stretchr/testify/require"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
"go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/featureManager"
@@ -585,6 +586,217 @@ func TestLogsQueryWithFormula(t *testing.T) {
}
var testLogsWithFormulaV2 = []struct {
Name string
Query *v3.QueryRangeParamsV3
ExpectedQuery string
}{
{
Name: "test formula without dot in filter and group by attribute",
Query: &v3.QueryRangeParamsV3{
Start: 1702979275000000000,
End: 1702981075000000000,
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeGraph,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "A",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
},
},
"B": {
QueryName: "B",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key_2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "B",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
},
},
"C": {
QueryName: "C",
Expression: "A + B",
},
},
},
},
ExpectedQuery: "SELECT A.`key_1` as `key_1`, A.`ts` as `ts`, A.value + B.value as value FROM " +
"(SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_bool['key_1'] as `key_1`, toFloat64(count(*)) as value from " +
"signoz_logs.distributed_logs_v2 where (timestamp >= 1702979275000000000 AND timestamp <= 1702981075000000000) AND (ts_bucket_start >= 1702977475 AND ts_bucket_start <= 1702981075) " +
"AND attributes_bool['key_1'] = true AND mapContains(attributes_bool, 'key_1') AND mapContains(attributes_bool, 'key_1') group by `key_1`,ts order by value DESC) as A INNER JOIN (SELECT " +
"toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_bool['key_1'] as `key_1`, toFloat64(count(*)) as value " +
"from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979275000000000 AND timestamp <= 1702981075000000000) AND (ts_bucket_start >= 1702977475 AND ts_bucket_start <= 1702981075) " +
"AND attributes_bool['key_2'] = true AND mapContains(attributes_bool, 'key_2') AND mapContains(attributes_bool, 'key_1') group by `key_1`,ts order by value DESC) as B ON A.`key_1` = B.`key_1` AND A.`ts` = B.`ts`",
},
{
Name: "test formula with dot in filter and group by attribute",
Query: &v3.QueryRangeParamsV3{
Start: 1702979056000000000,
End: 1702982656000000000,
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeTable,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "A",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
},
},
"B": {
QueryName: "B",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key1.2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "B",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
},
},
"C": {
QueryName: "C",
Expression: "A + B",
},
},
},
},
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT attributes_bool['key1.1'] as `key1.1`, " +
"toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) " +
"AND attributes_bool['key1.1'] = true AND mapContains(attributes_bool, 'key1.1') AND mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as A INNER JOIN (SELECT " +
"attributes_bool['key1.1'] as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) " +
"AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) AND attributes_bool['key1.2'] = true AND mapContains(attributes_bool, 'key1.2') AND " +
"mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
},
{
Name: "test formula with dot in filter and group by materialized attribute",
Query: &v3.QueryRangeParamsV3{
Start: 1702980884000000000,
End: 1702984484000000000,
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeGraph,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key_2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "A",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true},
},
},
"B": {
QueryName: "B",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "B",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true},
},
},
"C": {
QueryName: "C",
Expression: "A - B",
},
},
},
},
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value - B.value as value FROM (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, " +
"`attribute_bool_key1$$1` as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702980884000000000 AND timestamp <= 1702984484000000000) AND " +
"(ts_bucket_start >= 1702979084 AND ts_bucket_start <= 1702984484) AND `attribute_bool_key_2` = true AND `attribute_bool_key1$$1_exists`=true group by `key1.1`,ts order by value DESC) as " +
"A INNER JOIN (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, `attribute_bool_key1$$1` as `key1.1`, toFloat64(count(*)) as value from " +
"signoz_logs.distributed_logs_v2 where (timestamp >= 1702980884000000000 AND timestamp <= 1702984484000000000) AND (ts_bucket_start >= 1702979084 AND ts_bucket_start <= 1702984484) AND " +
"attributes_bool['key_1'] = true AND mapContains(attributes_bool, 'key_1') AND `attribute_bool_key1$$1_exists`=true group by `key1.1`,ts order by value DESC) as B " +
"ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
},
}
func TestLogsQueryWithFormulaV2(t *testing.T) {
t.Parallel()
qbOptions := QueryBuilderOptions{
BuildLogQuery: logsV4.PrepareLogsQuery,
}
fm := featureManager.StartManager()
qb := NewQueryBuilder(qbOptions, fm)
for _, test := range testLogsWithFormulaV2 {
t.Run(test.Name, func(t *testing.T) {
queries, err := qb.PrepareQueries(test.Query)
require.NoError(t, err)
require.Equal(t, test.ExpectedQuery, queries["C"])
})
}
}
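The expected V2 queries above all bound ts_bucket_start by the same arithmetic: the nanosecond start is truncated to seconds and pushed back by 1800 seconds (1702979275000000000 ns becomes 1702977475), presumably so logs landing in a bucket that opened up to 30 minutes before the query window are still covered. The back-off value is inferred from the test data, not from the v4 builder source:

package main

// tsBucketBounds reproduces the bucket arithmetic visible in the expected
// queries above.
func tsBucketBounds(startNs, endNs int64) (lo, hi int64) {
	const bucketBackoffSeconds = 1800 // assumed bucket-width safety margin
	return startNs/1e9 - bucketBackoffSeconds, endNs / 1e9
}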
func TestGenerateCacheKeysMetricsBuilder(t *testing.T) {
testCases := []struct {
name string

View File

@@ -710,7 +710,7 @@ func (s *Server) Stop() error {
}
func makeRulesManager(
promConfigPath,
_,
alertManagerURL string,
ruleRepoURL string,
db *sqlx.DB,

View File

@@ -58,8 +58,7 @@ var tracesOperatorMappingV3 = map[v3.FilterOperator]string{
v3.FilterOperatorNotExists: "NOT has(%s%s, '%s')",
}
func getColumnName(key v3.AttributeKey, keys map[string]v3.AttributeKey) string {
key = enrichKeyWithMetadata(key, keys)
func getColumnName(key v3.AttributeKey) string {
if key.IsColumn {
return key.Key
}
@@ -102,13 +101,13 @@ func enrichKeyWithMetadata(key v3.AttributeKey, keys map[string]v3.AttributeKey)
}
// getSelectLabels returns the select labels for the query based on groupBy and aggregateOperator
func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey, keys map[string]v3.AttributeKey) string {
func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
var selectLabels string
if aggregatorOperator == v3.AggregateOperatorNoOp {
selectLabels = ""
} else {
for _, tag := range groupBy {
filterName := getColumnName(tag, keys)
filterName := getColumnName(tag)
selectLabels += fmt.Sprintf(" %s as `%s`,", filterName, tag.Key)
}
}
@@ -127,10 +126,10 @@ func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attribu
return strings.Join(selectLabels, ",")
}
func getSelectColumns(sc []v3.AttributeKey, keys map[string]v3.AttributeKey) string {
func getSelectColumns(sc []v3.AttributeKey) string {
var columns []string
for _, tag := range sc {
columnName := getColumnName(tag, keys)
columnName := getColumnName(tag)
columns = append(columns, fmt.Sprintf("%s as `%s` ", columnName, tag.Key))
}
return strings.Join(columns, ",")
@@ -150,20 +149,19 @@ func getZerosForEpochNano(epoch int64) int64 {
return int64(math.Pow(10, float64(19-count)))
}
func buildTracesFilterQuery(fs *v3.FilterSet, keys map[string]v3.AttributeKey) (string, error) {
func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
var conditions []string
if fs != nil && len(fs.Items) != 0 {
for _, item := range fs.Items {
val := item.Value
// generate the key
columnName := getColumnName(item.Key, keys)
columnName := getColumnName(item.Key)
var fmtVal string
key := enrichKeyWithMetadata(item.Key, keys)
item.Operator = v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
if item.Operator != v3.FilterOperatorExists && item.Operator != v3.FilterOperatorNotExists {
var err error
val, err = utils.ValidateAndCastValue(val, key.DataType)
val, err = utils.ValidateAndCastValue(val, item.Key.DataType)
if err != nil {
return "", fmt.Errorf("invalid value for key %s: %v", item.Key.Key, err)
}
@@ -179,15 +177,15 @@ func buildTracesFilterQuery(fs *v3.FilterSet, keys map[string]v3.AttributeKey) (
case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
if key.IsColumn {
subQuery, err := existsSubQueryForFixedColumn(key, item.Operator)
if item.Key.IsColumn {
subQuery, err := existsSubQueryForFixedColumn(item.Key, item.Operator)
if err != nil {
return "", err
}
conditions = append(conditions, subQuery)
} else {
columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(key)
conditions = append(conditions, fmt.Sprintf(operator, columnDataType, columnType, key.Key))
columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(item.Key)
conditions = append(conditions, fmt.Sprintf(operator, columnDataType, columnType, item.Key.Key))
}
default:
@@ -218,12 +216,11 @@ func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (st
}
}
func handleEmptyValuesInGroupBy(keys map[string]v3.AttributeKey, groupBy []v3.AttributeKey) (string, error) {
func handleEmptyValuesInGroupBy(groupBy []v3.AttributeKey) (string, error) {
filterItems := []v3.FilterItem{}
if len(groupBy) != 0 {
for _, item := range groupBy {
key := enrichKeyWithMetadata(item, keys)
if !key.IsColumn {
if !item.IsColumn {
filterItems = append(filterItems, v3.FilterItem{
Key: item,
Operator: v3.FilterOperatorExists,
@@ -236,21 +233,21 @@ func handleEmptyValuesInGroupBy(keys map[string]v3.AttributeKey, groupBy []v3.At
Operator: "AND",
Items: filterItems,
}
return buildTracesFilterQuery(&filterSet, keys)
return buildTracesFilterQuery(&filterSet)
}
return "", nil
}
func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName string, keys map[string]v3.AttributeKey, panelType v3.PanelType, options Options) (string, error) {
func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, panelType v3.PanelType, options Options) (string, error) {
filterSubQuery, err := buildTracesFilterQuery(mq.Filters, keys)
filterSubQuery, err := buildTracesFilterQuery(mq.Filters)
if err != nil {
return "", err
}
// time range will be sent in epoch milliseconds
spanIndexTableTimeFilter := fmt.Sprintf("(timestamp >= '%d' AND timestamp <= '%d')", start*getZerosForEpochNano(start), end*getZerosForEpochNano(end))
selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy, keys)
selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
having := having(mq.Having)
if having != "" {
@@ -283,7 +280,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
}
emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(keys, mq.GroupBy)
emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy)
if err != nil {
return "", err
}
@@ -293,8 +290,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
if groupBy != "" {
groupBy = " group by " + groupBy
}
enrichedOrderBy := enrichOrderBy(mq.OrderBy, keys)
orderBy := orderByAttributeKeyTags(panelType, enrichedOrderBy, mq.GroupBy, keys)
orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy)
if orderBy != "" {
orderBy = " order by " + orderBy
}
@@ -305,7 +301,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
aggregationKey := ""
if mq.AggregateAttribute.Key != "" {
aggregationKey = getColumnName(mq.AggregateAttribute, keys)
aggregationKey = getColumnName(mq.AggregateAttribute)
}
switch mq.AggregateOperator {
@@ -342,14 +338,13 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
return query, nil
case v3.AggregateOperatorCount:
if mq.AggregateAttribute.Key != "" {
key := enrichKeyWithMetadata(mq.AggregateAttribute, keys)
if key.IsColumn {
subQuery, err := existsSubQueryForFixedColumn(key, v3.FilterOperatorExists)
if mq.AggregateAttribute.IsColumn {
subQuery, err := existsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
if err == nil {
filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery)
}
} else {
columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(key)
columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(mq.AggregateAttribute)
filterSubQuery = fmt.Sprintf("%s AND has(%s%s, '%s')", filterSubQuery, columnDataType, columnType, mq.AggregateAttribute.Key)
}
}
@@ -373,7 +368,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
if len(mq.SelectColumns) == 0 {
return "", fmt.Errorf("select columns cannot be empty for panelType %s", panelType)
}
selectColumns := getSelectColumns(mq.SelectColumns, keys)
selectColumns := getSelectColumns(mq.SelectColumns)
queryNoOpTmpl := fmt.Sprintf("SELECT timestamp as timestamp_datetime, spanID, traceID, "+"%s ", selectColumns) + "from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_SPAN_INDEX_TABLENAME + " where %s %s" + "%s"
query = fmt.Sprintf(queryNoOpTmpl, spanIndexTableTimeFilter, filterSubQuery, orderBy)
} else {
@@ -423,7 +418,7 @@ func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tag
// orderBy returns a string of comma-separated tags for the order by clause
// if there are remaining items which are not present in tags, they are also added
// if the order is not specified, it defaults to ASC
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}, keys map[string]v3.AttributeKey) []string {
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string {
var orderBy []string
for _, item := range items {
@@ -433,7 +428,7 @@ func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]st
orderBy = append(orderBy, fmt.Sprintf("`%s` %s", item.ColumnName, item.Order))
} else if panelType == v3.PanelTypeList {
attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
name := getColumnName(attr, keys)
name := getColumnName(attr)
if item.IsColumn {
orderBy = append(orderBy, fmt.Sprintf("`%s` %s", name, item.Order))
} else {
@@ -445,13 +440,13 @@ func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]st
return orderBy
}
func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey, keys map[string]v3.AttributeKey) string {
func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string {
tagLookup := map[string]struct{}{}
for _, v := range tags {
tagLookup[v.Key] = struct{}{}
}
orderByArray := orderBy(panelType, items, tagLookup, keys)
orderByArray := orderBy(panelType, items, tagLookup)
if len(orderByArray) == 0 {
if panelType == v3.PanelTypeList {
@@ -474,7 +469,7 @@ func having(items []v3.Having) string {
return strings.Join(having, " AND ")
}
func reduceToQuery(query string, reduceTo v3.ReduceToOperator, aggregateOperator v3.AggregateOperator) (string, error) {
func reduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOperator) (string, error) {
var groupBy string
switch reduceTo {
@@ -508,13 +503,13 @@ func addOffsetToQuery(query string, offset uint64) string {
// PrepareTracesQuery returns the query string for traces
// start and end are in epoch milliseconds
// step is in seconds
func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, keys map[string]v3.AttributeKey, options Options) (string, error) {
func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options Options) (string, error) {
// adjust the start and end time to the step interval
start = start - (start % (mq.StepInterval * 1000))
end = end - (end % (mq.StepInterval * 1000))
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
// give me just the group by names
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, keys, panelType, options)
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, panelType, options)
if err != nil {
return "", err
}
@@ -522,14 +517,14 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder
return query, nil
} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, keys, panelType, options)
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, panelType, options)
if err != nil {
return "", err
}
return query, nil
}
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, keys, panelType, options)
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, panelType, options)
if err != nil {
return "", err
}
@@ -545,3 +540,34 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder
}
return query, err
}
func Enrich(params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) {
if params.CompositeQuery.QueryType == v3.QueryTypeBuilder {
for _, query := range params.CompositeQuery.BuilderQueries {
if query.DataSource == v3.DataSourceTraces {
EnrichTracesQuery(query, keys)
}
}
}
}
func EnrichTracesQuery(query *v3.BuilderQuery, keys map[string]v3.AttributeKey) {
// enrich aggregate attribute
query.AggregateAttribute = enrichKeyWithMetadata(query.AggregateAttribute, keys)
// enrich filter items
if query.Filters != nil && len(query.Filters.Items) > 0 {
for idx, filter := range query.Filters.Items {
query.Filters.Items[idx].Key = enrichKeyWithMetadata(filter.Key, keys)
}
}
// enrich group by
for idx, groupBy := range query.GroupBy {
query.GroupBy[idx] = enrichKeyWithMetadata(groupBy, keys)
}
// enrich order by
query.OrderBy = enrichOrderBy(query.OrderBy, keys)
// enrich select columns
for idx, selectColumn := range query.SelectColumns {
query.SelectColumns[idx] = enrichKeyWithMetadata(selectColumn, keys)
}
}
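Enrich is the other half of the keys-map removal: metadata is applied to the BuilderQuery once, after which PrepareTracesQuery no longer needs the map. A sketch of the two-step flow (the query name "A" is illustrative):

package main

import (
	tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

// buildTraceSQL enriches the whole request once, then builds SQL for one
// query without threading the keys map any further.
func buildTraceSQL(params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) (string, error) {
	tracesV3.Enrich(params, keys)
	bq := params.CompositeQuery.BuilderQueries["A"] // assumes a builder query named "A"
	return tracesV3.PrepareTracesQuery(params.Start, params.End, params.CompositeQuery.PanelType, bq, tracesV3.Options{})
}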

View File

@@ -133,7 +133,7 @@ var buildFilterQueryData = []struct {
func TestBuildTracesFilterQuery(t *testing.T) {
for _, tt := range buildFilterQueryData {
Convey("TestBuildTracesFilterQuery", t, func() {
query, err := buildTracesFilterQuery(tt.FilterSet, map[string]v3.AttributeKey{})
query, err := buildTracesFilterQuery(tt.FilterSet)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedFilter)
})
@@ -169,7 +169,7 @@ var handleEmptyValuesInGroupByData = []struct {
func TestBuildTracesHandleEmptyValuesInGroupBy(t *testing.T) {
for _, tt := range handleEmptyValuesInGroupByData {
Convey("TestBuildTracesHandleEmptyValuesInGroupBy", t, func() {
query, err := handleEmptyValuesInGroupBy(map[string]v3.AttributeKey{}, tt.GroupBy)
query, err := handleEmptyValuesInGroupBy(tt.GroupBy)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedFilter)
})
@@ -220,8 +220,9 @@ var testColumnName = []struct {
func TestColumnName(t *testing.T) {
for _, tt := range testColumnName {
tt.AttributeKey = enrichKeyWithMetadata(tt.AttributeKey, map[string]v3.AttributeKey{})
Convey("testColumnName", t, func() {
Column := getColumnName(tt.AttributeKey, map[string]v3.AttributeKey{})
Column := getColumnName(tt.AttributeKey)
So(Column, ShouldEqual, tt.ExpectedColumn)
})
}
@@ -265,7 +266,7 @@ var testGetSelectLabelsData = []struct {
func TestGetSelectLabels(t *testing.T) {
for _, tt := range testGetSelectLabelsData {
Convey("testGetSelectLabelsData", t, func() {
selectLabels := getSelectLabels(tt.AggregateOperator, tt.GroupByTags, map[string]v3.AttributeKey{})
selectLabels := getSelectLabels(tt.AggregateOperator, tt.GroupByTags)
So(selectLabels, ShouldEqual, tt.SelectLabels)
})
}
@@ -304,7 +305,7 @@ var testGetSelectColumnsData = []struct {
func TestGetSelectColumns(t *testing.T) {
for _, tt := range testGetSelectColumnsData {
Convey("testGetSelectColumnsData", t, func() {
selectColumns := getSelectColumns(tt.sc, map[string]v3.AttributeKey{})
selectColumns := getSelectColumns(tt.sc)
So(selectColumns, ShouldEqual, tt.SelectColumns)
})
}
@@ -464,13 +465,15 @@ var testOrderBy = []struct {
}
func TestOrderBy(t *testing.T) {
keys := map[string]v3.AttributeKey{
"name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
"bytes": {Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
"response_time": {Key: "response_time", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: false},
}
for _, tt := range testOrderBy {
Convey("testOrderBy", t, func() {
res := orderByAttributeKeyTags(tt.PanelType, tt.Items, tt.Tags, map[string]v3.AttributeKey{
"name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
"bytes": {Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
"response_time": {Key: "response_time", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: false},
})
tt.Items = enrichOrderBy(tt.Items, keys)
res := orderByAttributeKeyTags(tt.PanelType, tt.Items, tt.Tags)
So(res, ShouldResemble, tt.Result)
})
}
@@ -1171,11 +1174,24 @@ var testBuildTracesQueryData = []struct {
}
func TestBuildTracesQuery(t *testing.T) {
keys := map[string]v3.AttributeKey{
"name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
}
for _, tt := range testBuildTracesQueryData {
tt.BuilderQuery.DataSource = v3.DataSourceTraces
params := &v3.QueryRangeParamsV3{
Version: "v4",
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": tt.BuilderQuery,
},
},
}
Enrich(params, keys)
Convey("TestBuildTracesQuery", t, func() {
query, err := buildTracesQuery(tt.Start, tt.End, tt.BuilderQuery.StepInterval, tt.BuilderQuery, tt.TableName, map[string]v3.AttributeKey{
"name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
}, tt.PanelType, tt.Options)
query, err := buildTracesQuery(tt.Start, tt.End, tt.BuilderQuery.StepInterval, tt.BuilderQuery, tt.TableName, tt.PanelType, tt.Options)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedQuery)
})
@@ -1400,7 +1416,7 @@ var testPrepTracesQueryData = []struct {
func TestPrepareTracesQuery(t *testing.T) {
for _, tt := range testPrepTracesQueryData {
Convey("TestPrepareTracesQuery", t, func() {
query, err := PrepareTracesQuery(tt.Start, tt.End, tt.PanelType, tt.BuilderQuery, tt.Keys, tt.Options)
query, err := PrepareTracesQuery(tt.Start, tt.End, tt.PanelType, tt.BuilderQuery, tt.Options)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedQuery)
})

View File

@@ -8,6 +8,39 @@ import (
"go.uber.org/zap"
)
var TracesListViewDefaultSelectedColumns = []v3.AttributeKey{
{
Key: "serviceName",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
{
Key: "name",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
{
Key: "durationNano",
DataType: v3.AttributeKeyDataTypeArrayFloat64,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
{
Key: "httpMethod",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
{
Key: "responseStatusCode",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
}
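This list previously lived in constants (see the matching deletion further down); hoisting it into the traces package lets the new contextlinks package consume it, presumably to avoid an unwanted dependency on constants. Call sites change only their import:

package main

import (
	"fmt"

	tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
)

// Previously: constants.TracesListViewDefaultSelectedColumns
func main() {
	for _, col := range tracesV3.TracesListViewDefaultSelectedColumns {
		fmt.Println(col.Key) // serviceName, name, durationNano, httpMethod, responseStatusCode
	}
}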
// check if a traceId filter is used in the traces query and return the list of traceIds
func TraceIdFilterUsedWithEqual(params *v3.QueryRangeParamsV3) (bool, []string) {
compositeQuery := params.CompositeQuery

View File

@@ -1,10 +1,7 @@
package common
import (
"encoding/json"
"fmt"
"math"
"net/url"
"time"
"go.signoz.io/signoz/pkg/query-service/constants"
@@ -73,183 +70,3 @@ func LCMList(nums []int64) int64 {
}
return result
}
// TODO(srikanthccv): move the custom function in threshold_rule.go to here
func PrepareLinksToTraces(ts time.Time, filterItems []v3.FilterItem) string {
start := ts.Add(-time.Minute * 15)
end := ts.Add(time.Minute * 15)
// Traces list view expects time in nanoseconds
tr := v3.URLShareableTimeRange{
Start: start.UnixNano(),
End: end.UnixNano(),
PageSize: 100,
}
options := v3.URLShareableOptions{
MaxLines: 2,
Format: "list",
SelectColumns: constants.TracesListViewDefaultSelectedColumns,
}
period, _ := json.Marshal(tr)
urlEncodedTimeRange := url.QueryEscape(string(period))
urlData := v3.URLShareableCompositeQuery{
QueryType: string(v3.QueryTypeBuilder),
Builder: v3.URLShareableBuilderQuery{
QueryData: []v3.BuilderQuery{
{
DataSource: v3.DataSourceTraces,
QueryName: "A",
AggregateOperator: v3.AggregateOperatorNoOp,
AggregateAttribute: v3.AttributeKey{},
Filters: &v3.FilterSet{
Items: filterItems,
Operator: "AND",
},
Expression: "A",
Disabled: false,
Having: []v3.Having{},
StepInterval: 60,
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
},
},
QueryFormulas: make([]string, 0),
},
}
data, _ := json.Marshal(urlData)
compositeQuery := url.QueryEscape(url.QueryEscape(string(data)))
optionsData, _ := json.Marshal(options)
urlEncodedOptions := url.QueryEscape(string(optionsData))
return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions)
}
func PrepareLinksToLogs(ts time.Time, filterItems []v3.FilterItem) string {
start := ts.Add(-time.Minute * 15)
end := ts.Add(time.Minute * 15)
// Logs list view expects time in milliseconds
tr := v3.URLShareableTimeRange{
Start: start.UnixMilli(),
End: end.UnixMilli(),
PageSize: 100,
}
options := v3.URLShareableOptions{
MaxLines: 2,
Format: "list",
SelectColumns: []v3.AttributeKey{},
}
period, _ := json.Marshal(tr)
urlEncodedTimeRange := url.QueryEscape(string(period))
urlData := v3.URLShareableCompositeQuery{
QueryType: string(v3.QueryTypeBuilder),
Builder: v3.URLShareableBuilderQuery{
QueryData: []v3.BuilderQuery{
{
DataSource: v3.DataSourceLogs,
QueryName: "A",
AggregateOperator: v3.AggregateOperatorNoOp,
AggregateAttribute: v3.AttributeKey{},
Filters: &v3.FilterSet{
Items: filterItems,
Operator: "AND",
},
Expression: "A",
Disabled: false,
Having: []v3.Having{},
StepInterval: 60,
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
},
},
QueryFormulas: make([]string, 0),
},
}
data, _ := json.Marshal(urlData)
compositeQuery := url.QueryEscape(url.QueryEscape(string(data)))
optionsData, _ := json.Marshal(options)
urlEncodedOptions := url.QueryEscape(string(optionsData))
return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions)
}
// The following function is used to prepare the where clause for the query
// `lbls` contains the key value pairs of the labels from the result of the query
// We iterate over the where clause and replace the labels with the actual values
// There are two cases:
// 1. The label is present in the where clause
// 2. The label is not present in the where clause
//
// Example for case 2:
// Latency by serviceName without any filter
// In this case, for each service with latency > threshold we send a notification
// The expectation is that clicking on the related traces for service A will
// take us to the traces page with the filter serviceName=A
// So for all the missing labels in the where clause, we add them as key = value
//
// Example for case 1:
// Severity text IN (WARN, ERROR)
// In this case, the Severity text will appear in `lbls` if it was part of the group
// by clause, in which case we replace it with the actual value for the notification
// i.e Severity text = WARN
// If the Severity text is not part of the group by clause, then we add it as it is
func PrepareFilters(labels map[string]string, filters []v3.FilterItem) []v3.FilterItem {
var filterItems []v3.FilterItem
added := make(map[string]struct{})
for _, item := range filters {
exists := false
for key, value := range labels {
if item.Key.Key == key {
// if the label is present in the where clause, replace it with key = value
filterItems = append(filterItems, v3.FilterItem{
Key: item.Key,
Operator: v3.FilterOperatorEqual,
Value: value,
})
exists = true
added[key] = struct{}{}
break
}
}
if !exists {
// if the label is not present in the where clause, add it as it is
filterItems = append(filterItems, item)
}
}
// add the labels which are not present in the where clause
for key, value := range labels {
if _, ok := added[key]; !ok {
filterItems = append(filterItems, v3.FilterItem{
Key: v3.AttributeKey{Key: key},
Operator: v3.FilterOperatorEqual,
Value: value,
})
}
}
return filterItems
}

View File

@@ -401,39 +401,6 @@ const TIMESTAMP = "timestamp"
const FirstQueryGraphLimit = "first_query_graph_limit"
const SecondQueryGraphLimit = "second_query_graph_limit"
var TracesListViewDefaultSelectedColumns = []v3.AttributeKey{
{
Key: "serviceName",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
{
Key: "name",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
{
Key: "durationNano",
DataType: v3.AttributeKeyDataTypeArrayFloat64,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
{
Key: "httpMethod",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
{
Key: "responseStatusCode",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
}
const DefaultFilterSuggestionsAttributesLimit = 50
const MaxFilterSuggestionsAttributesLimit = 100
const DefaultFilterSuggestionsExamplesLimit = 2

View File

@@ -0,0 +1,203 @@
package contextlinks
import (
"encoding/json"
"fmt"
"net/url"
"time"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
func PrepareLinksToTraces(start, end time.Time, filterItems []v3.FilterItem) string {
// Traces list view expects time in nanoseconds
tr := v3.URLShareableTimeRange{
Start: start.UnixNano(),
End: end.UnixNano(),
PageSize: 100,
}
options := v3.URLShareableOptions{
MaxLines: 2,
Format: "list",
SelectColumns: tracesV3.TracesListViewDefaultSelectedColumns,
}
period, _ := json.Marshal(tr)
urlEncodedTimeRange := url.QueryEscape(string(period))
builderQuery := v3.BuilderQuery{
DataSource: v3.DataSourceTraces,
QueryName: "A",
AggregateOperator: v3.AggregateOperatorNoOp,
AggregateAttribute: v3.AttributeKey{},
Filters: &v3.FilterSet{
Items: filterItems,
Operator: "AND",
},
Expression: "A",
Disabled: false,
Having: []v3.Having{},
StepInterval: 60,
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
}
urlData := v3.URLShareableCompositeQuery{
QueryType: string(v3.QueryTypeBuilder),
Builder: v3.URLShareableBuilderQuery{
QueryData: []v3.BuilderQuery{
builderQuery,
},
QueryFormulas: make([]string, 0),
},
}
data, _ := json.Marshal(urlData)
compositeQuery := url.QueryEscape(url.QueryEscape(string(data)))
optionsData, _ := json.Marshal(options)
urlEncodedOptions := url.QueryEscape(string(optionsData))
return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions)
}
func PrepareLinksToLogs(start, end time.Time, filterItems []v3.FilterItem) string {
// Logs list view expects time in milliseconds
tr := v3.URLShareableTimeRange{
Start: start.UnixMilli(),
End: end.UnixMilli(),
PageSize: 100,
}
options := v3.URLShareableOptions{
MaxLines: 2,
Format: "list",
SelectColumns: []v3.AttributeKey{},
}
period, _ := json.Marshal(tr)
urlEncodedTimeRange := url.QueryEscape(string(period))
builderQuery := v3.BuilderQuery{
DataSource: v3.DataSourceLogs,
QueryName: "A",
AggregateOperator: v3.AggregateOperatorNoOp,
AggregateAttribute: v3.AttributeKey{},
Filters: &v3.FilterSet{
Items: filterItems,
Operator: "AND",
},
Expression: "A",
Disabled: false,
Having: []v3.Having{},
StepInterval: 60,
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
}
urlData := v3.URLShareableCompositeQuery{
QueryType: string(v3.QueryTypeBuilder),
Builder: v3.URLShareableBuilderQuery{
QueryData: []v3.BuilderQuery{
builderQuery,
},
QueryFormulas: make([]string, 0),
},
}
data, _ := json.Marshal(urlData)
compositeQuery := url.QueryEscape(url.QueryEscape(string(data)))
optionsData, _ := json.Marshal(options)
urlEncodedOptions := url.QueryEscape(string(optionsData))
return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions)
}
// The following function is used to prepare the where clause for the query
// `lbls` contains the key value pairs of the labels from the result of the query
// We iterate over the where clause and replace the labels with the actual values
// There are two cases:
// 1. The label is present in the where clause
// 2. The label is not present in the where clause
//
// Example for case 2:
// Latency by serviceName without any filter
// In this case, for each service with latency > threshold we send a notification
// The expectation is that clicking on the related traces for service A will
// take us to the traces page with the filter serviceName=A
// So for all the missing labels in the where clause, we add them as key = value
//
// Example for case 1:
// Severity text IN (WARN, ERROR)
// In this case, the Severity text will appear in `lbls` if it was part of the group
// by clause, in which case we replace it with the actual value for the notification
// i.e Severity text = WARN
// If the Severity text is not part of the group by clause, then we add it as it is
func PrepareFilters(labels map[string]string, whereClauseItems []v3.FilterItem, groupByItems []v3.AttributeKey, keys map[string]v3.AttributeKey) []v3.FilterItem {
var filterItems []v3.FilterItem
added := make(map[string]struct{})
for _, item := range whereClauseItems {
exists := false
for key, value := range labels {
if item.Key.Key == key {
// if the label is present in the where clause, replace it with key = value
filterItems = append(filterItems, v3.FilterItem{
Key: item.Key,
Operator: v3.FilterOperatorEqual,
Value: value,
})
exists = true
added[key] = struct{}{}
break
}
}
if !exists {
// if there is no label for the filter item, add it as it is
filterItems = append(filterItems, item)
}
}
// if there are labels which are not part of the where clause, but
// exist in the result, then they could be part of the group by clause
for key, value := range labels {
if _, ok := added[key]; !ok {
// start by taking the attribute key from the keys map; if not present, create a new one
attributeKey, ok := keys[key]
if !ok {
attributeKey = v3.AttributeKey{Key: key}
}
// if there is a group by item with the same key, use that instead
for _, groupByItem := range groupByItems {
if groupByItem.Key == key {
attributeKey = groupByItem
break
}
}
filterItems = append(filterItems, v3.FilterItem{
Key: attributeKey,
Operator: v3.FilterOperatorEqual,
Value: value,
})
}
}
return filterItems
}
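To make the two cases above concrete, here is a minimal sketch, not part of this change, of how a caller might stitch these helpers together; the service label, filters and time window are hypothetical:

package main

import (
	"fmt"
	"time"

	"go.signoz.io/signoz/pkg/query-service/contextlinks"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

func main() {
	// hypothetical 30-minute window around an alert evaluation
	end := time.Now()
	start := end.Add(-30 * time.Minute)

	// labels from the query result, e.g. a group by on serviceName
	labels := map[string]string{"serviceName": "checkout"}

	// empty where clause; serviceName comes from the group by instead (case 2 above)
	where := []v3.FilterItem{}
	groupBy := []v3.AttributeKey{{Key: "serviceName"}}
	keys := map[string]v3.AttributeKey{}

	filters := contextlinks.PrepareFilters(labels, where, groupBy, keys)
	fmt.Println(contextlinks.PrepareLinksToLogs(start, end, filters))
}

Note that the composite query is query-escaped twice, presumably because the frontend URL-decodes the parameter once more before parsing the JSON; the returned fragment is meant to be appended by the caller after the explorer path and `?`.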

View File

@@ -105,6 +105,7 @@ func InitDB(dataSourceName string) (*ModelDaoSqlite, error) {
telemetry.GetInstance().SetUserCountCallback(mds.GetUserCount)
telemetry.GetInstance().SetUserRoleCallback(mds.GetUserRole)
telemetry.GetInstance().SetGetUsersCallback(mds.GetUsers)
return mds, nil
}

View File

@@ -24,42 +24,62 @@ type Manager interface {
TestReceiver(receiver *Receiver) *model.ApiError
}
func New(url string) (Manager, error) {
func defaultOptions() []ManagerOptions {
return []ManagerOptions{
WithURL(constants.GetAlertManagerApiPrefix()),
WithChannelApiPath(constants.AmChannelApiPath),
}
}
if url == "" {
url = constants.GetAlertManagerApiPrefix()
type ManagerOptions func(m *manager) error
func New(opts ...ManagerOptions) (Manager, error) {
m := &manager{}
newOpts := defaultOptions()
newOpts = append(newOpts, opts...)
for _, opt := range newOpts {
err := opt(m)
if err != nil {
return nil, err
}
}
urlParsed, err := neturl.Parse(url)
if err != nil {
return nil, err
}
return m, nil
}
return &manager{
url: url,
parsedURL: urlParsed,
}, nil
func WithURL(url string) ManagerOptions {
return func(m *manager) error {
m.url = url
parsedURL, err := neturl.Parse(url)
if err != nil {
return err
}
m.parsedURL = parsedURL
return nil
}
}
func WithChannelApiPath(path string) ManagerOptions {
return func(m *manager) error {
m.channelApiPath = path
return nil
}
}
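The constructor now follows the functional options pattern: the defaults (URL and channel API path from constants) are applied first, caller-supplied options override them, and any option error aborts construction. A short usage sketch, with a hypothetical URL:

// defaults only
m, err := New()
if err != nil {
	// an unparseable default URL would surface here, at construction time
}

// or override the alertmanager URL, as newAlertmanagerSet does below
m, err = New(WithURL("http://alertmanager:9093/api/"))
_ = m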
type manager struct {
url string
parsedURL *neturl.URL
url string
parsedURL *neturl.URL
channelApiPath string
}
func prepareAmChannelApiURL() string {
basePath := constants.GetAlertManagerApiPrefix()
AmChannelApiPath := constants.AmChannelApiPath
if len(AmChannelApiPath) > 0 && rune(AmChannelApiPath[0]) == rune('/') {
AmChannelApiPath = AmChannelApiPath[1:]
}
return fmt.Sprintf("%s%s", basePath, AmChannelApiPath)
func (m *manager) prepareAmChannelApiURL() string {
return fmt.Sprintf("%s%s", m.url, m.channelApiPath)
}
func prepareTestApiURL() string {
basePath := constants.GetAlertManagerApiPrefix()
return fmt.Sprintf("%s%s", basePath, "v1/testReceiver")
func (m *manager) prepareTestApiURL() string {
return fmt.Sprintf("%s%s", m.url, "v1/testReceiver")
}
func (m *manager) URL() *neturl.URL {
@@ -79,7 +99,7 @@ func (m *manager) AddRoute(receiver *Receiver) *model.ApiError {
receiverString, _ := json.Marshal(receiver)
amURL := prepareAmChannelApiURL()
amURL := m.prepareAmChannelApiURL()
response, err := http.Post(amURL, contentType, bytes.NewBuffer(receiverString))
if err != nil {
@@ -97,7 +117,7 @@ func (m *manager) AddRoute(receiver *Receiver) *model.ApiError {
func (m *manager) EditRoute(receiver *Receiver) *model.ApiError {
receiverString, _ := json.Marshal(receiver)
amURL := prepareAmChannelApiURL()
amURL := m.prepareAmChannelApiURL()
req, err := http.NewRequest(http.MethodPut, amURL, bytes.NewBuffer(receiverString))
if err != nil {
@@ -126,7 +146,7 @@ func (m *manager) DeleteRoute(name string) *model.ApiError {
values := map[string]string{"name": name}
requestData, _ := json.Marshal(values)
amURL := prepareAmChannelApiURL()
amURL := m.prepareAmChannelApiURL()
req, err := http.NewRequest(http.MethodDelete, amURL, bytes.NewBuffer(requestData))
if err != nil {
@@ -156,7 +176,7 @@ func (m *manager) TestReceiver(receiver *Receiver) *model.ApiError {
receiverBytes, _ := json.Marshal(receiver)
amTestURL := prepareTestApiURL()
amTestURL := m.prepareTestApiURL()
response, err := http.Post(amTestURL, contentType, bytes.NewBuffer(receiverBytes))
if err != nil {

View File

@@ -295,7 +295,7 @@ func newAlertmanagerSet(urls []string, timeout time.Duration, logger log.Logger)
ams := []Manager{}
for _, u := range urls {
am, err := New(u)
am, err := New(WithURL(u))
if err != nil {
level.Error(s.logger).Log(fmt.Sprintf("invalid alert manager url %s: %s", u, err))
} else {

View File

@@ -8,18 +8,11 @@ import (
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/stats"
am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
type Reader interface {
GetChannel(id string) (*model.ChannelItem, *model.ApiError)
GetChannels() (*[]model.ChannelItem, *model.ApiError)
DeleteChannel(id string) *model.ApiError
CreateChannel(receiver *am.Receiver) (*am.Receiver, *model.ApiError)
EditChannel(receiver *am.Receiver, id string) (*am.Receiver, *model.ApiError)
GetInstantQueryMetricsResult(ctx context.Context, query *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
GetServiceOverview(ctx context.Context, query *model.GetServiceOverviewParams, skipConfig *model.SkipConfig) (*[]model.ServiceOverviewItem, *model.ApiError)
@@ -58,8 +51,6 @@ type Reader interface {
SetTTL(ctx context.Context, ttlParams *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError)
FetchTemporality(ctx context.Context, metricNames []string) (map[string]map[v3.Temporality]bool, error)
GetMetricResult(ctx context.Context, query string) ([]*model.Series, error)
GetMetricResultEE(ctx context.Context, query string) ([]*model.Series, string, error)
GetMetricAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error)
GetMetricAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
GetMetricAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
@@ -70,10 +61,9 @@ type Reader interface {
// QB V3 metrics/traces/logs
GetTimeSeriesResultV3(ctx context.Context, query string) ([]*v3.Series, error)
GetListResultV3(ctx context.Context, query string) ([]*v3.Row, error)
LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *v3.LogsLiveTailClient)
LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClient)
LiveTailLogsV4(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClientV2)
GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error)
GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error)
GetTotalSpans(ctx context.Context) (uint64, error)
GetTotalLogs(ctx context.Context) (uint64, error)
GetTotalSamples(ctx context.Context) (uint64, error)
@@ -92,7 +82,6 @@ type Reader interface {
GetLogAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
GetLogAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
GetLogAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error)
GetUsers(ctx context.Context) ([]model.UserPayload, error)
GetQBFilterSuggestionsForLogs(
ctx context.Context,
req *v3.QBFilterSuggestionsRequest,
@@ -108,25 +97,25 @@ type Reader interface {
GetMetricMetadata(context.Context, string, string) (*v3.MetricMetadataResponse, error)
AddRuleStateHistory(ctx context.Context, ruleStateHistory []v3.RuleStateHistory) error
GetOverallStateTransitions(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.ReleStateItem, error)
ReadRuleStateHistoryByRuleID(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.RuleStateTimeline, error)
GetTotalTriggers(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (uint64, error)
GetTriggersByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error)
GetAvgResolutionTime(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (float64, error)
GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error)
ReadRuleStateHistoryTopContributorsByRuleID(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateHistoryContributor, error)
GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]v3.RuleStateHistory, error)
AddRuleStateHistory(ctx context.Context, ruleStateHistory []model.RuleStateHistory) error
GetOverallStateTransitions(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.ReleStateItem, error)
ReadRuleStateHistoryByRuleID(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*model.RuleStateTimeline, error)
GetTotalTriggers(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (uint64, error)
GetTriggersByInterval(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*v3.Series, error)
GetAvgResolutionTime(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (float64, error)
GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*v3.Series, error)
ReadRuleStateHistoryTopContributorsByRuleID(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.RuleStateHistoryContributor, error)
GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]model.RuleStateHistory, error)
GetMinAndMaxTimestampForTraceID(ctx context.Context, traceID []string) (int64, int64, error)
// Query Progress tracking helpers.
ReportQueryStartForProgressTracking(queryId string) (reportQueryFinished func(), err *model.ApiError)
SubscribeToQueryProgress(queryId string) (<-chan v3.QueryProgress, func(), *model.ApiError)
SubscribeToQueryProgress(queryId string) (<-chan model.QueryProgress, func(), *model.ApiError)
}
type Querier interface {
QueryRange(context.Context, *v3.QueryRangeParamsV3, map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error)
QueryRange(context.Context, *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error)
// test helpers
QueriesExecuted() []string

View File

@@ -3,8 +3,10 @@ package model
import (
"database/sql/driver"
"encoding/json"
"fmt"
"github.com/pkg/errors"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
// AlertState denotes the state of an active alert.
@@ -88,3 +90,104 @@ func (s *AlertState) Scan(value interface{}) error {
func (s *AlertState) Value() (driver.Value, error) {
return s.String(), nil
}
type LabelsString string
func (l *LabelsString) MarshalJSON() ([]byte, error) {
lbls := make(map[string]string)
err := json.Unmarshal([]byte(*l), &lbls)
if err != nil {
return nil, err
}
return json.Marshal(lbls)
}
func (l *LabelsString) Scan(src interface{}) error {
if data, ok := src.(string); ok {
*l = LabelsString(data)
}
return nil
}
func (l LabelsString) String() string {
return string(l)
}
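LabelsString keeps the labels column exactly as the JSON string ClickHouse returns, and MarshalJSON re-parses it so API responses serialize it as an object rather than a double-encoded string. A small round-trip sketch with hypothetical values:

var l LabelsString
_ = l.Scan(`{"service.name":"checkout","severity_text":"ERROR"}`) // as scanned from ClickHouse

out, _ := json.Marshal(&l)
// out is the JSON object {"service.name":"checkout","severity_text":"ERROR"},
// not a quoted string containing escaped JSON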
type RuleStateTimeline struct {
Items []RuleStateHistory `json:"items"`
Total uint64 `json:"total"`
Labels map[string][]string `json:"labels"`
}
type RuleStateHistory struct {
RuleID string `json:"ruleID" ch:"rule_id"`
RuleName string `json:"ruleName" ch:"rule_name"`
// One of ["normal", "firing"]
OverallState AlertState `json:"overallState" ch:"overall_state"`
OverallStateChanged bool `json:"overallStateChanged" ch:"overall_state_changed"`
// One of ["normal", "firing", "no_data", "muted"]
State AlertState `json:"state" ch:"state"`
StateChanged bool `json:"stateChanged" ch:"state_changed"`
UnixMilli int64 `json:"unixMilli" ch:"unix_milli"`
Labels LabelsString `json:"labels" ch:"labels"`
Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"`
Value float64 `json:"value" ch:"value"`
RelatedTracesLink string `json:"relatedTracesLink"`
RelatedLogsLink string `json:"relatedLogsLink"`
}
type QueryRuleStateHistory struct {
Start int64 `json:"start"`
End int64 `json:"end"`
State string `json:"state"`
Filters *v3.FilterSet `json:"filters"`
Offset int64 `json:"offset"`
Limit int64 `json:"limit"`
Order string `json:"order"`
}
func (r *QueryRuleStateHistory) Validate() error {
if r.Start == 0 || r.End == 0 {
return fmt.Errorf("start and end are required")
}
if r.Offset < 0 || r.Limit < 0 {
return fmt.Errorf("offset and limit must be greater than 0")
}
if r.Order != "asc" && r.Order != "desc" {
return fmt.Errorf("order must be asc or desc")
}
return nil
}
type RuleStateHistoryContributor struct {
Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"`
Labels LabelsString `json:"labels" ch:"labels"`
Count uint64 `json:"count" ch:"count"`
RelatedTracesLink string `json:"relatedTracesLink"`
RelatedLogsLink string `json:"relatedLogsLink"`
}
type RuleStateTransition struct {
RuleID string `json:"ruleID" ch:"rule_id"`
State AlertState `json:"state" ch:"state"`
FiringTime int64 `json:"firingTime" ch:"firing_time"`
ResolutionTime int64 `json:"resolutionTime" ch:"resolution_time"`
}
type ReleStateItem struct {
State AlertState `json:"state"`
Start int64 `json:"start"`
End int64 `json:"end"`
}
type Stats struct {
TotalCurrentTriggers uint64 `json:"totalCurrentTriggers"`
TotalPastTriggers uint64 `json:"totalPastTriggers"`
CurrentTriggersSeries *v3.Series `json:"currentTriggersSeries"`
PastTriggersSeries *v3.Series `json:"pastTriggersSeries"`
CurrentAvgResolutionTime string `json:"currentAvgResolutionTime"`
PastAvgResolutionTime string `json:"pastAvgResolutionTime"`
CurrentAvgResolutionTimeSeries *v3.Series `json:"currentAvgResolutionTimeSeries"`
PastAvgResolutionTimeSeries *v3.Series `json:"pastAvgResolutionTimeSeries"`
}

View File

@@ -0,0 +1,75 @@
package model
import (
"context"
"strings"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
type LogsLiveTailClientV2 struct {
Name string
Logs chan *SignozLogV2
Done chan *bool
Error chan error
}
type LogsLiveTailClient struct {
Name string
Logs chan *SignozLog
Done chan *bool
Error chan error
}
type QueryProgress struct {
ReadRows uint64 `json:"read_rows"`
ReadBytes uint64 `json:"read_bytes"`
ElapsedMs uint64 `json:"elapsed_ms"`
}
func GetLogFieldsV3(ctx context.Context, queryRangeParams *v3.QueryRangeParamsV3, fields *GetFieldsResponse) map[string]v3.AttributeKey {
data := map[string]v3.AttributeKey{}
for _, query := range queryRangeParams.CompositeQuery.BuilderQueries {
if query.DataSource == v3.DataSourceLogs {
// top level fields meta will always be present in the frontend (support for that can be added as an enhancement)
getType := func(t string) (v3.AttributeKeyType, bool) {
if t == "attributes" {
return v3.AttributeKeyTypeTag, false
} else if t == "resources" {
return v3.AttributeKeyTypeResource, false
}
return "", true
}
for _, selectedField := range fields.Selected {
fieldType, pass := getType(selectedField.Type)
if pass {
continue
}
data[selectedField.Name] = v3.AttributeKey{
Key: selectedField.Name,
Type: fieldType,
DataType: v3.AttributeKeyDataType(strings.ToLower(selectedField.DataType)),
IsColumn: true,
}
}
for _, interestingField := range fields.Interesting {
fieldType, pass := getType(interestingField.Type)
if pass {
continue
}
data[interestingField.Name] = v3.AttributeKey{
Key: interestingField.Name,
Type: fieldType,
DataType: v3.AttributeKeyDataType(strings.ToLower(interestingField.DataType)),
IsColumn: false,
}
}
break
}
}
return data
}
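In short, selected fields become attribute keys with IsColumn: true, interesting fields with IsColumn: false, and any field whose type is neither attributes nor resources is skipped as a top-level column the frontend already knows. A sketch, assuming GetFieldsResponse entries carry Name, Type and DataType as used above:

fields := &GetFieldsResponse{
	Selected:    []LogField{{Name: "method", Type: "attributes", DataType: "String"}},
	Interesting: []LogField{{Name: "host.name", Type: "resources", DataType: "String"}},
}
keys := GetLogFieldsV3(ctx, params, fields) // params holds at least one logs builder query
// keys["method"]    -> tag attribute, DataType string, IsColumn true
// keys["host.name"] -> resource attribute, DataType string, IsColumn false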

View File

@@ -18,110 +18,6 @@ type QueryRangeParams struct {
Stats string
}
type MetricQuery struct {
QueryName string `json:"queryName"`
MetricName string `json:"metricName"`
TagFilters *FilterSet `json:"tagFilters,omitempty"`
GroupingTags []string `json:"groupBy,omitempty"`
AggregateOperator AggregateOperator `json:"aggregateOperator"`
Expression string `json:"expression"`
Disabled bool `json:"disabled"`
ReduceTo ReduceToOperator `json:"reduceTo,omitempty"`
}
type ReduceToOperator int
const (
_ ReduceToOperator = iota
RLAST
RSUM
RAVG
RMAX
RMIN
)
type QueryType int
const (
_ QueryType = iota
QUERY_BUILDER
CLICKHOUSE
PROM
)
type PromQuery struct {
Query string `json:"query"`
Stats string `json:"stats,omitempty"`
Disabled bool `json:"disabled"`
}
type ClickHouseQuery struct {
Query string `json:"query"`
Disabled bool `json:"disabled"`
}
type PanelType int
const (
_ PanelType = iota
TIME_SERIES
QUERY_VALUE
)
type CompositeMetricQuery struct {
BuilderQueries map[string]*MetricQuery `json:"builderQueries,omitempty"`
ClickHouseQueries map[string]*ClickHouseQuery `json:"chQueries,omitempty"`
PromQueries map[string]*PromQuery `json:"promQueries,omitempty"`
PanelType PanelType `json:"panelType"`
QueryType QueryType `json:"queryType"`
}
type AggregateOperator int
const (
_ AggregateOperator = iota
NOOP
COUNT
COUNT_DISTINCT
SUM
AVG
MAX
MIN
P05
P10
P20
P25
P50
P75
P90
P95
P99
RATE
SUM_RATE
// leave blank space for possibly {AVG, X}_RATE
_
_
_
RATE_SUM
RATE_AVG
RATE_MAX
RATE_MIN
HIST_QUANTILE_50
HIST_QUANTILE_75
HIST_QUANTILE_90
HIST_QUANTILE_95
HIST_QUANTILE_99
)
type DataSource int
const (
_ DataSource = iota
METRICS
TRACES
LOGS
)
const (
StringTagMapCol = "stringTagMap"
NumberTagMapCol = "numberTagMap"
@@ -129,16 +25,6 @@ const (
ResourceTagMapCol = "resourceTagsMap"
)
type QueryRangeParamsV2 struct {
DataSource DataSource `json:"dataSource"`
Start int64 `json:"start"`
End int64 `json:"end"`
Step int64 `json:"step"`
CompositeMetricQuery *CompositeMetricQuery `json:"compositeMetricQuery"`
Variables map[string]interface{} `json:"variables,omitempty"`
NoCache bool `json:"noCache"`
}
type DashboardVars struct {
Query string `json:"query"`
Variables map[string]interface{} `json:"variables,omitempty"`

View File

@@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"math"
"sort"
"strconv"
"time"
@@ -79,7 +78,7 @@ func BadRequest(err error) *ApiError {
func BadRequestStr(s string) *ApiError {
return &ApiError{
Typ: ErrorBadData,
Err: fmt.Errorf(s),
Err: errors.New(s),
}
}
@@ -500,46 +499,12 @@ type NextPrevErrorIDs struct {
GroupID string `json:"groupID"`
}
type Series struct {
QueryName string `json:"queryName"`
Labels map[string]string `json:"metric"`
Points []MetricPoint `json:"values"`
}
func (s *Series) SortPoints() {
sort.Slice(s.Points, func(i, j int) bool {
return s.Points[i].Timestamp < s.Points[j].Timestamp
})
}
type MetricPoint struct {
Timestamp int64
Value float64
}
type MetricStatus struct {
MetricName string
LastReceivedTsMillis int64
LastReceivedLabels map[string]string
}
// MarshalJSON implements json.Marshaler.
func (p *MetricPoint) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(p.Value, 'f', -1, 64)
return json.Marshal([...]interface{}{float64(p.Timestamp) / 1000, v})
}
// UnmarshalJSON implements json.Unmarshaler.
func (p *MetricPoint) UnmarshalJSON(b []byte) error {
var a [2]interface{}
if err := json.Unmarshal(b, &a); err != nil {
return err
}
p.Timestamp = int64(a[0].(float64) * 1000)
p.Value, _ = strconv.ParseFloat(a[1].(string), 64)
return nil
}
type ShowCreateTableStatement struct {
Statement string `json:"statement" ch:"statement"`
}
@@ -572,6 +537,21 @@ type SignozLog struct {
Attributes_bool map[string]bool `json:"attributes_bool" ch:"attributes_bool"`
}
type SignozLogV2 struct {
Timestamp uint64 `json:"timestamp" ch:"timestamp"`
ID string `json:"id" ch:"id"`
TraceID string `json:"trace_id" ch:"trace_id"`
SpanID string `json:"span_id" ch:"span_id"`
TraceFlags uint32 `json:"trace_flags" ch:"trace_flags"`
SeverityText string `json:"severity_text" ch:"severity_text"`
SeverityNumber uint8 `json:"severity_number" ch:"severity_number"`
Body string `json:"body" ch:"body"`
Resources_string map[string]string `json:"resources_string" ch:"resources_string"`
Attributes_string map[string]string `json:"attributes_string" ch:"attributes_string"`
Attributes_number map[string]float64 `json:"attributes_float" ch:"attributes_number"`
Attributes_bool map[string]bool `json:"attributes_bool" ch:"attributes_bool"`
}
type LogsTailClient struct {
Name string
Logs chan *SignozLog
@@ -638,6 +618,7 @@ type AlertsInfo struct {
LogsBasedAlerts int `json:"logsBasedAlerts"`
MetricBasedAlerts int `json:"metricBasedAlerts"`
TracesBasedAlerts int `json:"tracesBasedAlerts"`
TotalChannels int `json:"totalChannels"`
SlackChannels int `json:"slackChannels"`
WebHookChannels int `json:"webHookChannels"`
PagerDutyChannels int `json:"pagerDutyChannels"`
@@ -650,6 +631,7 @@ type AlertsInfo struct {
SpanMetricsPrometheusQueries int `json:"spanMetricsPrometheusQueries"`
AlertNames []string `json:"alertNames"`
AlertsWithTSV2 int `json:"alertsWithTSv2"`
AlertsWithLogsChQuery int `json:"alertsWithLogsChQuery"`
}
type SavedViewsInfo struct {
@@ -666,6 +648,7 @@ type DashboardsInfo struct {
TracesBasedPanels int `json:"tracesBasedPanels"`
DashboardNames []string `json:"dashboardNames"`
QueriesWithTSV2 int `json:"queriesWithTSV2"`
DashboardsWithLogsChQuery int `json:"dashboardsWithLogsChQuery"`
}
type TagTelemetryData struct {

View File

@@ -11,7 +11,6 @@ import (
"github.com/google/uuid"
"github.com/pkg/errors"
"go.signoz.io/signoz/pkg/query-service/model"
)
type DataSource string
@@ -371,6 +370,22 @@ type QueryRangeParamsV3 struct {
FormatForWeb bool `json:"formatForWeb,omitempty"`
}
func (q *QueryRangeParamsV3) Clone() *QueryRangeParamsV3 {
if q == nil {
return nil
}
return &QueryRangeParamsV3{
Start: q.Start,
End: q.End,
Step: q.Step,
CompositeQuery: q.CompositeQuery.Clone(),
Variables: q.Variables,
NoCache: q.NoCache,
Version: q.Version,
FormatForWeb: q.FormatForWeb,
}
}
type PromQuery struct {
Query string `json:"query"`
Stats string `json:"stats,omitempty"`
@@ -378,6 +393,18 @@ type PromQuery struct {
Legend string `json:"legend,omitempty"`
}
func (p *PromQuery) Clone() *PromQuery {
if p == nil {
return nil
}
return &PromQuery{
Query: p.Query,
Stats: p.Stats,
Disabled: p.Disabled,
Legend: p.Legend,
}
}
func (p *PromQuery) Validate() error {
if p == nil {
return nil
@@ -396,6 +423,16 @@ type ClickHouseQuery struct {
Legend string `json:"legend,omitempty"`
}
func (c *ClickHouseQuery) Clone() *ClickHouseQuery {
if c == nil {
return nil
}
return &ClickHouseQuery{
Query: c.Query,
Disabled: c.Disabled,
Legend: c.Legend,
}
}
func (c *ClickHouseQuery) Validate() error {
if c == nil {
return nil
@@ -421,6 +458,43 @@ type CompositeQuery struct {
FillGaps bool `json:"fillGaps,omitempty"`
}
func (c *CompositeQuery) Clone() *CompositeQuery {
if c == nil {
return nil
}
var builderQueries map[string]*BuilderQuery
if c.BuilderQueries != nil {
builderQueries = make(map[string]*BuilderQuery)
for name, query := range c.BuilderQueries {
builderQueries[name] = query.Clone()
}
}
var clickHouseQueries map[string]*ClickHouseQuery
if c.ClickHouseQueries != nil {
clickHouseQueries = make(map[string]*ClickHouseQuery)
for name, query := range c.ClickHouseQueries {
clickHouseQueries[name] = query.Clone()
}
}
var promQueries map[string]*PromQuery
if c.PromQueries != nil {
promQueries = make(map[string]*PromQuery)
for name, query := range c.PromQueries {
promQueries[name] = query.Clone()
}
}
return &CompositeQuery{
BuilderQueries: builderQueries,
ClickHouseQueries: clickHouseQueries,
PromQueries: promQueries,
PanelType: c.PanelType,
QueryType: c.QueryType,
Unit: c.Unit,
FillGaps: c.FillGaps,
}
}
func (c *CompositeQuery) EnabledQueries() int {
count := 0
switch c.QueryType {
@@ -646,6 +720,7 @@ const (
FunctionNameMedian5 FunctionName = "median5"
FunctionNameMedian7 FunctionName = "median7"
FunctionNameTimeShift FunctionName = "timeShift"
FunctionNameAnomaly FunctionName = "anomaly"
)
func (f FunctionName) Validate() error {
@@ -665,7 +740,8 @@ func (f FunctionName) Validate() error {
FunctionNameMedian3,
FunctionNameMedian5,
FunctionNameMedian7,
FunctionNameTimeShift:
FunctionNameTimeShift,
FunctionNameAnomaly:
return nil
default:
return fmt.Errorf("invalid function name: %s", f)
@@ -673,33 +749,68 @@ func (f FunctionName) Validate() error {
}
type Function struct {
Name FunctionName `json:"name"`
Args []interface{} `json:"args,omitempty"`
Name FunctionName `json:"name"`
Args []interface{} `json:"args,omitempty"`
NamedArgs map[string]interface{} `json:"namedArgs,omitempty"`
}
type BuilderQuery struct {
QueryName string `json:"queryName"`
StepInterval int64 `json:"stepInterval"`
DataSource DataSource `json:"dataSource"`
AggregateOperator AggregateOperator `json:"aggregateOperator"`
AggregateAttribute AttributeKey `json:"aggregateAttribute,omitempty"`
Temporality Temporality `json:"temporality,omitempty"`
Filters *FilterSet `json:"filters,omitempty"`
GroupBy []AttributeKey `json:"groupBy,omitempty"`
Expression string `json:"expression"`
Disabled bool `json:"disabled"`
Having []Having `json:"having,omitempty"`
Legend string `json:"legend,omitempty"`
Limit uint64 `json:"limit"`
Offset uint64 `json:"offset"`
PageSize uint64 `json:"pageSize"`
OrderBy []OrderBy `json:"orderBy,omitempty"`
ReduceTo ReduceToOperator `json:"reduceTo,omitempty"`
SelectColumns []AttributeKey `json:"selectColumns,omitempty"`
TimeAggregation TimeAggregation `json:"timeAggregation,omitempty"`
SpaceAggregation SpaceAggregation `json:"spaceAggregation,omitempty"`
Functions []Function `json:"functions,omitempty"`
ShiftBy int64
QueryName string `json:"queryName"`
StepInterval int64 `json:"stepInterval"`
DataSource DataSource `json:"dataSource"`
AggregateOperator AggregateOperator `json:"aggregateOperator"`
AggregateAttribute AttributeKey `json:"aggregateAttribute,omitempty"`
Temporality Temporality `json:"temporality,omitempty"`
Filters *FilterSet `json:"filters,omitempty"`
GroupBy []AttributeKey `json:"groupBy,omitempty"`
Expression string `json:"expression"`
Disabled bool `json:"disabled"`
Having []Having `json:"having,omitempty"`
Legend string `json:"legend,omitempty"`
Limit uint64 `json:"limit"`
Offset uint64 `json:"offset"`
PageSize uint64 `json:"pageSize"`
OrderBy []OrderBy `json:"orderBy,omitempty"`
ReduceTo ReduceToOperator `json:"reduceTo,omitempty"`
SelectColumns []AttributeKey `json:"selectColumns,omitempty"`
TimeAggregation TimeAggregation `json:"timeAggregation,omitempty"`
SpaceAggregation SpaceAggregation `json:"spaceAggregation,omitempty"`
Functions []Function `json:"functions,omitempty"`
ShiftBy int64
IsAnomaly bool
QueriesUsedInFormula []string
}
func (b *BuilderQuery) Clone() *BuilderQuery {
if b == nil {
return nil
}
return &BuilderQuery{
QueryName: b.QueryName,
StepInterval: b.StepInterval,
DataSource: b.DataSource,
AggregateOperator: b.AggregateOperator,
AggregateAttribute: b.AggregateAttribute,
Temporality: b.Temporality,
Filters: b.Filters.Clone(),
GroupBy: b.GroupBy,
Expression: b.Expression,
Disabled: b.Disabled,
Having: b.Having,
Legend: b.Legend,
Limit: b.Limit,
Offset: b.Offset,
PageSize: b.PageSize,
OrderBy: b.OrderBy,
ReduceTo: b.ReduceTo,
SelectColumns: b.SelectColumns,
TimeAggregation: b.TimeAggregation,
SpaceAggregation: b.SpaceAggregation,
Functions: b.Functions,
ShiftBy: b.ShiftBy,
IsAnomaly: b.IsAnomaly,
QueriesUsedInFormula: b.QueriesUsedInFormula,
}
}
// CanDefaultZero returns true if the missing value can be substituted by zero
@@ -878,6 +989,16 @@ type FilterSet struct {
Items []FilterItem `json:"items"`
}
func (f *FilterSet) Clone() *FilterSet {
if f == nil {
return nil
}
return &FilterSet{
Operator: f.Operator,
Items: f.Items,
}
}
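Note that these Clone methods are shallow one level down: FilterSet.Clone copies the Items slice header, and BuilderQuery.Clone above shares GroupBy, Having, OrderBy and Functions with the original. That is fine while callers replace slices wholesale rather than mutating elements; a sketch of the caveat with hypothetical values:

orig := &FilterSet{Operator: "AND", Items: []FilterItem{{Value: "a"}}}
cp := orig.Clone()

// without copying the backing array first, the write below would also mutate orig.Items[0]
cp.Items = append([]FilterItem{}, cp.Items...)
cp.Items[0].Value = "b" // orig.Items[0].Value is still "a"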
func (f *FilterSet) Validate() error {
if f == nil {
return nil
@@ -1029,17 +1150,15 @@ type Table struct {
}
type Result struct {
QueryName string `json:"queryName,omitempty"`
Series []*Series `json:"series,omitempty"`
List []*Row `json:"list,omitempty"`
Table *Table `json:"table,omitempty"`
}
type LogsLiveTailClient struct {
Name string
Logs chan *model.SignozLog
Done chan *bool
Error chan error
QueryName string `json:"queryName,omitempty"`
Series []*Series `json:"series,omitempty"`
PredictedSeries []*Series `json:"predictedSeries,omitempty"`
UpperBoundSeries []*Series `json:"upperBoundSeries,omitempty"`
LowerBoundSeries []*Series `json:"lowerBoundSeries,omitempty"`
AnomalyScores []*Series `json:"anomalyScores,omitempty"`
List []*Row `json:"list,omitempty"`
Table *Table `json:"table,omitempty"`
IsAnomaly bool `json:"isAnomaly,omitempty"`
}
type Series struct {
@@ -1160,115 +1279,6 @@ type MetricMetadataResponse struct {
Temporality string `json:"temporality"`
}
type LabelsString string
func (l *LabelsString) MarshalJSON() ([]byte, error) {
lbls := make(map[string]string)
err := json.Unmarshal([]byte(*l), &lbls)
if err != nil {
return nil, err
}
return json.Marshal(lbls)
}
func (l *LabelsString) Scan(src interface{}) error {
if data, ok := src.(string); ok {
*l = LabelsString(data)
}
return nil
}
func (l LabelsString) String() string {
return string(l)
}
type RuleStateTimeline struct {
Items []RuleStateHistory `json:"items"`
Total uint64 `json:"total"`
Labels map[string][]string `json:"labels"`
}
type RuleStateHistory struct {
RuleID string `json:"ruleID" ch:"rule_id"`
RuleName string `json:"ruleName" ch:"rule_name"`
// One of ["normal", "firing"]
OverallState model.AlertState `json:"overallState" ch:"overall_state"`
OverallStateChanged bool `json:"overallStateChanged" ch:"overall_state_changed"`
// One of ["normal", "firing", "no_data", "muted"]
State model.AlertState `json:"state" ch:"state"`
StateChanged bool `json:"stateChanged" ch:"state_changed"`
UnixMilli int64 `json:"unixMilli" ch:"unix_milli"`
Labels LabelsString `json:"labels" ch:"labels"`
Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"`
Value float64 `json:"value" ch:"value"`
RelatedTracesLink string `json:"relatedTracesLink"`
RelatedLogsLink string `json:"relatedLogsLink"`
}
type QueryRuleStateHistory struct {
Start int64 `json:"start"`
End int64 `json:"end"`
State string `json:"state"`
Filters *FilterSet `json:"filters"`
Offset int64 `json:"offset"`
Limit int64 `json:"limit"`
Order string `json:"order"`
}
func (r *QueryRuleStateHistory) Validate() error {
if r.Start == 0 || r.End == 0 {
return fmt.Errorf("start and end are required")
}
if r.Offset < 0 || r.Limit < 0 {
return fmt.Errorf("offset and limit must be greater than 0")
}
if r.Order != "asc" && r.Order != "desc" {
return fmt.Errorf("order must be asc or desc")
}
return nil
}
type RuleStateHistoryContributor struct {
Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"`
Labels LabelsString `json:"labels" ch:"labels"`
Count uint64 `json:"count" ch:"count"`
RelatedTracesLink string `json:"relatedTracesLink"`
RelatedLogsLink string `json:"relatedLogsLink"`
}
type RuleStateTransition struct {
RuleID string `json:"ruleID" ch:"rule_id"`
State model.AlertState `json:"state" ch:"state"`
FiringTime int64 `json:"firingTime" ch:"firing_time"`
ResolutionTime int64 `json:"resolutionTime" ch:"resolution_time"`
}
type ReleStateItem struct {
State model.AlertState `json:"state"`
Start int64 `json:"start"`
End int64 `json:"end"`
}
type Stats struct {
TotalCurrentTriggers uint64 `json:"totalCurrentTriggers"`
TotalPastTriggers uint64 `json:"totalPastTriggers"`
CurrentTriggersSeries *Series `json:"currentTriggersSeries"`
PastTriggersSeries *Series `json:"pastTriggersSeries"`
CurrentAvgResolutionTime string `json:"currentAvgResolutionTime"`
PastAvgResolutionTime string `json:"pastAvgResolutionTime"`
CurrentAvgResolutionTimeSeries *Series `json:"currentAvgResolutionTimeSeries"`
PastAvgResolutionTimeSeries *Series `json:"pastAvgResolutionTimeSeries"`
}
type QueryProgress struct {
ReadRows uint64 `json:"read_rows"`
ReadBytes uint64 `json:"read_bytes"`
ElapsedMs uint64 `json:"elapsed_ms"`
}
type URLShareableTimeRange struct {
Start int64 `json:"start"`
End int64 `json:"end"`

View File

@@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strings"
"time"
@@ -124,6 +125,47 @@ type RuleCondition struct {
SelectedQuery string `json:"selectedQueryName,omitempty"`
}
func (rc *RuleCondition) GetSelectedQueryName() string {
if rc != nil {
if rc.SelectedQuery != "" {
return rc.SelectedQuery
}
queryNames := map[string]struct{}{}
if rc.CompositeQuery != nil {
if rc.QueryType() == v3.QueryTypeBuilder {
for name := range rc.CompositeQuery.BuilderQueries {
queryNames[name] = struct{}{}
}
} else if rc.QueryType() == v3.QueryTypeClickHouseSQL {
for name := range rc.CompositeQuery.ClickHouseQueries {
queryNames[name] = struct{}{}
}
}
}
// The following logic exists for backward compatibility
// If there is no selected query, then
// - check if F1 is present, if yes, return F1
// - else return the query with max ascii value
// this logic is not really correct. we should be considering
// whether the query is enabled or not. but this is a temporary
// fix to support backward compatibility
if _, ok := queryNames["F1"]; ok {
return "F1"
}
keys := make([]string, 0, len(queryNames))
for k := range queryNames {
keys = append(keys, k)
}
sort.Strings(keys)
return keys[len(keys)-1]
}
// This should never happen
return ""
}
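So for a rule whose builder queries are {A, B, F1} the method returns F1, and for {A, B} it returns B, the lexicographically largest name. A sketch with a hypothetical condition:

rc := &RuleCondition{CompositeQuery: &v3.CompositeQuery{
	QueryType: v3.QueryTypeBuilder,
	BuilderQueries: map[string]*v3.BuilderQuery{
		"A": {}, "B": {},
	},
}}
fmt.Println(rc.GetSelectedQueryName()) // "B": no SelectedQuery set and no "F1" present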
func (rc *RuleCondition) IsValid() bool {
if rc.CompositeQuery == nil {

View File

@@ -109,6 +109,7 @@ func NewBaseRule(id string, p *PostableRule, reader interfaces.Reader, opts ...R
id: id,
name: p.AlertName,
source: p.Source,
typ: p.AlertType,
ruleCondition: p.RuleCondition,
evalWindow: time.Duration(p.EvalWindow),
labels: qslabels.FromMap(p.Labels),
@@ -201,6 +202,21 @@ func (r *BaseRule) Unit() string {
return ""
}
func (r *BaseRule) Timestamps(ts time.Time) (time.Time, time.Time) {
start := ts.Add(-time.Duration(r.evalWindow)).UnixMilli()
end := ts.UnixMilli()
if r.evalDelay > 0 {
start = start - int64(r.evalDelay.Milliseconds())
end = end - int64(r.evalDelay.Milliseconds())
}
// round to minute otherwise we could potentially miss data
start = start - (start % (60 * 1000))
end = end - (end % (60 * 1000))
return time.UnixMilli(start), time.UnixMilli(end)
}
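A worked example, assuming evalWindow = 5m and evalDelay = 1m: for ts = 10:07:30 the raw window is [10:02:30, 10:07:30], the delay shifts it to [10:01:30, 10:06:30], and rounding down to the minute yields [10:01:00, 10:06:00]:

ts := time.Date(2024, 9, 18, 10, 7, 30, 0, time.UTC)
start, end := r.Timestamps(ts)
// start = 10:01:00 UTC, end = 10:06:00 UTC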
func (r *BaseRule) SetLastError(err error) {
r.mtx.Lock()
defer r.mtx.Unlock()
@@ -471,9 +487,9 @@ func (r *BaseRule) shouldAlert(series v3.Series) (Sample, bool) {
return alertSmpl, shouldAlert
}
func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []v3.RuleStateHistory) error {
func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []model.RuleStateHistory) error {
zap.L().Debug("recording rule state history", zap.String("ruleid", r.ID()), zap.Any("prevState", prevState), zap.Any("currentState", currentState), zap.Any("itemsToAdd", itemsToAdd))
revisedItemsToAdd := map[uint64]v3.RuleStateHistory{}
revisedItemsToAdd := map[uint64]model.RuleStateHistory{}
lastSavedState, err := r.reader.GetLastSavedRuleStateHistory(ctx, r.ID())
if err != nil {
@@ -483,7 +499,7 @@ func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, curren
// the state would reset so we need to add the corresponding state changes to previously saved states
if !r.handledRestart && len(lastSavedState) > 0 {
zap.L().Debug("handling restart", zap.String("ruleid", r.ID()), zap.Any("lastSavedState", lastSavedState))
l := map[uint64]v3.RuleStateHistory{}
l := map[uint64]model.RuleStateHistory{}
for _, item := range itemsToAdd {
l[item.Fingerprint] = item
}
@@ -552,7 +568,7 @@ func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, curren
if len(revisedItemsToAdd) > 0 && r.reader != nil {
zap.L().Debug("writing rule state history", zap.String("ruleid", r.ID()), zap.Any("revisedItemsToAdd", revisedItemsToAdd))
entries := make([]v3.RuleStateHistory, 0, len(revisedItemsToAdd))
entries := make([]model.RuleStateHistory, 0, len(revisedItemsToAdd))
for _, item := range revisedItemsToAdd {
entries = append(entries, item)
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/common"
am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.uber.org/zap"
@@ -18,6 +19,12 @@ import (
// Data store to capture user alert rule settings
type RuleDB interface {
GetChannel(id string) (*model.ChannelItem, *model.ApiError)
GetChannels() (*[]model.ChannelItem, *model.ApiError)
DeleteChannel(id string) *model.ApiError
CreateChannel(receiver *am.Receiver) (*am.Receiver, *model.ApiError)
EditChannel(receiver *am.Receiver, id string) (*am.Receiver, *model.ApiError)
// CreateRuleTx stores rule in the db and returns tx and group name (on success)
CreateRuleTx(ctx context.Context, rule string) (int64, Tx, error)
@@ -68,13 +75,15 @@ type Tx interface {
type ruleDB struct {
*sqlx.DB
alertManager am.Manager
}
// todo: move init methods for creating tables
func NewRuleDB(db *sqlx.DB) RuleDB {
func NewRuleDB(db *sqlx.DB, alertManager am.Manager) RuleDB {
return &ruleDB{
db,
alertManager,
}
}
@@ -303,6 +312,229 @@ func (r *ruleDB) EditPlannedMaintenance(ctx context.Context, maintenance Planned
return "", nil
}
func getChannelType(receiver *am.Receiver) string {
if receiver.EmailConfigs != nil {
return "email"
}
if receiver.OpsGenieConfigs != nil {
return "opsgenie"
}
if receiver.PagerdutyConfigs != nil {
return "pagerduty"
}
if receiver.PushoverConfigs != nil {
return "pushover"
}
if receiver.SNSConfigs != nil {
return "sns"
}
if receiver.SlackConfigs != nil {
return "slack"
}
if receiver.VictorOpsConfigs != nil {
return "victorops"
}
if receiver.WebhookConfigs != nil {
return "webhook"
}
if receiver.WechatConfigs != nil {
return "wechat"
}
if receiver.MSTeamsConfigs != nil {
return "msteams"
}
return ""
}
func (r *ruleDB) GetChannel(id string) (*model.ChannelItem, *model.ApiError) {
idInt, _ := strconv.Atoi(id)
channel := model.ChannelItem{}
query := "SELECT id, created_at, updated_at, name, type, data data FROM notification_channels WHERE id=?;"
stmt, err := r.Preparex(query)
if err != nil {
zap.L().Error("Error in preparing sql query for GetChannel", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
err = stmt.Get(&channel, idInt)
if err != nil {
zap.L().Error("Error in getting channel with id", zap.Int("id", idInt), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return &channel, nil
}
func (r *ruleDB) DeleteChannel(id string) *model.ApiError {
idInt, _ := strconv.Atoi(id)
channelToDelete, apiErrorObj := r.GetChannel(id)
if apiErrorObj != nil {
return apiErrorObj
}
tx, err := r.Begin()
if err != nil {
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
{
stmt, err := tx.Prepare(`DELETE FROM notification_channels WHERE id=$1;`)
if err != nil {
zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback()
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
defer stmt.Close()
if _, err := stmt.Exec(idInt); err != nil {
zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback() // return an error too, we may want to wrap them
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
}
apiError := r.alertManager.DeleteRoute(channelToDelete.Name)
if apiError != nil {
tx.Rollback()
return apiError
}
err = tx.Commit()
if err != nil {
zap.L().Error("Error in committing transaction for DELETE command to notification_channels", zap.Error(err))
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return nil
}
func (r *ruleDB) GetChannels() (*[]model.ChannelItem, *model.ApiError) {
channels := []model.ChannelItem{}
query := "SELECT id, created_at, updated_at, name, type, data data FROM notification_channels"
err := r.Select(&channels, query)
zap.L().Info(query)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return &channels, nil
}
func (r *ruleDB) EditChannel(receiver *am.Receiver, id string) (*am.Receiver, *model.ApiError) {
idInt, _ := strconv.Atoi(id)
channel, apiErrObj := r.GetChannel(id)
if apiErrObj != nil {
return nil, apiErrObj
}
if channel.Name != receiver.Name {
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("channel name cannot be changed")}
}
tx, err := r.Begin()
if err != nil {
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
channel_type := getChannelType(receiver)
receiverString, _ := json.Marshal(receiver)
{
stmt, err := tx.Prepare(`UPDATE notification_channels SET updated_at=$1, type=$2, data=$3 WHERE id=$4;`)
if err != nil {
zap.L().Error("Error in preparing statement for UPDATE to notification_channels", zap.Error(err))
tx.Rollback()
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
defer stmt.Close()
if _, err := stmt.Exec(time.Now(), channel_type, string(receiverString), idInt); err != nil {
zap.L().Error("Error in Executing prepared statement for UPDATE to notification_channels", zap.Error(err))
tx.Rollback() // return an error too; we may want to wrap them
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
}
apiError := r.alertManager.EditRoute(receiver)
if apiError != nil {
tx.Rollback()
return nil, apiError
}
err = tx.Commit()
if err != nil {
zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return receiver, nil
}
func (r *ruleDB) CreateChannel(receiver *am.Receiver) (*am.Receiver, *model.ApiError) {
channel_type := getChannelType(receiver)
receiverString, _ := json.Marshal(receiver)
tx, err := r.Begin()
if err != nil {
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
{
stmt, err := tx.Prepare(`INSERT INTO notification_channels (created_at, updated_at, name, type, data) VALUES($1,$2,$3,$4,$5);`)
if err != nil {
zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback()
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
defer stmt.Close()
if _, err := stmt.Exec(time.Now(), time.Now(), receiver.Name, channel_type, string(receiverString)); err != nil {
zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err))
tx.Rollback() // return an error too; we may want to wrap them
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
}
apiError := r.alertManager.AddRoute(receiver)
if apiError != nil {
tx.Rollback()
return nil, apiError
}
err = tx.Commit()
if err != nil {
zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
return receiver, nil
}
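CreateChannel, like EditChannel and DeleteChannel above, sequences its side effects so the database and alertmanager stay consistent: the row is written inside an open transaction, the alertmanager route is updated next, and the transaction commits only if that call succeeds. A condensed sketch of the pattern, with hypothetical db, insertSQL and args:

tx, _ := db.Begin()
if _, err := tx.Exec(insertSQL, args...); err != nil {
	tx.Rollback()
	return err
}
if apiErr := alertManager.AddRoute(receiver); apiErr != nil {
	tx.Rollback() // undo the row if the route could not be added
	return apiErr
}
return tx.Commit() // the row becomes visible only once the route exists

There is still a narrow window where Commit fails after AddRoute succeeded, leaving a route without a row; the code above accepts that trade-off.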
func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) {
alertsInfo := model.AlertsInfo{}
// fetch alerts from rules db
@@ -319,6 +551,10 @@ func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) {
if strings.Contains(alert, "time_series_v2") {
alertsInfo.AlertsWithTSV2 = alertsInfo.AlertsWithTSV2 + 1
}
if strings.Contains(alert, "signoz_logs.distributed_logs") ||
strings.Contains(alert, "signoz_logs.logs") {
alertsInfo.AlertsWithLogsChQuery = alertsInfo.AlertsWithLogsChQuery + 1
}
err = json.Unmarshal([]byte(alert), &rule)
if err != nil {
zap.L().Error("invalid rule data", zap.Error(err))
@@ -349,5 +585,31 @@ func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) {
alertsInfo.TotalAlerts = alertsInfo.TotalAlerts + 1
}
alertsInfo.AlertNames = alertNames
channels, _ := r.GetChannels()
if channels != nil {
alertsInfo.TotalChannels = len(*channels)
for _, channel := range *channels {
if channel.Type == "slack" {
alertsInfo.SlackChannels = alertsInfo.SlackChannels + 1
}
if channel.Type == "webhook" {
alertsInfo.WebHookChannels = alertsInfo.WebHookChannels + 1
}
if channel.Type == "email" {
alertsInfo.EmailChannels = alertsInfo.EmailChannels + 1
}
if channel.Type == "pagerduty" {
alertsInfo.PagerDutyChannels = alertsInfo.PagerDutyChannels + 1
}
if channel.Type == "opsgenie" {
alertsInfo.OpsGenieChannels = alertsInfo.OpsGenieChannels + 1
}
if channel.Type == "msteams" {
alertsInfo.MSTeamsChannels = alertsInfo.MSTeamsChannels + 1
}
}
}
return &alertsInfo, nil
}

View File

@@ -190,7 +190,12 @@ func NewManager(o *ManagerOptions) (*Manager, error) {
return nil, err
}
db := NewRuleDB(o.DBConn)
amManager, err := am.New()
if err != nil {
return nil, err
}
db := NewRuleDB(o.DBConn, amManager)
telemetry.GetInstance().SetAlertsInfoCallback(db.GetAlertsInfo)

View File

@@ -219,7 +219,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
}
itemsToAdd := []v3.RuleStateHistory{}
itemsToAdd := []model.RuleStateHistory{}
// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
for fp, a := range r.active {
@@ -236,13 +236,13 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
if a.State != model.StateInactive {
a.State = model.StateInactive
a.ResolvedAt = ts
itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: model.StateInactive,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: v3.LabelsString(labelsJSON),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
})
}
@@ -256,13 +256,13 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
if a.Missing {
state = model.StateNoData
}
itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: state,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: v3.LabelsString(labelsJSON),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})

View File

@@ -13,7 +13,7 @@ import (
func TestPromRuleShouldAlert(t *testing.T) {
postableRule := PostableRule{
AlertName: "Test Rule",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeProm,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),

View File

@@ -5,7 +5,6 @@ import (
"time"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
)
@@ -35,7 +34,7 @@ type Rule interface {
SetEvaluationTimestamp(time.Time)
GetEvaluationTimestamp() time.Time
RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []v3.RuleStateHistory) error
RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []model.RuleStateHistory) error
SendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc)
}

View File

@@ -6,9 +6,7 @@ import (
"encoding/json"
"fmt"
"math"
"net/url"
"regexp"
"sort"
"text/template"
"time"
"unicode"
@@ -16,6 +14,7 @@ import (
"go.uber.org/zap"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/contextlinks"
"go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/postprocess"
@@ -31,6 +30,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/utils/timestamp"
logsv3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/formatter"
yaml "gopkg.in/yaml.v2"
@@ -53,6 +53,10 @@ type ThresholdRule struct {
querier interfaces.Querier
// querierV2 is used for alerts created after the introduction of new metrics query builder
querierV2 interfaces.Querier
// used for attribute metadata enrichment for logs and traces
logsKeys map[string]v3.AttributeKey
spansKeys map[string]v3.AttributeKey
}
func NewThresholdRule(
@@ -164,16 +168,8 @@ func (r *ThresholdRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3,
zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.evalWindow.Milliseconds()), zap.Int64("evalDelay", r.evalDelay.Milliseconds()))
start := ts.Add(-time.Duration(r.evalWindow)).UnixMilli()
end := ts.UnixMilli()
if r.evalDelay > 0 {
start = start - int64(r.evalDelay.Milliseconds())
end = end - int64(r.evalDelay.Milliseconds())
}
// round to minute otherwise we could potentially miss data
start = start - (start % (60 * 1000))
end = end - (end % (60 * 1000))
startTs, endTs := r.Timestamps(ts)
start, end := startTs.UnixMilli(), endTs.UnixMilli()
if r.ruleCondition.QueryType() == v3.QueryTypeClickHouseSQL {
params := &v3.QueryRangeParamsV3{
@@ -239,245 +235,76 @@ func (r *ThresholdRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3,
}, nil
}
// The following function is used to prepare the where clause for the query
// `lbls` contains the key value pairs of the labels from the result of the query
// We iterate over the where clause and replace the labels with the actual values
// There are two cases:
// 1. The label is present in the where clause
// 2. The label is not present in the where clause
//
// Example for case 2:
// Latency by serviceName without any filter
// In this case, for each service with latency > threshold we send a notification
// The expectation will be that clicking on the related traces for service A, will
// take us to the traces page with the filter serviceName=A
// So for all the missing labels in the where clause, we add them as key = value
//
// Example for case 1:
// Severity text IN (WARN, ERROR)
// In this case, Severity text will appear in `lbls` if it was part of the
// group by clause, in which case we replace it with the actual value for
// the notification, i.e. Severity text = WARN
// If the Severity text is not part of the group by clause, then we add it as it is
func (r *ThresholdRule) fetchFilters(selectedQuery string, lbls labels.Labels) []v3.FilterItem {
var filterItems []v3.FilterItem
added := make(map[string]struct{})
if r.ruleCondition.CompositeQuery.QueryType == v3.QueryTypeBuilder &&
r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery] != nil &&
r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery].Filters != nil {
for _, item := range r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery].Filters.Items {
exists := false
for _, label := range lbls {
if item.Key.Key == label.Name {
// if the label is present in the where clause, replace it with key = value
filterItems = append(filterItems, v3.FilterItem{
Key: item.Key,
Operator: v3.FilterOperatorEqual,
Value: label.Value,
})
exists = true
added[label.Name] = struct{}{}
break
}
}
if !exists {
// if the label is not present in the where clause, add it as it is
filterItems = append(filterItems, item)
}
}
}
// add the labels which are not present in the where clause
for _, label := range lbls {
if _, ok := added[label.Name]; !ok {
filterItems = append(filterItems, v3.FilterItem{
Key: v3.AttributeKey{Key: label.Name},
Operator: v3.FilterOperatorEqual,
Value: label.Value,
})
}
}
return filterItems
}
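To make the two cases concrete, an illustration (assuming contextlinks.PrepareFilters, which replaces this function below, preserves the behavior):

// Rule:   severity_text IN (WARN, ERROR), grouped by service.name
// Sample: {service.name="checkout", severity_text="ERROR"}
//
// Case 1: severity_text appears in the sample labels, so the IN filter
//         is replaced with severity_text = ERROR.
// Case 2: service.name is absent from the where clause, so it is added
//         as service.name = checkout.
//
// Link filters: service.name = checkout AND severity_text = ERROR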
func (r *ThresholdRule) prepareLinksToLogs(ts time.Time, lbls labels.Labels) string {
selectedQuery := r.GetSelectedQuery()
qr, err := r.prepareQueryRange(ts)
if err != nil {
return ""
}
start := time.UnixMilli(qr.Start)
end := time.UnixMilli(qr.End)
// TODO(srikanthccv): handle formula queries
if selectedQuery < "A" || selectedQuery > "Z" {
return ""
}
q, err := r.prepareQueryRange(ts)
if err != nil {
q := r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery]
if q == nil {
return ""
}
// Logs list view expects time in milliseconds
tr := v3.URLShareableTimeRange{
Start: q.Start,
End: q.End,
PageSize: 100,
if q.DataSource != v3.DataSourceLogs {
return ""
}
options := v3.URLShareableOptions{
MaxLines: 2,
Format: "list",
SelectColumns: []v3.AttributeKey{},
queryFilter := []v3.FilterItem{}
if q.Filters != nil {
queryFilter = q.Filters.Items
}
period, _ := json.Marshal(tr)
urlEncodedTimeRange := url.QueryEscape(string(period))
filterItems := contextlinks.PrepareFilters(lbls.Map(), queryFilter, q.GroupBy, r.logsKeys)
filterItems := r.fetchFilters(selectedQuery, lbls)
urlData := v3.URLShareableCompositeQuery{
QueryType: string(v3.QueryTypeBuilder),
Builder: v3.URLShareableBuilderQuery{
QueryData: []v3.BuilderQuery{
{
DataSource: v3.DataSourceLogs,
QueryName: "A",
AggregateOperator: v3.AggregateOperatorNoOp,
AggregateAttribute: v3.AttributeKey{},
Filters: &v3.FilterSet{
Items: filterItems,
Operator: "AND",
},
Expression: "A",
Disabled: false,
Having: []v3.Having{},
StepInterval: 60,
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
},
},
QueryFormulas: make([]string, 0),
},
}
data, _ := json.Marshal(urlData)
compositeQuery := url.QueryEscape(url.QueryEscape(string(data)))
optionsData, _ := json.Marshal(options)
urlEncodedOptions := url.QueryEscape(string(optionsData))
return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions)
return contextlinks.PrepareLinksToLogs(start, end, filterItems)
}
func (r *ThresholdRule) prepareLinksToTraces(ts time.Time, lbls labels.Labels) string {
selectedQuery := r.GetSelectedQuery()
qr, err := r.prepareQueryRange(ts)
if err != nil {
return ""
}
start := time.UnixMilli(qr.Start)
end := time.UnixMilli(qr.End)
// TODO(srikanthccv): handle formula queries
if selectedQuery < "A" || selectedQuery > "Z" {
return ""
}
q, err := r.prepareQueryRange(ts)
if err != nil {
q := r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery]
if q == nil {
return ""
}
// Traces list view expects time in nanoseconds
tr := v3.URLShareableTimeRange{
Start: q.Start * time.Second.Microseconds(),
End: q.End * time.Second.Microseconds(),
PageSize: 100,
if q.DataSource != v3.DataSourceTraces {
return ""
}
options := v3.URLShareableOptions{
MaxLines: 2,
Format: "list",
SelectColumns: constants.TracesListViewDefaultSelectedColumns,
queryFilter := []v3.FilterItem{}
if q.Filters != nil {
queryFilter = q.Filters.Items
}
period, _ := json.Marshal(tr)
urlEncodedTimeRange := url.QueryEscape(string(period))
filterItems := contextlinks.PrepareFilters(lbls.Map(), queryFilter, q.GroupBy, r.spansKeys)
filterItems := r.fetchFilters(selectedQuery, lbls)
urlData := v3.URLShareableCompositeQuery{
QueryType: string(v3.QueryTypeBuilder),
Builder: v3.URLShareableBuilderQuery{
QueryData: []v3.BuilderQuery{
{
DataSource: v3.DataSourceTraces,
QueryName: "A",
AggregateOperator: v3.AggregateOperatorNoOp,
AggregateAttribute: v3.AttributeKey{},
Filters: &v3.FilterSet{
Items: filterItems,
Operator: "AND",
},
Expression: "A",
Disabled: false,
Having: []v3.Having{},
StepInterval: 60,
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
},
},
QueryFormulas: make([]string, 0),
},
}
data, _ := json.Marshal(urlData)
compositeQuery := url.QueryEscape(url.QueryEscape(string(data)))
optionsData, _ := json.Marshal(options)
urlEncodedOptions := url.QueryEscape(string(optionsData))
return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions)
return contextlinks.PrepareLinksToTraces(start, end, filterItems)
}
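For reference, the query-string assembly the removed code performed, and which contextlinks.PrepareLinksToLogs and contextlinks.PrepareLinksToTraces presumably now encapsulate, can be sketched as follows. Note the composite query is URL-escaped twice while the time range and options are escaped once:

package contextlinks

import (
	"encoding/json"
	"fmt"
	"net/url"
)

// buildExplorerQuery is a hypothetical reconstruction from the deleted
// lines; the real helpers also assemble the composite query, time range
// and options payloads themselves.
func buildExplorerQuery(urlData, timeRange, options any, startMs, endMs int64) string {
	data, _ := json.Marshal(urlData)
	compositeQuery := url.QueryEscape(url.QueryEscape(string(data)))
	period, _ := json.Marshal(timeRange)
	encodedTimeRange := url.QueryEscape(string(period))
	optionsData, _ := json.Marshal(options)
	encodedOptions := url.QueryEscape(string(optionsData))
	return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s",
		compositeQuery, encodedTimeRange, startMs, endMs, encodedOptions)
}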
func (r *ThresholdRule) GetSelectedQuery() string {
if r.ruleCondition != nil {
if r.ruleCondition.SelectedQuery != "" {
return r.ruleCondition.SelectedQuery
}
queryNames := map[string]struct{}{}
if r.ruleCondition.CompositeQuery != nil {
if r.ruleCondition.QueryType() == v3.QueryTypeBuilder {
for name := range r.ruleCondition.CompositeQuery.BuilderQueries {
queryNames[name] = struct{}{}
}
} else if r.ruleCondition.QueryType() == v3.QueryTypeClickHouseSQL {
for name := range r.ruleCondition.CompositeQuery.ClickHouseQueries {
queryNames[name] = struct{}{}
}
}
}
// The following logic exists for backward compatibility
// If there is no selected query, then
// - check if F1 is present, if yes, return F1
// - else return the query with max ascii value
// this logic is not really correct. we should be considering
// whether the query is enabled or not. but this is a temporary
// fix to support backward compatibility
if _, ok := queryNames["F1"]; ok {
return "F1"
}
keys := make([]string, 0, len(queryNames))
for k := range queryNames {
keys = append(keys, k)
}
sort.Strings(keys)
return keys[len(keys)-1]
}
// This should never happen
return ""
return r.ruleCondition.GetSelectedQueryName()
}
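GetSelectedQueryName itself is not shown in this diff; a sketch of the fallback it presumably retains, reconstructed from the deleted lines (helper name hypothetical):

package rules

import "sort"

// selectQueryName prefers the explicit selection, then "F1" if present,
// then the query name with the highest ASCII value. As the removed
// comment notes, this ignores whether a query is enabled and exists only
// for backward compatibility.
func selectQueryName(selected string, queryNames []string) string {
	if selected != "" {
		return selected
	}
	if len(queryNames) == 0 {
		return ""
	}
	keys := append([]string(nil), queryNames...)
	sort.Strings(keys)
	for _, k := range keys {
		if k == "F1" {
			return "F1"
		}
	}
	return keys[len(keys)-1]
}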
func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (Vector, error) {
@@ -492,11 +319,37 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (Vec
}
if params.CompositeQuery.QueryType == v3.QueryTypeBuilder {
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(params) {
// Note: Sending empty fields key because enrichment is only needed for json
// TODO: Add support for attribute enrichment later
logsv3.Enrich(params, map[string]v3.AttributeKey{})
hasLogsQuery := false
hasTracesQuery := false
for _, query := range params.CompositeQuery.BuilderQueries {
if query.DataSource == v3.DataSourceLogs {
hasLogsQuery = true
}
if query.DataSource == v3.DataSourceTraces {
hasTracesQuery = true
}
}
if hasLogsQuery {
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(params) {
logsFields, err := r.reader.GetLogFields(ctx)
if err != nil {
return nil, err
}
logsKeys := model.GetLogFieldsV3(ctx, params, logsFields)
r.logsKeys = logsKeys
logsv3.Enrich(params, logsKeys)
}
}
if hasTracesQuery {
spanKeys, err := r.reader.GetSpanAttributeKeys(ctx)
if err != nil {
return nil, err
}
r.spansKeys = spanKeys
tracesV3.Enrich(params, spanKeys)
}
}
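Caching the enriched key maps on the rule (r.logsKeys, r.spansKeys) is what lets prepareLinksToLogs and prepareLinksToTraces above pass real attribute metadata into contextlinks.PrepareFilters when notification links are built.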
@@ -504,9 +357,9 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (Vec
var queryErrors map[string]error
if r.version == "v4" {
results, queryErrors, err = r.querierV2.QueryRange(ctx, params, map[string]v3.AttributeKey{})
results, queryErrors, err = r.querierV2.QueryRange(ctx, params)
} else {
results, queryErrors, err = r.querier.QueryRange(ctx, params, map[string]v3.AttributeKey{})
results, queryErrors, err = r.querier.QueryRange(ctx, params)
}
if err != nil {
@@ -654,11 +507,13 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
if r.typ == AlertTypeTraces {
link := r.prepareLinksToTraces(ts, smpl.MetricOrig)
if link != "" && r.hostFromSource() != "" {
zap.L().Info("adding traces link to annotations", zap.String("link", fmt.Sprintf("%s/traces-explorer?%s", r.hostFromSource(), link)))
annotations = append(annotations, labels.Label{Name: "related_traces", Value: fmt.Sprintf("%s/traces-explorer?%s", r.hostFromSource(), link)})
}
} else if r.typ == AlertTypeLogs {
link := r.prepareLinksToLogs(ts, smpl.MetricOrig)
if link != "" && r.hostFromSource() != "" {
zap.L().Info("adding logs link to annotations", zap.String("link", fmt.Sprintf("%s/logs/logs-explorer?%s", r.hostFromSource(), link)))
annotations = append(annotations, labels.Label{Name: "related_logs", Value: fmt.Sprintf("%s/logs/logs-explorer?%s", r.hostFromSource(), link)})
}
}
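The resulting annotation values are absolute explorer links; schematically (host and query string illustrative):

related_traces: https://<signoz-host>/traces-explorer?compositeQuery=...&startTime=...&endTime=...&options=...
related_logs:   https://<signoz-host>/logs/logs-explorer?compositeQuery=...&startTime=...&endTime=...&options=...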
@@ -703,7 +558,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
r.active[h] = a
}
itemsToAdd := []v3.RuleStateHistory{}
itemsToAdd := []model.RuleStateHistory{}
// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
for fp, a := range r.active {
@@ -720,13 +575,13 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
if a.State != model.StateInactive {
a.State = model.StateInactive
a.ResolvedAt = ts
itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: model.StateInactive,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: v3.LabelsString(labelsJSON),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})
@@ -741,13 +596,13 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
if a.Missing {
state = model.StateNoData
}
itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: state,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: v3.LabelsString(labelsJSON),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})

View File

@@ -18,7 +18,7 @@ import (
func TestThresholdRuleShouldAlert(t *testing.T) {
postableRule := PostableRule{
AlertName: "Tricky Condition Tests",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -788,7 +788,7 @@ func TestPrepareLinksToLogs(t *testing.T) {
func TestPrepareLinksToTraces(t *testing.T) {
postableRule := PostableRule{
AlertName: "Links to traces test",
AlertType: "TRACES_BASED_ALERT",
AlertType: AlertTypeTraces,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -830,7 +830,7 @@ func TestPrepareLinksToTraces(t *testing.T) {
func TestThresholdRuleLabelNormalization(t *testing.T) {
postableRule := PostableRule{
AlertName: "Tricky Condition Tests",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -914,7 +914,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
func TestThresholdRuleEvalDelay(t *testing.T) {
postableRule := PostableRule{
AlertName: "Test Eval Delay",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -963,7 +963,7 @@ func TestThresholdRuleEvalDelay(t *testing.T) {
func TestThresholdRuleClickHouseTmpl(t *testing.T) {
postableRule := PostableRule{
AlertName: "Tricky Condition Tests",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -1019,7 +1019,7 @@ func (m *queryMatcherAny) Match(string, string) error {
func TestThresholdRuleUnitCombinations(t *testing.T) {
postableRule := PostableRule{
AlertName: "Units test",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -1170,8 +1170,8 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
func TestThresholdRuleNoData(t *testing.T) {
postableRule := PostableRule{
AlertName: "Units test",
AlertType: "METRIC_BASED_ALERT",
AlertName: "No data test",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -1261,3 +1261,238 @@ func TestThresholdRuleNoData(t *testing.T) {
}
}
}
func TestThresholdRuleTracesLink(t *testing.T) {
postableRule := PostableRule{
AlertName: "Traces link test",
AlertType: AlertTypeTraces,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
RuleCondition: &RuleCondition{
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
StepInterval: 60,
AggregateAttribute: v3.AttributeKey{
Key: "durationNano",
},
AggregateOperator: v3.AggregateOperatorP95,
DataSource: v3.DataSourceTraces,
Expression: "A",
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "httpMethod", IsColumn: true, Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString},
Value: "GET",
Operator: v3.FilterOperatorEqual,
},
},
},
},
},
},
},
}
fm := featureManager.StartManager()
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{})
if err != nil {
t.Errorf("an error '%s' was not expected when opening a stub database connection", err)
}
metaCols := make([]cmock.ColumnType, 0)
metaCols = append(metaCols, cmock.ColumnType{Name: "DISTINCT(tagKey)", Type: "String"})
metaCols = append(metaCols, cmock.ColumnType{Name: "tagType", Type: "String"})
metaCols = append(metaCols, cmock.ColumnType{Name: "dataType", Type: "String"})
metaCols = append(metaCols, cmock.ColumnType{Name: "isColumn", Type: "Bool"})
cols := make([]cmock.ColumnType, 0)
cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"})
cols = append(cols, cmock.ColumnType{Name: "attr", Type: "String"})
cols = append(cols, cmock.ColumnType{Name: "timestamp", Type: "String"})
for idx, c := range testCases {
metaRows := cmock.NewRows(metaCols, c.metaValues)
mock.
ExpectQuery("SELECT DISTINCT(tagKey), tagType, dataType, isColumn FROM archiveNamespace.span_attributes_keys").
WillReturnRows(metaRows)
rows := cmock.NewRows(cols, c.values)
// We are testing the eval logic after the query is run
// so we don't care about the query string here
queryString := "SELECT any"
mock.
ExpectQuery(queryString).
WillReturnRows(rows)
postableRule.RuleCondition.CompareOp = CompareOp(c.compareOp)
postableRule.RuleCondition.MatchType = MatchType(c.matchType)
postableRule.RuleCondition.Target = &c.target
postableRule.RuleCondition.CompositeQuery.Unit = c.yAxisUnit
postableRule.RuleCondition.TargetUnit = c.targetUnit
postableRule.Annotations = map[string]string{
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "The rule threshold is set to {{$threshold}}, and the observed metric value is {{$value}}",
}
options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
rule.temporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
},
}
if err != nil {
assert.NoError(t, err)
}
retVal, err := rule.Eval(context.Background(), time.Now())
if err != nil {
assert.NoError(t, err)
}
if c.expectAlerts == 0 {
assert.Equal(t, 0, retVal.(int), "case %d", idx)
} else {
assert.Equal(t, c.expectAlerts, retVal.(int), "case %d", idx)
for _, item := range rule.active {
for name, value := range item.Annotations.Map() {
if name == "related_traces" {
assert.NotEmpty(t, value, "case %d", idx)
assert.Contains(t, value, "GET")
}
}
}
}
}
}
func TestThresholdRuleLogsLink(t *testing.T) {
postableRule := PostableRule{
AlertName: "Logs link test",
AlertType: AlertTypeLogs,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
RuleCondition: &RuleCondition{
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
StepInterval: 60,
AggregateAttribute: v3.AttributeKey{
Key: "component",
},
AggregateOperator: v3.AggregateOperatorCountDistinct,
DataSource: v3.DataSourceLogs,
Expression: "A",
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "k8s.container.name", IsColumn: false, Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString},
Value: "testcontainer",
Operator: v3.FilterOperatorEqual,
},
},
},
},
},
},
},
}
fm := featureManager.StartManager()
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{})
if err != nil {
t.Errorf("an error '%s' was not expected when opening a stub database connection", err)
}
attrMetaCols := make([]cmock.ColumnType, 0)
attrMetaCols = append(attrMetaCols, cmock.ColumnType{Name: "name", Type: "String"})
attrMetaCols = append(attrMetaCols, cmock.ColumnType{Name: "dataType", Type: "String"})
resourceMetaCols := make([]cmock.ColumnType, 0)
resourceMetaCols = append(resourceMetaCols, cmock.ColumnType{Name: "name", Type: "String"})
resourceMetaCols = append(resourceMetaCols, cmock.ColumnType{Name: "dataType", Type: "String"})
createTableCols := make([]cmock.ColumnType, 0)
createTableCols = append(createTableCols, cmock.ColumnType{Name: "statement", Type: "String"})
cols := make([]cmock.ColumnType, 0)
cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"})
cols = append(cols, cmock.ColumnType{Name: "attr", Type: "String"})
cols = append(cols, cmock.ColumnType{Name: "timestamp", Type: "String"})
for idx, c := range testCases {
attrMetaRows := cmock.NewRows(attrMetaCols, c.attrMetaValues)
mock.
ExpectSelect("SELECT DISTINCT name, datatype from signoz_logs.distributed_logs_attribute_keys group by name, datatype").
WillReturnRows(attrMetaRows)
resourceMetaRows := cmock.NewRows(resourceMetaCols, c.resourceMetaValues)
mock.
ExpectSelect("SELECT DISTINCT name, datatype from signoz_logs.distributed_logs_resource_keys group by name, datatype").
WillReturnRows(resourceMetaRows)
createTableRows := cmock.NewRows(createTableCols, c.createTableValues)
mock.
ExpectSelect("SHOW CREATE TABLE signoz_logs.logs").
WillReturnRows(createTableRows)
rows := cmock.NewRows(cols, c.values)
// We are testing the eval logic after the query is run
// so we don't care about the query string here
queryString := "SELECT any"
mock.
ExpectQuery(queryString).
WillReturnRows(rows)
postableRule.RuleCondition.CompareOp = CompareOp(c.compareOp)
postableRule.RuleCondition.MatchType = MatchType(c.matchType)
postableRule.RuleCondition.Target = &c.target
postableRule.RuleCondition.CompositeQuery.Unit = c.yAxisUnit
postableRule.RuleCondition.TargetUnit = c.targetUnit
postableRule.Annotations = map[string]string{
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "The rule threshold is set to {{$threshold}}, and the observed metric value is {{$value}}",
}
options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
rule.temporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
},
}
if err != nil {
assert.NoError(t, err)
}
retVal, err := rule.Eval(context.Background(), time.Now())
if err != nil {
assert.NoError(t, err)
}
if c.expectAlerts == 0 {
assert.Equal(t, 0, retVal.(int), "case %d", idx)
} else {
assert.Equal(t, c.expectAlerts, retVal.(int), "case %d", idx)
for _, item := range rule.active {
for name, value := range item.Annotations.Map() {
if name == "related_logs" {
assert.NotEmpty(t, value, "case %d", idx)
assert.Contains(t, value, "testcontainer")
}
}
}
}
}
}

View File

@@ -0,0 +1,90 @@
package rules
import "time"
var (
testCases = []struct {
targetUnit string
yAxisUnit string
values [][]interface{}
metaValues [][]interface{}
attrMetaValues [][]interface{}
resourceMetaValues [][]interface{}
createTableValues [][]interface{}
expectAlerts int
compareOp string
matchType string
target float64
summaryAny []string
}{
{
targetUnit: "s",
yAxisUnit: "ns",
values: [][]interface{}{
{float64(572588400), "attr", time.Now()}, // 0.57 seconds
{float64(572386400), "attr", time.Now().Add(1 * time.Second)}, // 0.57 seconds
{float64(300947400), "attr", time.Now().Add(2 * time.Second)}, // 0.3 seconds
{float64(299316000), "attr", time.Now().Add(3 * time.Second)}, // 0.3 seconds
{float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 0.06 seconds
},
metaValues: [][]interface{}{},
createTableValues: [][]interface{}{
{"statement"},
},
attrMetaValues: [][]interface{}{},
resourceMetaValues: [][]interface{}{},
expectAlerts: 0,
compareOp: "1", // Above
matchType: "1", // Once
target: 1, // 1 second
},
{
targetUnit: "ms",
yAxisUnit: "ns",
values: [][]interface{}{
{float64(572588400), "attr", time.Now()}, // 572.58 ms
{float64(572386400), "attr", time.Now().Add(1 * time.Second)}, // 572.38 ms
{float64(300947400), "attr", time.Now().Add(2 * time.Second)}, // 300.94 ms
{float64(299316000), "attr", time.Now().Add(3 * time.Second)}, // 299.31 ms
{float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 66.64 ms
},
metaValues: [][]interface{}{},
createTableValues: [][]interface{}{
{"statement"},
},
attrMetaValues: [][]interface{}{},
resourceMetaValues: [][]interface{}{},
expectAlerts: 4,
compareOp: "1", // Above
matchType: "1", // Once
target: 200, // 200 ms
summaryAny: []string{
"observed metric value is 299 ms",
"the observed metric value is 573 ms",
"the observed metric value is 572 ms",
"the observed metric value is 301 ms",
},
},
{
targetUnit: "decgbytes",
yAxisUnit: "bytes",
values: [][]interface{}{
{float64(2863284053), "attr", time.Now()}, // 2.86 GB
{float64(2863388842), "attr", time.Now().Add(1 * time.Second)}, // 2.86 GB
{float64(300947400), "attr", time.Now().Add(2 * time.Second)}, // 0.3 GB
{float64(299316000), "attr", time.Now().Add(3 * time.Second)}, // 0.3 GB
{float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 66.64 MB
},
metaValues: [][]interface{}{},
createTableValues: [][]interface{}{
{"statement"},
},
attrMetaValues: [][]interface{}{},
resourceMetaValues: [][]interface{}{},
expectAlerts: 0,
compareOp: "1", // Above
matchType: "1", // Once
target: 200, // 200 GB
},
}
)
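As a sanity check on these fixtures: 572588400 ns is about 0.57 s, below the 1 s target of the first case, so no alert fires; the same sample is about 572.59 ms, and four of the five samples exceed the second case's 200 ms target, matching expectAlerts: 4; and 2863284053 bytes is about 2.86 decimal GB, well under the 200 GB target of the third case.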

View File

@@ -178,9 +178,12 @@ type Telemetry struct {
patTokenUser bool
mutex sync.RWMutex
alertsInfoCallback func(ctx context.Context) (*model.AlertsInfo, error)
userCountCallback func(ctx context.Context) (int, error)
userRoleCallback func(ctx context.Context, groupId string) (string, error)
alertsInfoCallback func(ctx context.Context) (*model.AlertsInfo, error)
userCountCallback func(ctx context.Context) (int, error)
userRoleCallback func(ctx context.Context, groupId string) (string, error)
getUsersCallback func(ctx context.Context) ([]model.UserPayload, *model.ApiError)
dashboardsInfoCallback func(ctx context.Context) (*model.DashboardsInfo, error)
savedViewsInfoCallback func(ctx context.Context) (*model.SavedViewsInfo, error)
}
func (a *Telemetry) SetAlertsInfoCallback(callback func(ctx context.Context) (*model.AlertsInfo, error)) {
@@ -195,6 +198,18 @@ func (a *Telemetry) SetUserRoleCallback(callback func(ctx context.Context, group
a.userRoleCallback = callback
}
func (a *Telemetry) SetGetUsersCallback(callback func(ctx context.Context) ([]model.UserPayload, *model.ApiError)) {
a.getUsersCallback = callback
}
func (a *Telemetry) SetSavedViewsInfoCallback(callback func(ctx context.Context) (*model.SavedViewsInfo, error)) {
a.savedViewsInfoCallback = callback
}
func (a *Telemetry) SetDashboardsInfoCallback(callback func(ctx context.Context) (*model.DashboardsInfo, error)) {
a.dashboardsInfoCallback = callback
}
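A hypothetical wiring sketch for these callbacks; the singleton accessor and the concrete providers are assumptions, since the registration site is not part of this diff:

// Somewhere at server start-up (fragment, hypothetical):
t := telemetry.GetInstance()
t.SetGetUsersCallback(func(ctx context.Context) ([]model.UserPayload, *model.ApiError) {
	return dao.DB().GetUsers(ctx) // assumption: users come from the relational store, not the CH reader
})
t.SetDashboardsInfoCallback(dashboards.GetDashboardsInfo) // assumed provider
t.SetSavedViewsInfoCallback(explorer.GetSavedViewsInfo)   // assumed provider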
func createTelemetry() {
// Do not do anything in CI (not even resolving the outbound IP address)
if testing.Testing() {
@@ -296,7 +311,7 @@ func createTelemetry() {
data[key] = value
}
users, apiErr := telemetry.reader.GetUsers(ctx)
users, apiErr := telemetry.getUsersCallback(ctx)
if apiErr == nil {
for _, user := range users {
if user.Email == DEFAULT_CLOUD_EMAIL {
@@ -308,65 +323,48 @@ func createTelemetry() {
alertsInfo, err := telemetry.alertsInfoCallback(ctx)
if err == nil {
dashboardsInfo, err := telemetry.reader.GetDashboardsInfo(ctx)
dashboardsInfo, err := telemetry.dashboardsInfoCallback(ctx)
if err == nil {
channels, err := telemetry.reader.GetChannels()
savedViewsInfo, err := telemetry.savedViewsInfoCallback(ctx)
if err == nil {
for _, channel := range *channels {
switch channel.Type {
case "slack":
alertsInfo.SlackChannels++
case "webhook":
alertsInfo.WebHookChannels++
case "pagerduty":
alertsInfo.PagerDutyChannels++
case "opsgenie":
alertsInfo.OpsGenieChannels++
case "email":
alertsInfo.EmailChannels++
case "msteams":
alertsInfo.MSTeamsChannels++
}
dashboardsAlertsData := map[string]interface{}{
"totalDashboards": dashboardsInfo.TotalDashboards,
"totalDashboardsWithPanelAndName": dashboardsInfo.TotalDashboardsWithPanelAndName,
"dashboardNames": dashboardsInfo.DashboardNames,
"alertNames": alertsInfo.AlertNames,
"logsBasedPanels": dashboardsInfo.LogsBasedPanels,
"metricBasedPanels": dashboardsInfo.MetricBasedPanels,
"tracesBasedPanels": dashboardsInfo.TracesBasedPanels,
"dashboardsWithTSV2": dashboardsInfo.QueriesWithTSV2,
"dashboardWithLogsChQuery": dashboardsInfo.DashboardsWithLogsChQuery,
"totalAlerts": alertsInfo.TotalAlerts,
"alertsWithTSV2": alertsInfo.AlertsWithTSV2,
"logsBasedAlerts": alertsInfo.LogsBasedAlerts,
"metricBasedAlerts": alertsInfo.MetricBasedAlerts,
"tracesBasedAlerts": alertsInfo.TracesBasedAlerts,
"totalChannels": alertsInfo.TotalChannels,
"totalSavedViews": savedViewsInfo.TotalSavedViews,
"logsSavedViews": savedViewsInfo.LogsSavedViews,
"tracesSavedViews": savedViewsInfo.TracesSavedViews,
"slackChannels": alertsInfo.SlackChannels,
"webHookChannels": alertsInfo.WebHookChannels,
"pagerDutyChannels": alertsInfo.PagerDutyChannels,
"opsGenieChannels": alertsInfo.OpsGenieChannels,
"emailChannels": alertsInfo.EmailChannels,
"msteamsChannels": alertsInfo.MSTeamsChannels,
"metricsBuilderQueries": alertsInfo.MetricsBuilderQueries,
"metricsClickHouseQueries": alertsInfo.MetricsClickHouseQueries,
"metricsPrometheusQueries": alertsInfo.MetricsPrometheusQueries,
"spanMetricsPrometheusQueries": alertsInfo.SpanMetricsPrometheusQueries,
"alertsWithLogsChQuery": alertsInfo.AlertsWithLogsChQuery,
}
savedViewsInfo, err := telemetry.reader.GetSavedViewsInfo(ctx)
if err == nil {
dashboardsAlertsData := map[string]interface{}{
"totalDashboards": dashboardsInfo.TotalDashboards,
"totalDashboardsWithPanelAndName": dashboardsInfo.TotalDashboardsWithPanelAndName,
"dashboardNames": dashboardsInfo.DashboardNames,
"alertNames": alertsInfo.AlertNames,
"logsBasedPanels": dashboardsInfo.LogsBasedPanels,
"metricBasedPanels": dashboardsInfo.MetricBasedPanels,
"tracesBasedPanels": dashboardsInfo.TracesBasedPanels,
"dashboardsWithTSV2": dashboardsInfo.QueriesWithTSV2,
"totalAlerts": alertsInfo.TotalAlerts,
"alertsWithTSV2": alertsInfo.AlertsWithTSV2,
"logsBasedAlerts": alertsInfo.LogsBasedAlerts,
"metricBasedAlerts": alertsInfo.MetricBasedAlerts,
"tracesBasedAlerts": alertsInfo.TracesBasedAlerts,
"totalChannels": len(*channels),
"totalSavedViews": savedViewsInfo.TotalSavedViews,
"logsSavedViews": savedViewsInfo.LogsSavedViews,
"tracesSavedViews": savedViewsInfo.TracesSavedViews,
"slackChannels": alertsInfo.SlackChannels,
"webHookChannels": alertsInfo.WebHookChannels,
"pagerDutyChannels": alertsInfo.PagerDutyChannels,
"opsGenieChannels": alertsInfo.OpsGenieChannels,
"emailChannels": alertsInfo.EmailChannels,
"msteamsChannels": alertsInfo.MSTeamsChannels,
"metricsBuilderQueries": alertsInfo.MetricsBuilderQueries,
"metricsClickHouseQueries": alertsInfo.MetricsClickHouseQueries,
"metricsPrometheusQueries": alertsInfo.MetricsPrometheusQueries,
"spanMetricsPrometheusQueries": alertsInfo.SpanMetricsPrometheusQueries,
}
// send event only if there are dashboards or alerts or channels
if (dashboardsInfo.TotalDashboards > 0 || alertsInfo.TotalAlerts > 0 || len(*channels) > 0 || savedViewsInfo.TotalSavedViews > 0) && apiErr == nil {
for _, user := range users {
if user.Email == DEFAULT_CLOUD_EMAIL {
continue
}
telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, dashboardsAlertsData, user.Email, false, false)
// send event only if there are dashboards or alerts or channels
if (dashboardsInfo.TotalDashboards > 0 || alertsInfo.TotalAlerts > 0 || alertsInfo.TotalChannels > 0 || savedViewsInfo.TotalSavedViews > 0) && apiErr == nil {
for _, user := range users {
if user.Email == DEFAULT_CLOUD_EMAIL {
continue
}
telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, dashboardsAlertsData, user.Email, false, false)
}
}
}
@@ -450,11 +448,9 @@ func getOutboundIP() string {
}
defer resp.Body.Close()
ipBody, err := io.ReadAll(resp.Body)
if err == nil {
ipBody, err := io.ReadAll(resp.Body)
if err == nil {
ip = ipBody
}
ip = ipBody
}
return string(ip)

View File

@@ -186,7 +186,7 @@ func (tb *FilterSuggestionsTestBed) mockAttribValuesQueryResponse(
{Type: "Nullable(Float64)", Name: "float64TagValue"},
}
expectedAttribKeysInQuery := []string{}
expectedAttribKeysInQuery := []any{}
mockResultRows := [][]any{}
for idx, attrib := range expectedAttribs {
expectedAttribKeysInQuery = append(expectedAttribKeysInQuery, attrib.Key)
@@ -198,8 +198,8 @@ func (tb *FilterSuggestionsTestBed) mockAttribValuesQueryResponse(
}
tb.mockClickhouse.ExpectQuery(
"select.*tagKey.*stringTagValue.*int64TagValue.*float64TagValue.*distributed_tag_attributes.*tagKey.*in.*",
).WithArgs(expectedAttribKeysInQuery).WillReturnRows(mockhouse.NewRows(resultCols, mockResultRows))
"select.*tagKey.*stringTagValue.*int64TagValue.*float64TagValue.*distributed_tag_attributes.*tagKey",
).WithArgs(expectedAttribKeysInQuery...).WillReturnRows(mockhouse.NewRows(resultCols, mockResultRows))
}
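The two edits above are linked: WithArgs is variadic, so the expected keys have to be collected as []any and spread with ..., otherwise the entire slice would be bound as a single query argument. Schematically (attribute keys illustrative):

expectedAttribKeysInQuery := []any{"container.name", "http.method"}
tb.mockClickhouse.ExpectQuery(
	"select.*tagKey.*distributed_tag_attributes.*tagKey",
).WithArgs(expectedAttribKeysInQuery...).WillReturnRows(mockhouse.NewRows(resultCols, mockResultRows))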
type FilterSuggestionsTestBed struct {

View File

@@ -14,6 +14,8 @@ import (
// ValidateAndCastValue validates and casts the value of a key to the corresponding data type of the key
func ValidateAndCastValue(v interface{}, dataType v3.AttributeKeyDataType) (interface{}, error) {
// get the actual value if it's a pointer
v = getPointerValue(v)
switch dataType {
case v3.AttributeKeyDataTypeString:
switch x := v.(type) {
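getPointerValue itself is not part of this hunk; a minimal sketch of what it presumably does (the real helper may cover more pointer types, e.g. via reflection):

package utils

// getPointerValue unwraps common typed pointers so the data-type switch
// in ValidateAndCastValue sees plain values; nil pointers come back as
// nil. Hypothetical reconstruction.
func getPointerValue(v interface{}) interface{} {
	switch x := v.(type) {
	case *string:
		if x != nil {
			return *x
		}
	case *bool:
		if x != nil {
			return *x
		}
	case *int64:
		if x != nil {
			return *x
		}
	case *float64:
		if x != nil {
			return *x
		}
	default:
		return v
	}
	return nil
}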

View File

@@ -0,0 +1,38 @@
package utils
const HOUR_NANO = int64(3600000000000)
type LogsListTsRange struct {
Start int64
End int64
}
func GetLogsListTsRanges(start, end int64) []LogsListTsRange {
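// Walks backward from `end`, emitting windows that double in size
// (1h, 2h, 4h, ...) until `start` is covered. For a total span of one
// hour or less the result is empty, so callers presumably query the
// full range directly. Newest-first ordering lets the logs list query
// stop early once enough rows have been found.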
startNano := GetEpochNanoSecs(start)
endNano := GetEpochNanoSecs(end)
result := []LogsListTsRange{}
if endNano-startNano > HOUR_NANO {
bucket := HOUR_NANO
tStartNano := endNano - bucket
complete := false
for {
result = append(result, LogsListTsRange{Start: tStartNano, End: endNano})
if complete {
break
}
bucket = bucket * 2
endNano = tStartNano
tStartNano = tStartNano - bucket
// break condition
if tStartNano <= startNano {
complete = true
tStartNano = startNano
}
}
}
return result
}

View File

@@ -0,0 +1,49 @@
package utils
import "testing"
func TestLogsListTsRange(t *testing.T) {
startEndData := []struct {
name string
start int64
end int64
res []LogsListTsRange
}{
{
name: "testing for less than one hour",
start: 1722262800000000000, // July 29, 2024 7:50:00 PM
end: 1722263800000000000, // July 29, 2024 8:06:40 PM
res: []LogsListTsRange{},
},
{
name: "testing for more than one hour",
start: 1722255800000000000, // July 29, 2024 5:53:20 PM
end: 1722262800000000000, // July 29, 2024 7:50:00 PM
res: []LogsListTsRange{
{1722259200000000000, 1722262800000000000}, // July 29, 2024 6:50:00 PM - July 29, 2024 7:50:00 PM
{1722255800000000000, 1722259200000000000}, // July 29, 2024 5:53:20 PM - July 29, 2024 6:50:00 PM
},
},
{
name: "testing for 1 day",
start: 1722171576000000000,
end: 1722262800000000000,
res: []LogsListTsRange{
{1722259200000000000, 1722262800000000000}, // July 29, 2024 6:50:00 PM - July 29, 2024 7:50:00 PM
{1722252000000000000, 1722259200000000000}, // July 29, 2024 4:50:00 PM - July 29, 2024 6:50:00 PM
{1722237600000000000, 1722252000000000000}, // July 29, 2024 12:50:00 PM - July 29, 2024 4:50:00 PM
{1722208800000000000, 1722237600000000000}, // July 29, 2024 4:50:00 AM - July 29, 2024 12:50:00 PM
{1722171576000000000, 1722208800000000000}, // July 28, 2024 6:29:36 PM - July 29, 2024 4:50:00 AM
},
},
}
for _, test := range startEndData {
res := GetLogsListTsRanges(test.start, test.end)
for i, v := range res {
if test.res[i].Start != v.Start || test.res[i].End != v.End {
t.Errorf("expected range was %v - %v, got %v - %v", v.Start, v.End, test.res[i].Start, test.res[i].End)
}
}
}
}

signoz-core-ui Submodule

Submodule signoz-core-ui added at f8c925d842