Compare commits

...

24 Commits

Author SHA1 Message Date
Vikrant Gupta
e77a6f4d7a feat: send last log line time stamp for timestamp order-by desc (#5968)
* feat: send last log line time stamp for timestamp orderby desc

* chore: little cleanup
2024-09-16 10:06:09 +05:30
Srikanth Chekuri
a023a7514e chore: move analytics related methods from CH reader to their own mod… (#5935) 2024-09-14 13:23:49 +05:30
Vikrant Gupta
3573c0863c feat: add support to configure units for pie chart values (#5960)
* feat: add units for pie chart

* chore: set the default to none in case no unit present

* chore: rename the y axis unit to unit

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-09-14 13:11:04 +05:30
Srikanth Chekuri
b444c1e6b1 fix: do not use removed column in traces clickhouse query (#5953) 2024-09-13 18:20:37 +05:30
Srikanth Chekuri
5698628839 chore: move some structs out of v3 (#5932) 2024-09-13 18:10:49 +05:30
Srikanth Chekuri
3596f73fb1 chore: add anomaly provider interface (#5856) 2024-09-13 18:06:20 +05:30
Srikanth Chekuri
5b22490d6d chore: improve error message readability (#5628) 2024-09-13 18:01:37 +05:30
Srikanth Chekuri
39f9fc6900 fix: missing related logs or traces links in alert notification (#5946) 2024-09-13 17:30:02 +05:30
Nityananda Gohain
f854cdd9d3 feat: collect telemetry for ch log queries in alerts and dashboards (#5967)
* feat: collect telemetry for ch log queries in alerts and dashboards

* feat: consider local table as well

* fix: address pr comments
2024-09-13 17:15:03 +05:30
Nityananda Gohain
011b2167ba Integrate V4 QB (#5914)
* feat: logsV4 initial refactoring

* feat: filter_query builder with tests added

* feat: all functions of v4 refactored

* fix: tests fixed

* feat: logs list API, logic update for better perf

* feat: integrate v4 qb

* fix: pass flag

* fix: update select for table panel

* fix: tests updated with better examples of limit and group by

* fix: resource filter support in live tail

* fix: v4 livetail api

* fix: changes for casting pointer value

* fix: reader options updated

* feat: cleanup and use flag

* feat: restrict new list api to single query

* fix: move getTsRanges to utils

* fix: address pr comments
2024-09-13 17:04:22 +05:30
Srikanth Chekuri
a5f3a189f8 chore: move traces builder query attributes enrichment before query prep (#5917) 2024-09-13 16:43:56 +05:30
Vikrant Gupta
3fdfb51e02 chore: deprecate clarity from frontend (#5962) 2024-09-13 13:55:45 +05:30
Vikrant Gupta
43577c7ead feat: group by severity logs explorer page by default (#5772)
* feat: initial setup for group by severity logs explorer page

* chore: reduce the height of the histogram

* chore: pr cleanup

* chore: minor color update

* chore: clean the PR

* chore: clean the PR

* chore: better base handling

* fix: append query names to the legends  in case of multiple queries

* feat: make the changes only for list view and add back legends
2024-09-13 13:47:08 +05:30
Vikrant Gupta
6661aa7686 chore: update the filter in / filter out operators (#5923)
* chore: update the filter in / filter out operators

* fix: handle cases for old logs explorer
2024-09-13 13:43:40 +05:30
Vikrant Gupta
8d54e3b766 fix: dashboard list page showing older data (#5961) 2024-09-13 13:41:55 +05:30
Sudeep MP
6c446226eb refactor(ListAlert): update styles and button layout (#5931) 2024-09-13 01:03:22 +05:30
Nityananda Gohain
90b5f88413 feat: logs list API, logic update for better perf (#5912)
* feat: logsV4 initial refactoring

* feat: filter_query builder with tests added

* feat: all functions of v4 refactored

* fix: tests fixed

* feat: logs list API, logic update for better perf

* fix: update select for table panel

* fix: tests updated with better examples of limit and group by

* fix: resource filter support in live tail

* feat: cleanup and use flag

* feat: restrict new list api to single query

* fix: move getTsRanges to utils
2024-09-12 21:34:27 +05:30
Srikanth Chekuri
381a4de88a chore: use json formatting for ClickHouse logs (#5241)
Co-authored-by: Prashant Shahi <prashant@signoz.io>
2024-09-12 12:48:50 +05:30
Nityananda Gohain
10ebd0cad6 feat: use new schema flag (#5930) 2024-09-12 10:58:07 +05:30
Nityananda Gohain
6e7f04b492 logs v4 qb refactor (#5908)
* feat: logsV4 initial refactoring

* feat: filter_query builder with tests added

* feat: all functions of v4 refactored

* fix: tests fixed

* fix: update select for table panel

* fix: tests updated with better examples of limit and group by

* fix: resource filter support in live tail

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2024-09-12 09:48:09 +05:30
Srikanth Chekuri
20ac75e3d2 chore: json logs for collector (#5240) 2024-09-12 00:57:48 +05:30
Shaheer Kochai
d6b75d76ca fix: add support for long texts in alert history page (#5895) 2024-09-11 19:02:17 +04:30
Shaheer Kochai
41d3342a42 feat: alert history feedback changes (#5903)
* fix: make the default offset 0

* chore: add beta tag to alert history

* fix: don't add 5 minutes earlier to the timeline graph data
2024-09-11 18:16:41 +04:30
Shaheer Kochai
f3cb3b9840 fix: loading and no-data states showing in loading state of alert edit/overview (#5887) 2024-09-11 18:14:22 +04:30
103 changed files with 4568 additions and 1801 deletions

View File

@@ -11,9 +11,9 @@ jobs:
check-no-ee-references:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Run check
run: make check-no-ee-references
- uses: actions/checkout@v4
- name: Run check
run: make check-no-ee-references
build-frontend:
runs-on: ubuntu-latest
@@ -43,7 +43,6 @@ jobs:
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
- name: Install dependencies
run: cd frontend && yarn install
- name: Run ESLint

View File

@@ -9,7 +9,6 @@ on:
- v*
jobs:
image-build-and-push-query-service:
runs-on: ubuntu-latest
steps:
@@ -151,7 +150,6 @@ jobs:
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env

View File

@@ -23,6 +23,9 @@
[1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
-->
<level>information</level>
<formatting>
<type>json</type>
</formatting>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<!-- Rotation policy

View File

@@ -154,6 +154,8 @@ extensions:
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages, pprof]

View File

@@ -23,6 +23,9 @@
[1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
-->
<level>information</level>
<formatting>
<type>json</type>
</formatting>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<!-- Rotation policy

View File

@@ -158,6 +158,8 @@ exporters:
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions:

View File

@@ -0,0 +1,32 @@
package anomaly
import (
"context"
)
type DailyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*DailyProvider)(nil)
func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &dp.BaseSeasonalProvider
}
// NewDailyProvider uses the same generic option type
func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
dp := &DailyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(dp)
}
return dp
}
func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
return nil, nil
}

View File

@@ -0,0 +1,32 @@
package anomaly
import (
"context"
)
type HourlyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*HourlyProvider)(nil)
func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &hp.BaseSeasonalProvider
}
// NewHourlyProvider now uses the generic option type
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
hp := &HourlyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(hp)
}
return hp
}
func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
return nil, nil
}

View File

@@ -0,0 +1,188 @@
package anomaly
import (
"math"
"time"
"go.signoz.io/signoz/pkg/query-service/common"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
type Seasonality string
const (
SeasonalityHourly Seasonality = "hourly"
SeasonalityDaily Seasonality = "daily"
SeasonalityWeekly Seasonality = "weekly"
)
func (s Seasonality) IsValid() bool {
switch s {
case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
return true
default:
return false
}
}
type GetAnomaliesRequest struct {
Params *v3.QueryRangeParamsV3
Seasonality Seasonality
}
type GetAnomaliesResponse struct {
Results []*v3.Result
}
// anomalyQueryParams holds the parameters for anomaly detection.
//
// prediction = avg(past_period_query) + avg(current_season_query) - avg(past_season_query)
//              (rounded value for the past period)  +  (seasonal growth)
//
// score = abs(value - prediction) / stddev(current_season_query)
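//
// A worked example (numbers assumed for illustration only): with
// avg(past_period_query) = 100, avg(current_season_query) = 120 and
// avg(past_season_query) = 110, prediction = 100 + 120 - 110 = 110; an
// observed value of 150 with stddev(current_season_query) = 10 gives
// score = abs(150 - 110) / 10 = 4.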
type anomalyQueryParams struct {
// CurrentPeriodQuery is the query range params for the period the user is looking at, i.e. the eval window
// Example: (now-5m, now), (now-30m, now), (now-1h, now)
// The results obtained from this query are used to compare with predicted values
// and to detect anomalies
CurrentPeriodQuery *v3.QueryRangeParamsV3
// PastPeriodQuery is the query range params for past seasonal period
// Example: For weekly seasonality, (now-1w-4h-5m, now-1w)
// : For daily seasonality, (now-1d-2h-5m, now-1d)
// : For hourly seasonality, (now-1h-30m-5m, now-1h)
PastPeriodQuery *v3.QueryRangeParamsV3
// CurrentSeasonQuery is the query range params for the current season
// Example: For weekly seasonality, this is the query range params for the (now-1w-5m, now)
// : For daily seasonality, this is the query range params for the (now-1d-5m, now)
// : For hourly seasonality, this is the query range params for the (now-1h-5m, now)
CurrentSeasonQuery *v3.QueryRangeParamsV3
// PastSeasonQuery is the query range params for the season preceding the current one
// Example: For weekly seasonality, this is the query range params for the (now-2w-5m, now-1w)
// : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
// : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
PastSeasonQuery *v3.QueryRangeParamsV3
}
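// copyCompositeQuery returns a copy of the request's composite query with a
// fresh BuilderQueries map, so the per-window step adjustments below do not
// mutate the caller's request. Pointer and slice fields nested inside each
// BuilderQuery are still shared with the original.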
func copyCompositeQuery(req *v3.QueryRangeParamsV3) *v3.CompositeQuery {
deepCopyCompositeQuery := *req.CompositeQuery
deepCopyCompositeQuery.BuilderQueries = make(map[string]*v3.BuilderQuery)
for k, v := range req.CompositeQuery.BuilderQueries {
query := *v
deepCopyCompositeQuery.BuilderQueries[k] = &query
}
return &deepCopyCompositeQuery
}
func updateStepInterval(req *v3.QueryRangeParamsV3) {
start := req.Start
end := req.End
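// Set the overall step to the larger of the minimum allowed step interval and 60 seconds.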
req.Step = int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60))
for _, q := range req.CompositeQuery.BuilderQueries {
// If the step interval is less than the minimum allowed step interval, set it to the minimum allowed step interval
if minStep := common.MinAllowedStepInterval(start, end); q.StepInterval < minStep {
q.StepInterval = minStep
}
}
}
func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonality) *anomalyQueryParams {
start := req.Start
end := req.End
currentPeriodQuery := &v3.QueryRangeParamsV3{
Start: start,
End: end,
CompositeQuery: req.CompositeQuery,
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(currentPeriodQuery)
var pastPeriodStart, pastPeriodEnd int64
switch seasonality {
// for weekly seasonality, we fetch the data from the past week with a 4-hour offset
case SeasonalityWeekly:
pastPeriodStart = start - 166*time.Hour.Milliseconds() - 4*time.Hour.Milliseconds()
pastPeriodEnd = end - 166*time.Hour.Milliseconds()
// for daily seasonality, we fetch the data from the past day with a 2-hour offset
case SeasonalityDaily:
pastPeriodStart = start - 23*time.Hour.Milliseconds() - 2*time.Hour.Milliseconds()
pastPeriodEnd = end - 23*time.Hour.Milliseconds()
// for hourly seasonality, we fetch the data from the past hour with a 30-minute offset
case SeasonalityHourly:
pastPeriodStart = start - 1*time.Hour.Milliseconds() - 30*time.Minute.Milliseconds()
pastPeriodEnd = end - 1*time.Hour.Milliseconds()
}
pastPeriodQuery := &v3.QueryRangeParamsV3{
Start: pastPeriodStart,
End: pastPeriodEnd,
CompositeQuery: copyCompositeQuery(req),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(pastPeriodQuery)
// seasonality growth trend
var currentGrowthPeriodStart, currentGrowthPeriodEnd int64
switch seasonality {
case SeasonalityWeekly:
currentGrowthPeriodStart = start - 7*24*time.Hour.Milliseconds()
currentGrowthPeriodEnd = end
case SeasonalityDaily:
currentGrowthPeriodStart = start - 23*time.Hour.Milliseconds()
currentGrowthPeriodEnd = end
case SeasonalityHourly:
currentGrowthPeriodStart = start - 1*time.Hour.Milliseconds()
currentGrowthPeriodEnd = end
}
currentGrowthQuery := &v3.QueryRangeParamsV3{
Start: currentGrowthPeriodStart,
End: currentGrowthPeriodEnd,
CompositeQuery: copyCompositeQuery(req),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(currentGrowthQuery)
var pastGrowthPeriodStart, pastGrowthPeriodEnd int64
switch seasonality {
case SeasonalityWeekly:
pastGrowthPeriodStart = start - 14*24*time.Hour.Milliseconds()
pastGrowthPeriodEnd = start - 7*24*time.Hour.Milliseconds()
case SeasonalityDaily:
pastGrowthPeriodStart = start - 2*time.Hour.Milliseconds()
pastGrowthPeriodEnd = start - 1*time.Hour.Milliseconds()
case SeasonalityHourly:
pastGrowthPeriodStart = start - 2*time.Hour.Milliseconds()
pastGrowthPeriodEnd = start - 1*time.Hour.Milliseconds()
}
pastGrowthQuery := &v3.QueryRangeParamsV3{
Start: pastGrowthPeriodStart,
End: pastGrowthPeriodEnd,
CompositeQuery: copyCompositeQuery(req),
Variables: make(map[string]interface{}, 0),
NoCache: false,
}
updateStepInterval(pastGrowthQuery)
return &anomalyQueryParams{
CurrentPeriodQuery: currentPeriodQuery,
PastPeriodQuery: pastPeriodQuery,
CurrentSeasonQuery: currentGrowthQuery,
PastSeasonQuery: pastGrowthQuery,
}
}
type anomalyQueryResults struct {
CurrentPeriodResults []*v3.Result
PastPeriodResults []*v3.Result
CurrentSeasonResults []*v3.Result
PastSeasonResults []*v3.Result
}

View File

@@ -0,0 +1,9 @@
package anomaly
import (
"context"
)
type Provider interface {
GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error)
}

View File

@@ -0,0 +1,229 @@
package anomaly
import (
"context"
"math"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/interfaces"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
)
// BaseProvider is an interface that includes common methods for all provider types
type BaseProvider interface {
GetBaseSeasonalProvider() *BaseSeasonalProvider
}
// GenericProviderOption is a generic type for provider options
type GenericProviderOption[T BaseProvider] func(T)
func WithCache[T BaseProvider](cache cache.Cache) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().cache = cache
}
}
func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().keyGenerator = keyGenerator
}
}
func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().ff = ff
}
}
func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().reader = reader
}
}
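// Usage sketch (assumed wiring, not part of this diff): the generic option
// type lets every concrete provider reuse the same With* helpers while
// preserving its concrete type, e.g.
//
//	wp := NewWeeklyProvider(
//		WithCache[*WeeklyProvider](c),       // c: some cache.Cache
//		WithReader[*WeeklyProvider](reader), // reader: some interfaces.Reader
//	)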
type BaseSeasonalProvider struct {
querierV2 interfaces.Querier
reader interfaces.Reader
cache cache.Cache
keyGenerator cache.KeyGenerator
ff interfaces.FeatureLookup
}
func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
if !req.Seasonality.IsValid() {
req.Seasonality = SeasonalityWeekly
}
return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}
func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) {
currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery, nil)
if err != nil {
return nil, err
}
pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery, nil)
if err != nil {
return nil, err
}
currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery, nil)
if err != nil {
return nil, err
}
pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery, nil)
if err != nil {
return nil, err
}
return &anomalyQueryResults{
CurrentPeriodResults: currentPeriodResults,
PastPeriodResults: pastPeriodResults,
CurrentSeasonResults: currentSeasonResults,
PastSeasonResults: pastSeasonResults,
}, nil
}
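// getMatchingSeries returns the series in queryResult whose label set hashes
// to the same value as the given series' labels, or nil if there is no match.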
func (p *BaseSeasonalProvider) getMatchingSeries(queryResult *v3.Result, series *v3.Series) *v3.Series {
for _, curr := range queryResult.Series {
currLabels := labels.FromMap(curr.Labels)
seriesLabels := labels.FromMap(series.Labels)
if currLabels.Hash() == seriesLabels.Hash() {
return curr
}
}
return nil
}
func (p *BaseSeasonalProvider) getAvg(series *v3.Series) float64 {
var sum float64
for _, smpl := range series.Points {
sum += smpl.Value
}
return sum / float64(len(series.Points))
}
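// getStdDev returns the population standard deviation of the series' point values.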
func (p *BaseSeasonalProvider) getStdDev(series *v3.Series) float64 {
avg := p.getAvg(series)
var sum float64
for _, smpl := range series.Points {
sum += math.Pow(smpl.Value-avg, 2)
}
return math.Sqrt(sum / float64(len(series.Points)))
}
func (p *BaseSeasonalProvider) getPredictedSeries(series, prevSeries, currentSeasonSeries, pastSeasonSeries *v3.Series) *v3.Series {
predictedSeries := &v3.Series{
Labels: series.Labels,
LabelsArray: series.LabelsArray,
Points: []v3.Point{},
}
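// The predicted value below does not depend on curr, so every point in the
// predicted series carries the same value; only the timestamps differ.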
for _, curr := range series.Points {
predictedValue := p.getAvg(prevSeries) + p.getAvg(currentSeasonSeries) - p.getAvg(pastSeasonSeries)
predictedSeries.Points = append(predictedSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
Value: predictedValue,
})
}
return predictedSeries
}
func (p *BaseSeasonalProvider) getExpectedValue(_, prevSeries, currentSeasonSeries, pastSeasonSeries *v3.Series) float64 {
prevSeriesAvg := p.getAvg(prevSeries)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
zap.L().Debug("getExpectedValue",
zap.Float64("prevSeriesAvg", prevSeriesAvg),
zap.Float64("currentSeasonSeriesAvg", currentSeasonSeriesAvg),
zap.Float64("pastSeasonSeriesAvg", pastSeasonSeriesAvg),
zap.Float64("expectedValue", prevSeriesAvg+currentSeasonSeriesAvg-pastSeasonSeriesAvg),
)
return prevSeriesAvg + currentSeasonSeriesAvg - pastSeasonSeriesAvg
}
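// getScore returns the deviation of the observed value from the expected
// value, in units of the current season's standard deviation. Note the
// result is signed here, whereas the doc comment above uses abs(value - prediction).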
func (p *BaseSeasonalProvider) getScore(series, prevSeries, weekSeries, weekPrevSeries *v3.Series, value float64) float64 {
expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries)
return (value - expectedValue) / p.getStdDev(weekSeries)
}
func (p *BaseSeasonalProvider) getAnomalyScores(series, prevSeries, currentSeasonSeries, pastSeasonSeries *v3.Series) *v3.Series {
anomalyScoreSeries := &v3.Series{
Labels: series.Labels,
LabelsArray: series.LabelsArray,
Points: []v3.Point{},
}
for _, curr := range series.Points {
anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, curr.Value)
anomalyScoreSeries.Points = append(anomalyScoreSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
Value: anomalyScore,
})
}
return anomalyScoreSeries
}
func (p *BaseSeasonalProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
anomalyParams := p.getQueryParams(req)
anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
if err != nil {
return nil, err
}
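// Index each window's results by query name so the four result sets can be
// joined query-by-query, and then series-by-series via label matching below.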
currentPeriodResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.CurrentPeriodResults {
currentPeriodResultsMap[result.QueryName] = result
}
pastPeriodResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.PastPeriodResults {
pastPeriodResultsMap[result.QueryName] = result
}
currentSeasonResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.CurrentSeasonResults {
currentSeasonResultsMap[result.QueryName] = result
}
pastSeasonResultsMap := make(map[string]*v3.Result)
for _, result := range anomalyQueryResults.PastSeasonResults {
pastSeasonResultsMap[result.QueryName] = result
}
for _, result := range currentPeriodResultsMap {
pastPeriodResult, ok := pastPeriodResultsMap[result.QueryName]
if !ok {
continue
}
currentSeasonResult, ok := currentSeasonResultsMap[result.QueryName]
if !ok {
continue
}
pastSeasonResult, ok := pastSeasonResultsMap[result.QueryName]
if !ok {
continue
}
for _, series := range result.Series {
pastPeriodSeries := p.getMatchingSeries(pastPeriodResult, series)
currentSeasonSeries := p.getMatchingSeries(currentSeasonResult, series)
pastSeasonSeries := p.getMatchingSeries(pastSeasonResult, series)
predictedSeries := p.getPredictedSeries(series, pastPeriodSeries, currentSeasonSeries, pastSeasonSeries)
result.PredictedSeries = append(result.PredictedSeries, predictedSeries)
anomalyScoreSeries := p.getAnomalyScores(series, pastPeriodSeries, currentSeasonSeries, pastSeasonSeries)
result.AnomalyScores = append(result.AnomalyScores, anomalyScoreSeries)
}
}
return &GetAnomaliesResponse{
Results: anomalyQueryResults.CurrentPeriodResults,
}, nil
}

View File

@@ -0,0 +1,31 @@
package anomaly
import (
"context"
)
type WeeklyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*WeeklyProvider)(nil)
func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &wp.BaseSeasonalProvider
}
func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
wp := &WeeklyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(wp)
}
return wp
}
func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
return nil, nil
}

View File

@@ -38,7 +38,8 @@ type APIHandlerOptions struct {
Cache cache.Cache
Gateway *httputil.ReverseProxy
// Querier Influx Interval
FluxInterval time.Duration
FluxInterval time.Duration
UseLogsNewSchema bool
}
type APIHandler struct {
@@ -63,6 +64,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
LogsParsingPipelineController: opts.LogsParsingPipelineController,
Cache: opts.Cache,
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
})
if err != nil {

View File

@@ -1,401 +0,0 @@
package db
import (
"context"
"crypto/md5"
"encoding/json"
"fmt"
"reflect"
"regexp"
"sort"
"strings"
"time"
"go.signoz.io/signoz/ee/query-service/model"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
// GetMetricResultEE runs the query and returns list of time series
func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) {
defer utils.Elapsed("GetMetricResult", nil)()
zap.L().Info("Executing metric result query: ", zap.String("query", query))
var hash string
// If getSubTreeSpans function is used in the clickhouse query
if strings.Contains(query, "getSubTreeSpans(") {
var err error
query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash)
if err == fmt.Errorf("no spans found for the given query") {
return nil, "", nil
}
if err != nil {
return nil, "", err
}
}
rows, err := r.conn.Query(ctx, query)
if err != nil {
zap.L().Error("Error in processing query", zap.Error(err))
return nil, "", fmt.Errorf("error in processing query")
}
var (
columnTypes = rows.ColumnTypes()
columnNames = rows.Columns()
vars = make([]interface{}, len(columnTypes))
)
for i := range columnTypes {
vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
}
// when group by is applied, each combination in the cartesian product
// of attributes is a separate series. each item in metricPointsMap
// represents a unique series.
metricPointsMap := make(map[string][]basemodel.MetricPoint)
// attribute key-value pairs for each group selection
attributesMap := make(map[string]map[string]string)
defer rows.Close()
for rows.Next() {
if err := rows.Scan(vars...); err != nil {
return nil, "", err
}
var groupBy []string
var metricPoint basemodel.MetricPoint
groupAttributes := make(map[string]string)
// Assuming that the end result row contains a timestamp, a value, and optional labels
// Label key and value are both strings.
for idx, v := range vars {
colName := columnNames[idx]
switch v := v.(type) {
case *string:
// special case for returning all labels
if colName == "fullLabels" {
var metric map[string]string
err := json.Unmarshal([]byte(*v), &metric)
if err != nil {
return nil, "", err
}
for key, val := range metric {
groupBy = append(groupBy, val)
groupAttributes[key] = val
}
} else {
groupBy = append(groupBy, *v)
groupAttributes[colName] = *v
}
case *time.Time:
metricPoint.Timestamp = v.UnixMilli()
case *float64:
metricPoint.Value = *v
case **float64:
// ch seems to return this type when column is derived from
// SELECT count(*)/ SELECT count(*)
floatVal := *v
if floatVal != nil {
metricPoint.Value = *floatVal
}
case *float32:
float32Val := float32(*v)
metricPoint.Value = float64(float32Val)
case *uint8, *uint64, *uint16, *uint32:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
}
case *int8, *int16, *int32, *int64:
if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
}
default:
zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName))
}
}
sort.Strings(groupBy)
key := strings.Join(groupBy, "")
attributesMap[key] = groupAttributes
metricPointsMap[key] = append(metricPointsMap[key], metricPoint)
}
var seriesList []*basemodel.Series
for key := range metricPointsMap {
points := metricPointsMap[key]
// the first point in each series could be invalid since the
// aggregations are applied using a point from the previous series
if len(points) != 0 && len(points) > 1 {
points = points[1:]
}
attributes := attributesMap[key]
series := basemodel.Series{Labels: attributes, Points: points}
seriesList = append(seriesList, &series)
}
// err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash)
// if err != nil {
// zap.L().Error("Error in dropping temporary table: ", err)
// return nil, err
// }
if hash == "" {
return seriesList, hash, nil
} else {
return seriesList, "getSubTreeSpans" + hash, nil
}
}
func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) {
zap.L().Debug("Executing getSubTreeSpans function")
// str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;`
// process the query to fetch subTree query
var subtreeInput string
query, subtreeInput, hash = processQuery(query, hash)
err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash)
if err != nil {
zap.L().Error("Error in dropping temporary table", zap.Error(err))
return query, hash, err
}
// Create temporary table to store the getSubTreeSpans() results
zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash))
err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)")
if err != nil {
zap.L().Error("Error in creating temporary table", zap.Error(err))
return query, hash, err
}
var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse
getSpansSubQuery := subtreeInput
// Execute the subTree query
zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery))
err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery)
// zap.L().Info(getSpansSubQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, fmt.Errorf("error in processing sql query")
}
var searchScanResponses []basemodel.SearchSpanDBResponseItem
// TODO : @ankit: I think the algorithm does not need to assume that subtrees are from the same TraceID. We can take this as an improvement later.
// Fetch all the spans of the same TraceID so that we can build the subtree
modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
if len(getSpansSubQueryDBResponses) == 0 {
return query, hash, fmt.Errorf("no spans found for the given query")
}
zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", zap.String("modelQuery", modelQuery))
err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, fmt.Errorf("error in processing sql query")
}
// Process model to fetch the spans
zap.L().Debug("Processing model to fetch the spans")
searchSpanResponses := []basemodel.SearchSpanResponseItem{}
for _, item := range searchScanResponses {
var jsonItem basemodel.SearchSpanResponseItem
json.Unmarshal([]byte(item.Model), &jsonItem)
jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano())
if jsonItem.Events == nil {
jsonItem.Events = []string{}
}
searchSpanResponses = append(searchSpanResponses, jsonItem)
}
// Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash
// Use map to store pointer to the spans to avoid duplicates and save memory
zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses)
if err != nil {
zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err))
return query, hash, err
}
zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO getSubTreeSpans"+hash))
if err != nil {
zap.L().Error("Error in preparing batch statement", zap.Error(err))
return query, hash, err
}
for _, span := range treeSearchResponse {
var parentID string
if len(span.References) > 0 && span.References[0].RefType == "CHILD_OF" {
parentID = span.References[0].SpanId
}
err = statement.Append(
time.Unix(0, int64(span.TimeUnixNano)),
span.TraceID,
span.SpanID,
parentID,
span.RootSpanID,
span.ServiceName,
span.Name,
span.RootName,
uint64(span.DurationNano),
int8(span.Kind),
span.TagMap,
span.Events,
)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return query, hash, err
}
}
zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash))
err = statement.Send()
if err != nil {
zap.L().Error("Error in sending statement", zap.Error(err))
return query, hash, err
}
return query, hash, nil
}
//lint:ignore SA4009 the returned hash is fed to the query
func processQuery(query string, hash string) (string, string, string) {
re3 := regexp.MustCompile(`getSubTreeSpans`)
submatchall3 := re3.FindAllStringIndex(query, -1)
getSubtreeSpansMatchIndex := submatchall3[0][1]
query2countParenthesis := query[getSubtreeSpansMatchIndex:]
sqlCompleteIndex := 0
countParenthesisImbalance := 0
for i, char := range query2countParenthesis {
if string(char) == "(" {
countParenthesisImbalance += 1
}
if string(char) == ")" {
countParenthesisImbalance -= 1
}
if countParenthesisImbalance == 0 {
sqlCompleteIndex = i
break
}
}
subtreeInput := query2countParenthesis[1:sqlCompleteIndex]
// hash the subtreeInput
hmd5 := md5.Sum([]byte(subtreeInput))
hash = fmt.Sprintf("%x", hmd5)
// Reformat the query to use the getSubTreeSpans function
query = query[:getSubtreeSpansMatchIndex] + hash + " " + query2countParenthesis[sqlCompleteIndex+1:]
return query, subtreeInput, hash
}
// getSubTreeAlgorithm is an algorithm to build the subtrees of the spans and return the list of spans
func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse) (map[string]*basemodel.SearchSpanResponseItem, error) {
var spans []*model.SpanForTraceDetails
for _, spanItem := range payload {
var parentID string
if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
parentID = spanItem.References[0].SpanId
}
span := &model.SpanForTraceDetails{
TimeUnixNano: spanItem.TimeUnixNano,
SpanID: spanItem.SpanID,
TraceID: spanItem.TraceID,
ServiceName: spanItem.ServiceName,
Name: spanItem.Name,
Kind: spanItem.Kind,
DurationNano: spanItem.DurationNano,
TagMap: spanItem.TagMap,
ParentID: parentID,
Events: spanItem.Events,
HasError: spanItem.HasError,
}
spans = append(spans, span)
}
zap.L().Debug("Building Tree")
roots, err := buildSpanTrees(&spans)
if err != nil {
return nil, err
}
searchSpansResult := make(map[string]*basemodel.SearchSpanResponseItem)
// Every span fetched by the getSubTree input SQL query is considered a root
// For each root, get the subtree spans
for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses {
targetSpan := &model.SpanForTraceDetails{}
// zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses)))
// Search target span object in the tree
for _, root := range roots {
targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID)
if targetSpan != nil {
break
}
if err != nil {
zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err))
return nil, err
}
}
if targetSpan == nil {
return nil, nil
}
// Build subtree for the target span
// Mark the target span as root by setting parent ID as empty string
targetSpan.ParentID = ""
preParents := []*model.SpanForTraceDetails{targetSpan}
children := []*model.SpanForTraceDetails{}
// Get the subtree child spans
for i := 0; len(preParents) != 0; i++ {
parents := []*model.SpanForTraceDetails{}
for _, parent := range preParents {
children = append(children, parent.Children...)
parents = append(parents, parent.Children...)
}
preParents = parents
}
resultSpans := children
// Add the target span to the result spans
resultSpans = append(resultSpans, targetSpan)
for _, item := range resultSpans {
references := []basemodel.OtelSpanRef{
{
TraceId: item.TraceID,
SpanId: item.ParentID,
RefType: "CHILD_OF",
},
}
if item.Events == nil {
item.Events = []string{}
}
searchSpansResult[item.SpanID] = &basemodel.SearchSpanResponseItem{
TimeUnixNano: item.TimeUnixNano,
SpanID: item.SpanID,
TraceID: item.TraceID,
ServiceName: item.ServiceName,
Name: item.Name,
Kind: item.Kind,
References: references,
DurationNano: item.DurationNano,
TagMap: item.TagMap,
Events: item.Events,
HasError: item.HasError,
RootSpanID: getSpansSubQueryDBResponse.SpanID,
RootName: targetSpan.Name,
}
}
}
return searchSpansResult, nil
}

View File

@@ -25,8 +25,9 @@ func NewDataConnector(
maxOpenConns int,
dialTimeout time.Duration,
cluster string,
useLogsNewSchema bool,
) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster)
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
return &ClickhouseReader{
conn: ch.GetConn(),
appdb: localDB,

View File

@@ -77,6 +77,7 @@ type ServerOptions struct {
FluxInterval string
Cluster string
GatewayUrl string
UseLogsNewSchema bool
}
// Server runs HTTP api service
@@ -154,6 +155,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.MaxOpenConns,
serverOptions.DialTimeout,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
)
go qb.Start(readerReady)
reader = qb
@@ -176,7 +178,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
localDB,
reader,
serverOptions.DisableRules,
lm)
lm,
serverOptions.UseLogsNewSchema,
)
if err != nil {
return nil, err
@@ -265,6 +269,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
Cache: c,
FluxInterval: fluxInterval,
Gateway: gatewayProxy,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
}
apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -728,7 +733,8 @@ func makeRulesManager(
db *sqlx.DB,
ch baseint.Reader,
disableRules bool,
fm baseint.FeatureLookup) (*baserules.Manager, error) {
fm baseint.FeatureLookup,
useLogsNewSchema bool) (*baserules.Manager, error) {
// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -756,7 +762,8 @@ func makeRulesManager(
Reader: ch,
EvalDelay: baseconst.GetEvalDelay(),
PrepareTaskFunc: rules.PrepareTaskFunc,
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
}
// create Manager

View File

@@ -87,6 +87,7 @@ func main() {
var ruleRepoURL string
var cluster string
var useLogsNewSchema bool
var cacheConfigPath, fluxInterval string
var enableQueryServiceLogOTLPExport bool
var preferSpanMetrics bool
@@ -96,6 +97,7 @@ func main() {
var dialTimeout time.Duration
var gatewayUrl string
flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
@@ -134,6 +136,7 @@ func main() {
FluxInterval: fluxInterval,
Cluster: cluster,
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
}
// Read the jwt secret key

View File

@@ -20,6 +20,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.Rule,
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
)

View File

@@ -137,7 +137,6 @@ function App(): JSX.Element {
window.analytics.identify(email, sanitizedIdentifyPayload);
window.analytics.group(domain, groupTraits);
window.clarity('identify', email, name);
posthog?.identify(email, {
email,

View File

@@ -139,6 +139,7 @@ export const getGraphOptions = (
},
scales: {
x: {
stacked: isStacked,
grid: {
display: true,
color: getGridColor(),
@@ -165,6 +166,7 @@ export const getGraphOptions = (
ticks: { color: getAxisLabelColor(currentTheme) },
},
y: {
stacked: isStacked,
display: true,
grid: {
display: true,
@@ -178,9 +180,6 @@ export const getGraphOptions = (
},
},
},
stacked: {
display: isStacked === undefined ? false : 'auto',
},
},
elements: {
line: {

View File

@@ -15,7 +15,7 @@ function AddToQueryHOC({
}: AddToQueryHOCProps): JSX.Element {
const handleQueryAdd = (event: MouseEvent<HTMLDivElement>): void => {
event.stopPropagation();
onAddToQuery(fieldKey, fieldValue, OPERATORS.IN);
onAddToQuery(fieldKey, fieldValue, OPERATORS['=']);
};
const popOverContent = useMemo(() => <span>Add to query: {fieldKey}</span>, [

View File

@@ -26,17 +26,14 @@ function HorizontalTimelineGraph({
return [[], []];
}
// add a first and last entry to make sure the graph displays all the data
const FIVE_MINUTES_IN_SECONDS = 300;
// add an entry for the end time of the last entry to make sure the graph displays all the data
const timestamps = [
data[0].start / 1000 - FIVE_MINUTES_IN_SECONDS, // 5 minutes before the first entry
...data.map((item) => item.start / 1000),
data[data.length - 1].end / 1000, // end value of last entry
];
const states = [
ALERT_STATUS[data[0].state], // Same state as the first entry
...data.map((item) => ALERT_STATUS[item.state]),
ALERT_STATUS[data[data.length - 1].state], // Same state as the last entry
];

View File

@@ -95,7 +95,7 @@ export const traceAlertDefaults: AlertDef = {
chQueries: {
A: {
name: 'A',
query: `SELECT \n\ttoStartOfInterval(timestamp, INTERVAL 1 MINUTE) AS interval, \n\ttagMap['peer.service'] AS op_name, \n\ttoFloat64(avg(durationNano)) AS value \nFROM signoz_traces.distributed_signoz_index_v2 \nWHERE tagMap['peer.service']!='' \nAND timestamp BETWEEN {{.start_datetime}} AND {{.end_datetime}} \nGROUP BY (op_name, interval);\n\n-- available variables:\n-- \t{{.start_datetime}}\n-- \t{{.end_datetime}}\n\n-- required column alias:\n-- \tvalue\n-- \tinterval`,
query: `SELECT \n\ttoStartOfInterval(timestamp, INTERVAL 1 MINUTE) AS interval, \n\tstringTagMap['peer.service'] AS op_name, \n\ttoFloat64(avg(durationNano)) AS value \nFROM signoz_traces.distributed_signoz_index_v2 \nWHERE stringTagMap['peer.service']!='' \nAND timestamp BETWEEN {{.start_datetime}} AND {{.end_datetime}} \nGROUP BY (op_name, interval);\n\n-- available variables:\n-- \t{{.start_datetime}}\n-- \t{{.end_datetime}}\n\n-- required column alias:\n-- \tvalue\n-- \tinterval`,
legend: '',
disabled: false,
},

View File

@@ -260,7 +260,7 @@ function ChartPreview({
</FailedMessageContainer>
)}
{chartData && !queryResponse.isError && (
{chartData && !queryResponse.isError && !queryResponse.isLoading && (
<GridPanelSwitch
options={options}
panelType={graphType}

View File

@@ -1,6 +1,6 @@
/* eslint-disable react/display-name */
import { PlusOutlined } from '@ant-design/icons';
import { Input, Typography } from 'antd';
import { Flex, Input, Typography } from 'antd';
import type { ColumnsType } from 'antd/es/table/interface';
import saveAlertApi from 'api/alerts/save';
import logEvent from 'api/common/logEvent';
@@ -34,12 +34,7 @@ import { GettableAlert } from 'types/api/alerts/get';
import AppReducer from 'types/reducer/app';
import DeleteAlert from './DeleteAlert';
import {
Button,
ButtonContainer,
ColumnButton,
SearchContainer,
} from './styles';
import { Button, ColumnButton, SearchContainer } from './styles';
import Status from './TableComponents/Status';
import ToggleAlertState from './ToggleAlertState';
import { alertActionLogEvent, filterAlerts } from './utils';
@@ -373,21 +368,25 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
onChange={handleSearch}
defaultValue={searchString}
/>
<ButtonContainer>
<Flex gap={12}>
{addNewAlert && (
<Button
type="primary"
onClick={onClickNewAlertHandler}
icon={<PlusOutlined />}
>
New Alert
</Button>
)}
<TextToolTip
{...{
text: `More details on how to create alerts`,
url:
'https://signoz.io/docs/alerts/?utm_source=product&utm_medium=list-alerts',
urlText: 'Learn More',
}}
/>
{addNewAlert && (
<Button onClick={onClickNewAlertHandler} icon={<PlusOutlined />}>
New Alert
</Button>
)}
</ButtonContainer>
</Flex>
</SearchContainer>
<DynamicColumnTable
tablesource={TableDataSource.Alert}

View File

@@ -9,12 +9,6 @@ export const SearchContainer = styled.div`
gap: 2rem;
}
`;
export const ButtonContainer = styled.div`
&&& {
display: flex;
align-items: center;
}
`;
export const Button = styled(ButtonComponent)`
&&& {

View File

@@ -91,6 +91,7 @@ function DashboardsList(): JSX.Element {
const {
data: dashboardListResponse,
isLoading: isDashboardListLoading,
isRefetching: isDashboardListRefetching,
error: dashboardFetchError,
refetch: refetchDashboardList,
} = useGetAllDashboard();
@@ -703,7 +704,9 @@ function DashboardsList(): JSX.Element {
</Flex>
</div>
{isDashboardListLoading || isFilteringDashboards ? (
{isDashboardListLoading ||
isFilteringDashboards ||
isDashboardListRefetching ? (
<div className="loading-dashboard-details">
<Skeleton.Input active size="large" className="skeleton-1" />
<Skeleton.Input active size="large" className="skeleton-1" />
@@ -902,7 +905,11 @@ function DashboardsList(): JSX.Element {
columns={columns}
dataSource={data}
showSorterTooltip
loading={isDashboardListLoading || isFilteringDashboards}
loading={
isDashboardListLoading ||
isFilteringDashboards ||
isDashboardListRefetching
}
showHeader={false}
pagination={paginationConfig}
/>

View File

@@ -122,10 +122,10 @@ function TableView({
fieldValue: string,
) => (): void => {
handleClick(operator, fieldKey, fieldValue);
if (operator === OPERATORS.IN) {
if (operator === OPERATORS['=']) {
setIsFilterInLoading(true);
}
if (operator === OPERATORS.NIN) {
if (operator === OPERATORS['!=']) {
setIsFilterOutLoading(true);
}
};

View File

@@ -139,7 +139,7 @@ export function TableViewActions(
<ArrowDownToDot size={14} style={{ transform: 'rotate(90deg)' }} />
)
}
onClick={onClickHandler(OPERATORS.IN, fieldFilterKey, fieldData.value)}
onClick={onClickHandler(OPERATORS['='], fieldFilterKey, fieldData.value)}
/>
</Tooltip>
<Tooltip title="Filter out value">
@@ -152,7 +152,11 @@ export function TableViewActions(
<ArrowUpFromDot size={14} style={{ transform: 'rotate(90deg)' }} />
)
}
onClick={onClickHandler(OPERATORS.NIN, fieldFilterKey, fieldData.value)}
onClick={onClickHandler(
OPERATORS['!='],
fieldFilterKey,
fieldData.value,
)}
/>
</Tooltip>
{!isOldLogsExplorerOrLiveLogsPage && (

View File

@@ -1,6 +1,7 @@
import LogDetail from 'components/LogDetail';
import { VIEW_TYPES } from 'components/LogDetail/constants';
import ROUTES from 'constants/routes';
import { getOldLogsOperatorFromNew } from 'hooks/logs/useActiveLog';
import { getGeneratedFilterQueryString } from 'lib/getGeneratedFilterQueryString';
import getStep from 'lib/getStep';
import { getIdConditions } from 'pages/Logs/utils';
@@ -57,10 +58,11 @@ function LogDetailedView({
const handleAddToQuery = useCallback(
(fieldKey: string, fieldValue: string, operator: string) => {
const newOperator = getOldLogsOperatorFromNew(operator);
const updatedQueryString = getGeneratedFilterQueryString(
fieldKey,
fieldValue,
operator,
newOperator,
queryString,
);
@@ -71,10 +73,11 @@ function LogDetailedView({
const handleClickActionItem = useCallback(
(fieldKey: string, fieldValue: string, operator: string): void => {
const newOperator = getOldLogsOperatorFromNew(operator);
const updatedQueryString = getGeneratedFilterQueryString(
fieldKey,
fieldValue,
operator,
newOperator,
queryString,
);

View File

@@ -3,6 +3,7 @@ import { QueryData } from 'types/api/widgets/getQuery';
export type LogsExplorerChartProps = {
data: QueryData[];
isLoading: boolean;
isLogsExplorerViews?: boolean;
isLabelEnabled?: boolean;
className?: string;
};

View File

@@ -16,12 +16,14 @@ import { UpdateTimeInterval } from 'store/actions';
import { LogsExplorerChartProps } from './LogsExplorerChart.interfaces';
import { CardStyled } from './LogsExplorerChart.styled';
import { getColorsForSeverityLabels } from './utils';
function LogsExplorerChart({
data,
isLoading,
isLabelEnabled = true,
className,
isLogsExplorerViews = false,
}: LogsExplorerChartProps): JSX.Element {
const dispatch = useDispatch();
const urlQuery = useUrlQuery();
@@ -29,15 +31,19 @@ function LogsExplorerChart({
const handleCreateDatasets: Required<GetChartDataProps>['createDataset'] = useCallback(
(element, index, allLabels) => ({
data: element,
backgroundColor: colors[index % colors.length] || themeColors.red,
borderColor: colors[index % colors.length] || themeColors.red,
backgroundColor: isLogsExplorerViews
? getColorsForSeverityLabels(allLabels[index], index)
: colors[index % colors.length] || themeColors.red,
borderColor: isLogsExplorerViews
? getColorsForSeverityLabels(allLabels[index], index)
: colors[index % colors.length] || themeColors.red,
...(isLabelEnabled
? {
label: allLabels[index],
}
: {}),
}),
[isLabelEnabled],
[isLabelEnabled, isLogsExplorerViews],
);
const onDragSelect = useCallback(
@@ -112,6 +118,7 @@ function LogsExplorerChart({
<Graph
name="logsExplorerChart"
data={graphData.data}
isStacked={isLogsExplorerViews}
type="bar"
animate
onDragSelect={onDragSelect}

View File

@@ -0,0 +1,36 @@
import { Color } from '@signozhq/design-tokens';
import { themeColors } from 'constants/theme';
import { colors } from 'lib/getRandomColor';
export function getColorsForSeverityLabels(
label: string,
index: number,
): string {
const lowerCaseLabel = label.toLowerCase();
if (lowerCaseLabel.includes(`{severity_text="trace"}`)) {
return Color.BG_ROBIN_300;
}
if (lowerCaseLabel.includes(`{severity_text="debug"}`)) {
return Color.BG_FOREST_500;
}
if (lowerCaseLabel.includes(`{severity_text="info"}`)) {
return Color.BG_SLATE_400;
}
if (lowerCaseLabel.includes(`{severity_text="warn"}`)) {
return Color.BG_AMBER_500;
}
if (lowerCaseLabel.includes(`{severity_text="error"}`)) {
return Color.BG_CHERRY_500;
}
if (lowerCaseLabel.includes(`{severity_text="fatal"}`)) {
return Color.BG_SAKURA_500;
}
return colors[index % colors.length] || themeColors.red;
}

View File

@@ -147,6 +147,13 @@
}
.logs-histogram {
.ant-card-body {
height: 140px;
min-height: 140px;
padding: 0 16px 22px 16px;
font-family: 'Geist Mono';
}
margin-bottom: 0px;
}
}

View File

@@ -64,6 +64,7 @@ import { useHistory } from 'react-router-dom';
import { AppState } from 'store/reducers';
import { Dashboard } from 'types/api/dashboard/getAll';
import { ILog } from 'types/api/logs/log';
import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';
import {
IBuilderQuery,
OrderByPayload,
@@ -132,6 +133,9 @@ function LogsExplorerViews({
// State
const [page, setPage] = useState<number>(1);
const [logs, setLogs] = useState<ILog[]>([]);
const [lastLogLineTimestamp, setLastLogLineTimestamp] = useState<
number | string | null
>();
const [requestData, setRequestData] = useState<Query | null>(null);
const [showFormatMenuItems, setShowFormatMenuItems] = useState(false);
const [queryId, setQueryId] = useState<string>(v4());
@@ -188,6 +192,16 @@ function LogsExplorerViews({
const modifiedQueryData: IBuilderQuery = {
...listQuery,
aggregateOperator: LogsAggregatorOperator.COUNT,
groupBy: [
{
key: 'severity_text',
dataType: DataTypes.String,
type: '',
isColumn: true,
isJSON: false,
id: 'severity_text--string----true',
},
],
};
const modifiedQuery: Query = {
@@ -259,6 +273,14 @@ function LogsExplorerViews({
start: minTime,
end: maxTime,
}),
// send the lastLogLineTimestamp only when the panel type is list, the orderBy column is timestamp, and the order is desc
lastLogLineTimestamp:
panelType === PANEL_TYPES.LIST &&
requestData?.builder?.queryData?.[0]?.orderBy?.[0]?.columnName ===
'timestamp' &&
requestData?.builder?.queryData?.[0]?.orderBy?.[0]?.order === 'desc'
? lastLogLineTimestamp
: undefined,
},
undefined,
listQueryKeyRef,
@@ -336,6 +358,10 @@ function LogsExplorerViews({
pageSize: nextPageSize,
});
// lastLogLineTimestamp starts out null while no logs are loaded; once we
// scroll to the end of the loaded logs, set it to the last log's timestamp.
setLastLogLineTimestamp(lastLog.timestamp);
setPage((prevPage) => prevPage + 1);
setRequestData(newRequestData);
@@ -528,6 +554,11 @@ function LogsExplorerViews({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [data]);
useEffect(() => {
// clear the lastLogLineTimestamp when the data changes
setLastLogLineTimestamp(null);
}, [data]);
useEffect(() => {
if (
requestData?.id !== stagedQuery?.id ||
@@ -661,6 +692,7 @@ function LogsExplorerViews({
className="logs-histogram"
isLoading={isFetchingListChartData || isLoadingListChartData}
data={chartData}
isLogsExplorerViews={panelType === PANEL_TYPES.LIST}
/>
)}

View File

@@ -74,7 +74,7 @@ export const panelTypeVsYAxisUnit: { [key in PANEL_TYPES]: boolean } = {
[PANEL_TYPES.VALUE]: true,
[PANEL_TYPES.TABLE]: false,
[PANEL_TYPES.LIST]: false,
[PANEL_TYPES.PIE]: false,
[PANEL_TYPES.PIE]: true,
[PANEL_TYPES.BAR]: true,
[PANEL_TYPES.HISTOGRAM]: false,
[PANEL_TYPES.TRACE]: false,

View File

@@ -211,7 +211,11 @@ function RightContainer({
<YAxisUnitSelector
defaultValue={yAxisUnit}
onSelect={setYAxisUnit}
fieldLabel={selectedGraphType === 'Value' ? 'Unit' : 'Y Axis Unit'}
fieldLabel={
selectedGraphType === 'Value' || selectedGraphType === 'Pie'
? 'Unit'
: 'Y Axis Unit'
}
/>
)}
{allowSoftMinMax && (

View File

@@ -4,6 +4,7 @@ import { Color } from '@signozhq/design-tokens';
import { Group } from '@visx/group';
import { Pie } from '@visx/shape';
import { useTooltip, useTooltipInPortal } from '@visx/tooltip';
import { getYAxisFormattedValue } from 'components/Graph/yAxisConfig';
import { themeColors } from 'constants/theme';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { generateColor } from 'lib/uPlotLib/utils/generateColor';
@@ -129,7 +130,12 @@ function PiePanelWrapper({
showTooltip({
tooltipData: {
label,
value: arc.data.value,
// do not update the value stored in the data, as the arc allotment is based
// on the raw value and would otherwise treat "4K" as smaller than 40
value: getYAxisFormattedValue(
arc.data.value,
widget?.yAxisUnit || 'none',
),
color: arc.data.color,
key: label,
},

View File

@@ -1,6 +1,6 @@
import { getAggregateKeys } from 'api/queryBuilder/getAttributeKeys';
import { SOMETHING_WENT_WRONG } from 'constants/api';
import { QueryBuilderKeys } from 'constants/queryBuilder';
import { OPERATORS, QueryBuilderKeys } from 'constants/queryBuilder';
import ROUTES from 'constants/routes';
import { getOperatorValue } from 'container/QueryBuilder/filters/QueryBuilderSearch/utils';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -24,6 +24,16 @@ import { v4 as uuid } from 'uuid';
import { UseActiveLog } from './types';
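// The old logs explorer still builds its filter query string with IN/NIN,
// so the new '=' / '!=' operators are translated back before use (the
// "handle cases for old logs explorer" fix from #5923).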
export function getOldLogsOperatorFromNew(operator: string): string {
switch (operator) {
case OPERATORS['=']:
return OPERATORS.IN;
case OPERATORS['!=']:
return OPERATORS.NIN;
default:
return operator;
}
}
export const useActiveLog = (): UseActiveLog => {
const dispatch = useDispatch();
@@ -178,10 +188,11 @@ export const useActiveLog = (): UseActiveLog => {
);
const onAddToQueryLogs = useCallback(
(fieldKey: string, fieldValue: string, operator: string) => {
const newOperator = getOldLogsOperatorFromNew(operator);
const updatedQueryString = getGeneratedFilterQueryString(
fieldKey,
fieldValue,
operator,
newOperator,
queryString,
);

View File

@@ -114,25 +114,6 @@
})();
</script>
<script type="text/javascript">
//Set your CLARITY_PROJECT_ID
const CLARITY_PROJECT_ID =
'<%= htmlWebpackPlugin.options.CLARITY_PROJECT_ID %>';
(function (c, l, a, r, i, t, y) {
c[a] =
c[a] ||
function () {
(c[a].q = c[a].q || []).push(arguments);
};
t = l.createElement(r);
t.async = 1;
t.src = 'https://www.clarity.ms/tag/' + i;
y = l.getElementsByTagName(r)[0];
y.parentNode.insertBefore(t, y);
})(window, document, 'clarity', 'script', CLARITY_PROJECT_ID);
</script>
<script>
//Set your SEGMENT_ID
const SEGMENT_ID = '<%= htmlWebpackPlugin.options.SEGMENT_ID %>';

View File

@@ -1,6 +1,7 @@
import getStartEndRangeTime from 'lib/getStartEndRangeTime';
import getStep from 'lib/getStep';
import { mapQueryDataToApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataToApi';
import { isUndefined } from 'lodash-es';
import store from 'store';
import { QueryRangePayload } from 'types/api/metrics/getQueryRange';
import { EQueryType } from 'types/common/dashboard';
@@ -24,7 +25,11 @@ export const prepareQueryRangePayload = ({
fillGaps = false,
}: GetQueryResultsProps): PrepareQueryRangePayload => {
let legendMap: Record<string, string> = {};
const { allowSelectedIntervalForStepGen, ...restParams } = params;
const {
allowSelectedIntervalForStepGen,
lastLogLineTimestamp,
...restParams
} = params;
const compositeQuery: QueryRangePayload['compositeQuery'] = {
queryType: query.queryType,
@@ -90,9 +95,13 @@ export const prepareQueryRangePayload = ({
interval: globalSelectedInterval,
});
const endLogTimeStamp = !isUndefined(lastLogLineTimestamp)
? new Date(lastLogLineTimestamp as string | number)?.getTime() || undefined
: undefined;
const queryPayload: QueryRangePayload = {
start: parseInt(start, 10) * 1e3,
end: parseInt(end, 10) * 1e3,
end: endLogTimeStamp || parseInt(end, 10) * 1e3,
step: getStep({
start: allowSelectedIntervalForStepGen
? start
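
The end-override above, reduced to a Go sketch: start and end arrive as epoch seconds and are sent as milliseconds, while a known last-log-line timestamp (assumed here to already be in milliseconds) takes precedence, so the next page of descending logs continues exactly where the previous one stopped.

package main

import "fmt"

func queryEnd(endSeconds, lastLogLineMs int64) int64 {
	if lastLogLineMs > 0 {
		return lastLogLineMs // continue from the last log line seen
	}
	return endSeconds * 1000 // fall back to the selected range end, seconds -> ms
}

func main() {
	fmt.Println(queryEnd(1726480000, 0))             // 1726480000000
	fmt.Println(queryEnd(1726480000, 1726479999123)) // 1726479999123
}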

View File

@@ -1,5 +1,6 @@
import './AlertHeader.styles.scss';
import LineClampedText from 'periscope/components/LineClampedText/LineClampedText';
import { useAlertRule } from 'providers/Alert';
import { useEffect, useMemo } from 'react';
@@ -42,7 +43,9 @@ function AlertHeader({ alertDetails }: AlertHeaderProps): JSX.Element {
<div className="top-section">
<div className="alert-title-wrapper">
<AlertState state={isAlertRuleDisabled ? 'disabled' : state} />
<div className="alert-title">{alert}</div>
<div className="alert-title">
<LineClampedText text={alert} />
</div>
</div>
</div>
<div className="bottom-section">

View File

@@ -26,6 +26,7 @@ import history from 'lib/history';
import { History, Table } from 'lucide-react';
import EditRules from 'pages/EditRules';
import { OrderPreferenceItems } from 'pages/Logs/config';
import BetaTag from 'periscope/components/BetaTag/BetaTag';
import PaginationInfoText from 'periscope/components/PaginationInfoText/PaginationInfoText';
import { useAlertRule } from 'providers/Alert';
import { useCallback, useMemo } from 'react';
@@ -125,6 +126,7 @@ export const useRouteTabUtils = (): { routes: TabRoutes[] } => {
<div className="tab-item">
<History size={14} />
History
<BetaTag />
</div>
),
route: getRouteUrl(AlertDetailsTab.HISTORY),
@@ -256,7 +258,7 @@ export const useGetAlertRuleDetailsTimelineTable = (): GetAlertRuleDetailsTimeli
const { updatedOrder, offset } = useMemo(
() => ({
updatedOrder: params.get(urlKey.order) ?? OrderPreferenceItems.ASC,
offset: parseInt(params.get(urlKey.offset) ?? '1', 10),
offset: parseInt(params.get(urlKey.offset) ?? '0', 10),
}),
[params],
);

View File

@@ -0,0 +1,9 @@
import { Tag } from 'antd';
export default function BetaTag(): JSX.Element {
return (
<Tag bordered={false} color="geekblue">
Beta
</Tag>
);
}

View File

@@ -1,18 +1,37 @@
import './KeyValueLabel.styles.scss';
type KeyValueLabelProps = { badgeKey: string; badgeValue: string };
import { Tooltip } from 'antd';
import TrimmedText from '../TrimmedText/TrimmedText';
type KeyValueLabelProps = {
badgeKey: string;
badgeValue: string;
maxCharacters?: number;
};
export default function KeyValueLabel({
badgeKey,
badgeValue,
maxCharacters = 20,
}: KeyValueLabelProps): JSX.Element | null {
if (!badgeKey || !badgeValue) {
return null;
}
return (
<div className="key-value-label">
<div className="key-value-label__key">{badgeKey}</div>
<div className="key-value-label__value">{badgeValue}</div>
<div className="key-value-label__key">
<TrimmedText text={badgeKey} maxCharacters={maxCharacters} />
</div>
<Tooltip title={badgeValue}>
<div className="key-value-label__value">
<TrimmedText text={badgeValue} maxCharacters={maxCharacters} />
</div>
</Tooltip>
</div>
);
}
KeyValueLabel.defaultProps = {
maxCharacters: 20,
};

View File

@@ -0,0 +1,6 @@
.line-clamped-text {
display: -webkit-box;
-webkit-box-orient: vertical;
overflow: hidden;
text-overflow: ellipsis;
}

View File

@@ -0,0 +1,52 @@
import './LineClampedText.styles.scss';
import { Tooltip } from 'antd';
import { useEffect, useRef, useState } from 'react';
function LineClampedText({
text,
lines,
}: {
text: string;
lines?: number;
}): JSX.Element {
const [isOverflowing, setIsOverflowing] = useState(false);
const textRef = useRef<HTMLDivElement>(null);
useEffect(() => {
const checkOverflow = (): void => {
if (textRef.current) {
setIsOverflowing(
textRef.current.scrollHeight > textRef.current.clientHeight,
);
}
};
checkOverflow();
window.addEventListener('resize', checkOverflow);
return (): void => {
window.removeEventListener('resize', checkOverflow);
};
}, [text, lines]);
const content = (
<div
ref={textRef}
className="line-clamped-text"
style={{
WebkitLineClamp: lines,
}}
>
{text}
</div>
);
return isOverflowing ? <Tooltip title={text}>{content}</Tooltip> : content;
}
LineClampedText.defaultProps = {
lines: 1,
};
export default LineClampedText;

View File

@@ -0,0 +1,30 @@
import { Tooltip } from 'antd';
import { useEffect, useState } from 'react';
function TrimmedText({
text,
maxCharacters,
}: {
text: string;
maxCharacters: number;
}): JSX.Element {
const [displayText, setDisplayText] = useState(text);
useEffect(() => {
if (text.length > maxCharacters) {
setDisplayText(`${text.slice(0, maxCharacters)}...`);
} else {
setDisplayText(text);
}
}, [text, maxCharacters]);
return text.length > maxCharacters ? (
<Tooltip title={text}>
<span>{displayText}</span>
</Tooltip>
) : (
<span>{displayText}</span>
);
}
export default TrimmedText;

View File

@@ -1,11 +1,8 @@
import { compose, Store } from 'redux';
type ClarityType<T> = (...args: string[]) => T;
declare global {
interface Window {
store: Store;
clarity: ClarityType<string>;
Intercom: any;
analytics: Record<string, any>;
__REDUX_DEVTOOLS_EXTENSION_COMPOSE__: typeof compose;

View File

@@ -23,7 +23,6 @@ const plugins = [
INTERCOM_APP_ID: process.env.INTERCOM_APP_ID,
SEGMENT_ID: process.env.SEGMENT_ID,
POSTHOG_KEY: process.env.POSTHOG_KEY,
CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID,
SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN,
SENTRY_ORG: process.env.SENTRY_ORG,
SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID,
@@ -42,7 +41,6 @@ const plugins = [
INTERCOM_APP_ID: process.env.INTERCOM_APP_ID,
SEGMENT_ID: process.env.SEGMENT_ID,
POSTHOG_KEY: process.env.POSTHOG_KEY,
CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID,
SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN,
SENTRY_ORG: process.env.SENTRY_ORG,
SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID,

View File

@@ -28,7 +28,6 @@ const plugins = [
INTERCOM_APP_ID: process.env.INTERCOM_APP_ID,
SEGMENT_ID: process.env.SEGMENT_ID,
POSTHOG_KEY: process.env.POSTHOG_KEY,
CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID,
SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN,
SENTRY_ORG: process.env.SENTRY_ORG,
SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID,
@@ -52,7 +51,6 @@ const plugins = [
INTERCOM_APP_ID: process.env.INTERCOM_APP_ID,
SEGMENT_ID: process.env.SEGMENT_ID,
POSTHOG_KEY: process.env.POSTHOG_KEY,
CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID,
SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN,
SENTRY_ORG: process.env.SENTRY_ORG,
SENTRY_PROJECT_ID: process.env.SENTRY_PROJECT_ID,

View File

@@ -40,6 +40,11 @@ const (
defaultWriteBatchDelay time.Duration = 5 * time.Second
defaultWriteBatchSize int = 10000
defaultEncoding Encoding = EncodingJSON
defaultLogsLocalTableV2 string = "logs_v2"
defaultLogsTableV2 string = "distributed_logs_v2"
defaultLogsResourceLocalTableV2 string = "logs_v2_resource"
defaultLogsResourceTableV2 string = "distributed_logs_v2_resource"
)
// NamespaceConfig is Clickhouse's internal configuration data
@@ -72,6 +77,11 @@ type namespaceConfig struct {
WriteBatchSize int
Encoding Encoding
Connector Connector
LogsLocalTableV2 string
LogsTableV2 string
LogsResourceLocalTableV2 string
LogsResourceTableV2 string
}
// Connector defines how to connect to the database
@@ -159,6 +169,11 @@ func NewOptions(
WriteBatchSize: defaultWriteBatchSize,
Encoding: defaultEncoding,
Connector: defaultConnector,
LogsTableV2: defaultLogsTableV2,
LogsLocalTableV2: defaultLogsLocalTableV2,
LogsResourceTableV2: defaultLogsResourceTableV2,
LogsResourceLocalTableV2: defaultLogsResourceLocalTableV2,
},
others: make(map[string]*namespaceConfig, len(otherNamespaces)),
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/google/uuid"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.uber.org/zap"
"golang.org/x/exp/maps"
)
@@ -52,7 +51,7 @@ func (tracker *inMemoryQueryProgressTracker) ReportQueryProgress(
func (tracker *inMemoryQueryProgressTracker) SubscribeToQueryProgress(
queryId string,
) (<-chan v3.QueryProgress, func(), *model.ApiError) {
) (<-chan model.QueryProgress, func(), *model.ApiError) {
queryTracker, err := tracker.getQueryTracker(queryId)
if err != nil {
return nil, nil, err
@@ -97,7 +96,7 @@ type queryTracker struct {
queryId string
isFinished bool
progress *v3.QueryProgress
progress *model.QueryProgress
subscriptions map[string]*queryProgressSubscription
lock sync.Mutex
@@ -124,7 +123,7 @@ func (qt *queryTracker) handleProgressUpdate(p *clickhouse.Progress) {
if qt.progress == nil {
// This is the first update
qt.progress = &v3.QueryProgress{}
qt.progress = &model.QueryProgress{}
}
updateQueryProgress(qt.progress, p)
@@ -135,7 +134,7 @@ func (qt *queryTracker) handleProgressUpdate(p *clickhouse.Progress) {
}
func (qt *queryTracker) subscribe() (
<-chan v3.QueryProgress, func(), *model.ApiError,
<-chan model.QueryProgress, func(), *model.ApiError,
) {
qt.lock.Lock()
defer qt.lock.Unlock()
@@ -200,20 +199,20 @@ func (qt *queryTracker) onFinished() {
}
type queryProgressSubscription struct {
ch chan v3.QueryProgress
ch chan model.QueryProgress
isClosed bool
lock sync.Mutex
}
func newQueryProgressSubscription() *queryProgressSubscription {
ch := make(chan v3.QueryProgress, 1000)
ch := make(chan model.QueryProgress, 1000)
return &queryProgressSubscription{
ch: ch,
}
}
// Must not block or panic in any scenario
func (ch *queryProgressSubscription) send(progress v3.QueryProgress) {
func (ch *queryProgressSubscription) send(progress model.QueryProgress) {
ch.lock.Lock()
defer ch.lock.Unlock()
@@ -248,7 +247,7 @@ func (ch *queryProgressSubscription) close() {
}
}
func updateQueryProgress(qp *v3.QueryProgress, chProgress *clickhouse.Progress) {
func updateQueryProgress(qp *model.QueryProgress, chProgress *clickhouse.Progress) {
qp.ReadRows += chProgress.Rows
qp.ReadBytes += chProgress.Bytes
qp.ElapsedMs += uint64(chProgress.Elapsed.Milliseconds())
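
Each clickhouse-go progress callback delivers a delta, which is why the function above sums fields rather than assigning them. A self-contained sketch with local mirrors of model.QueryProgress and the relevant clickhouse.Progress fields:

package main

import (
	"fmt"
	"time"
)

type queryProgress struct{ ReadRows, ReadBytes, ElapsedMs uint64 }

type chProgress struct {
	Rows, Bytes uint64
	Elapsed     time.Duration
}

// accumulate deltas, mirroring updateQueryProgress above
func update(qp *queryProgress, p *chProgress) {
	qp.ReadRows += p.Rows
	qp.ReadBytes += p.Bytes
	qp.ElapsedMs += uint64(p.Elapsed.Milliseconds())
}

func main() {
	qp := &queryProgress{}
	update(qp, &chProgress{Rows: 100, Bytes: 4096, Elapsed: 20 * time.Millisecond})
	update(qp, &chProgress{Rows: 50, Bytes: 2048, Elapsed: 10 * time.Millisecond})
	fmt.Printf("%+v\n", *qp) // {ReadRows:150 ReadBytes:6144 ElapsedMs:30}
}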

View File

@@ -3,7 +3,6 @@ package queryprogress
import (
"github.com/ClickHouse/clickhouse-go/v2"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
type QueryProgressTracker interface {
@@ -19,7 +18,7 @@ type QueryProgressTracker interface {
// The returned channel will produce `QueryProgress` instances representing
// the latest state of query progress stats. Also returns a function that
// can be called to unsubscribe before the query finishes, if needed.
SubscribeToQueryProgress(queryId string) (ch <-chan v3.QueryProgress, unsubscribe func(), err *model.ApiError)
SubscribeToQueryProgress(queryId string) (ch <-chan model.QueryProgress, unsubscribe func(), err *model.ApiError)
}
func NewQueryProgressTracker() QueryProgressTracker {

View File

@@ -7,7 +7,6 @@ import (
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
func TestQueryProgressTracking(t *testing.T) {
@@ -45,7 +44,7 @@ func TestQueryProgressTracking(t *testing.T) {
require.NotNil(ch)
require.NotNil(unsubscribe)
expectedProgress := v3.QueryProgress{}
expectedProgress := model.QueryProgress{}
updateQueryProgress(&expectedProgress, testProgress1)
require.Equal(expectedProgress.ReadRows, testProgress1.Rows)
select {

View File

@@ -42,14 +42,11 @@ import (
"go.uber.org/zap"
queryprogress "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader/query_progress"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
"go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/logs"
"go.signoz.io/signoz/pkg/query-service/app/services"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/dao"
chErrors "go.signoz.io/signoz/pkg/query-service/errors"
am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
"go.signoz.io/signoz/pkg/query-service/interfaces"
@@ -89,6 +86,7 @@ const (
maxProgressiveSteps = 4
charset = "abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
NANOSECOND = 1000000000
)
var (
@@ -125,6 +123,11 @@ type ClickHouseReader struct {
fanoutStorage *storage.Storage
queryProgressTracker queryprogress.QueryProgressTracker
logsTableV2 string
logsLocalTableV2 string
logsResourceTableV2 string
logsResourceLocalTableV2 string
promConfigFile string
promConfig *config.Config
alertManager am.Manager
@@ -132,6 +135,9 @@ type ClickHouseReader struct {
liveTailRefreshSeconds int
cluster string
useLogsNewSchema bool
logsTableName string
}
// NewTraceReader returns a TraceReader for the database
@@ -143,6 +149,7 @@ func NewReader(
maxOpenConns int,
dialTimeout time.Duration,
cluster string,
useLogsNewSchema bool,
) *ClickHouseReader {
datasource := os.Getenv("ClickHouseUrl")
@@ -153,7 +160,7 @@ func NewReader(
zap.L().Fatal("failed to initialize ClickHouse", zap.Error(err))
}
return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster)
return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster, useLogsNewSchema)
}
func NewReaderFromClickhouseConnection(
@@ -163,6 +170,7 @@ func NewReaderFromClickhouseConnection(
configFile string,
featureFlag interfaces.FeatureLookup,
cluster string,
useLogsNewSchema bool,
) *ClickHouseReader {
alertManager, err := am.New("")
if err != nil {
@@ -193,6 +201,11 @@ func NewReaderFromClickhouseConnection(
},
}
logsTableName := options.primary.LogsTable
if useLogsNewSchema {
logsTableName = options.primary.LogsTableV2
}
return &ClickHouseReader{
db: wrap,
localDB: localDB,
@@ -219,6 +232,14 @@ func NewReaderFromClickhouseConnection(
featureFlags: featureFlag,
cluster: cluster,
queryProgressTracker: queryprogress.NewQueryProgressTracker(),
useLogsNewSchema: useLogsNewSchema,
logsTableV2: options.primary.LogsTableV2,
logsLocalTableV2: options.primary.LogsLocalTableV2,
logsResourceTableV2: options.primary.LogsResourceTableV2,
logsResourceLocalTableV2: options.primary.LogsResourceLocalTableV2,
logsTableName: logsTableName,
}
}
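
A condensed sketch of the selection wired up above: the useLogsNewSchema flag decides which logs table the reader targets by default. The v2 name matches the defaults in this diff; the old table name below is assumed for illustration.

package main

import "fmt"

type logsTables struct {
	LogsTable, LogsTableV2 string
}

func activeLogsTable(useLogsNewSchema bool, o logsTables) string {
	if useLogsNewSchema {
		return o.LogsTableV2
	}
	return o.LogsTable
}

func main() {
	o := logsTables{LogsTable: "distributed_logs", LogsTableV2: "distributed_logs_v2"}
	fmt.Println(activeLogsTable(true, o)) // distributed_logs_v2
}
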
@@ -3026,122 +3047,6 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
}
}
func (r *ClickHouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*model.Series, string, error) {
zap.L().Error("GetMetricResultEE is not implemented for opensource version")
return nil, "", fmt.Errorf("GetMetricResultEE is not implemented for opensource version")
}
// GetMetricResult runs the query and returns list of time series
func (r *ClickHouseReader) GetMetricResult(ctx context.Context, query string) ([]*model.Series, error) {
defer utils.Elapsed("GetMetricResult", nil)()
zap.L().Info("Executing metric result query: ", zap.String("query", query))
rows, err := r.db.Query(ctx, query)
if err != nil {
zap.L().Error("Error in processing query", zap.Error(err))
return nil, err
}
var (
columnTypes = rows.ColumnTypes()
columnNames = rows.Columns()
vars = make([]interface{}, len(columnTypes))
)
for i := range columnTypes {
vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
}
// when group by is applied, each combination of the cartesian product
// of attributes is a separate series. each item in metricPointsMap
// represents a unique series.
metricPointsMap := make(map[string][]model.MetricPoint)
// attribute key-value pairs for each group selection
attributesMap := make(map[string]map[string]string)
defer rows.Close()
for rows.Next() {
if err := rows.Scan(vars...); err != nil {
return nil, err
}
var groupBy []string
var metricPoint model.MetricPoint
groupAttributes := make(map[string]string)
// Assuming that the end result row contains a timestamp, value and optional labels
// Label key and value are both strings.
for idx, v := range vars {
colName := columnNames[idx]
switch v := v.(type) {
case *string:
// special case for returning all labels
if colName == "fullLabels" {
var metric map[string]string
err := json.Unmarshal([]byte(*v), &metric)
if err != nil {
return nil, err
}
for key, val := range metric {
groupBy = append(groupBy, val)
groupAttributes[key] = val
}
} else {
groupBy = append(groupBy, *v)
groupAttributes[colName] = *v
}
case *time.Time:
metricPoint.Timestamp = v.UnixMilli()
case *float64:
metricPoint.Value = *v
case **float64:
// ch seems to return this type when column is derived from
// SELECT count(*)/ SELECT count(*)
floatVal := *v
if floatVal != nil {
metricPoint.Value = *floatVal
}
case *float32:
float32Val := float32(*v)
metricPoint.Value = float64(float32Val)
case *uint8, *uint64, *uint16, *uint32:
if _, ok := constants.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
}
case *int8, *int16, *int32, *int64:
if _, ok := constants.ReservedColumnTargetAliases[colName]; ok {
metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int())
} else {
groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
}
default:
zap.L().Error("invalid var found in metric builder query result", zap.Any("v", v), zap.String("colName", colName))
}
}
sort.Strings(groupBy)
key := strings.Join(groupBy, "")
attributesMap[key] = groupAttributes
metricPointsMap[key] = append(metricPointsMap[key], metricPoint)
}
var seriesList []*model.Series
for key := range metricPointsMap {
points := metricPointsMap[key]
// first point in each series could be invalid since the
// aggregations are applied using a point from the previous series
if len(points) > 1 {
points = points[1:]
}
attributes := attributesMap[key]
series := model.Series{Labels: attributes, Points: points}
seriesList = append(seriesList, &series)
}
return seriesList, nil
}
func (r *ClickHouseReader) GetTotalSpans(ctx context.Context) (uint64, error) {
var totalSpans uint64
@@ -3335,156 +3240,6 @@ func removeUnderscoreDuplicateFields(fields []model.LogField) []model.LogField {
return updatedFields
}
// GetDashboardsInfo returns analytics data for dashboards
func (r *ClickHouseReader) GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) {
dashboardsInfo := model.DashboardsInfo{}
// fetch dashboards from dashboard db
query := "SELECT data FROM dashboards"
var dashboardsData []dashboards.Dashboard
err := r.localDB.Select(&dashboardsData, query)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return &dashboardsInfo, err
}
totalDashboardsWithPanelAndName := 0
var dashboardNames []string
count := 0
for _, dashboard := range dashboardsData {
if isDashboardWithPanelAndName(dashboard.Data) {
totalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName + 1
}
dashboardName := extractDashboardName(dashboard.Data)
if dashboardName != "" {
dashboardNames = append(dashboardNames, dashboardName)
}
dashboardInfo := countPanelsInDashboard(dashboard.Data)
dashboardsInfo.LogsBasedPanels += dashboardInfo.LogsBasedPanels
dashboardsInfo.TracesBasedPanels += dashboardInfo.TracesBasedPanels
dashboardsInfo.MetricBasedPanels += dashboardInfo.MetricBasedPanels
if isDashboardWithTSV2(dashboard.Data) {
count = count + 1
}
}
dashboardsInfo.DashboardNames = dashboardNames
dashboardsInfo.TotalDashboards = len(dashboardsData)
dashboardsInfo.TotalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName
dashboardsInfo.QueriesWithTSV2 = count
return &dashboardsInfo, nil
}
func isDashboardWithTSV2(data map[string]interface{}) bool {
jsonData, err := json.Marshal(data)
if err != nil {
return false
}
return strings.Contains(string(jsonData), "time_series_v2")
}
func isDashboardWithPanelAndName(data map[string]interface{}) bool {
isDashboardName := false
isDashboardWithPanelAndName := false
if data != nil && data["title"] != nil && data["widgets"] != nil {
title, ok := data["title"].(string)
if ok && title != "Sample Title" {
isDashboardName = true
}
widgets, ok := data["widgets"]
if ok && isDashboardName {
data, ok := widgets.([]interface{})
if ok && len(data) > 0 {
isDashboardWithPanelAndName = true
}
}
}
return isDashboardWithPanelAndName
}
func extractDashboardName(data map[string]interface{}) string {
if data != nil && data["title"] != nil {
title, ok := data["title"].(string)
if ok {
return title
}
}
return ""
}
func countPanelsInDashboard(data map[string]interface{}) model.DashboardsInfo {
var logsPanelCount, tracesPanelCount, metricsPanelCount int
// totalPanels := 0
if data != nil && data["widgets"] != nil {
widgets, ok := data["widgets"]
if ok {
data, ok := widgets.([]interface{})
if ok {
for _, widget := range data {
sData, ok := widget.(map[string]interface{})
if ok && sData["query"] != nil {
// totalPanels++
query, ok := sData["query"].(map[string]interface{})
if ok && query["queryType"] == "builder" && query["builder"] != nil {
builderData, ok := query["builder"].(map[string]interface{})
if ok && builderData["queryData"] != nil {
builderQueryData, ok := builderData["queryData"].([]interface{})
if ok {
for _, queryData := range builderQueryData {
data, ok := queryData.(map[string]interface{})
if ok {
if data["dataSource"] == "traces" {
tracesPanelCount++
} else if data["dataSource"] == "metrics" {
metricsPanelCount++
} else if data["dataSource"] == "logs" {
logsPanelCount++
}
}
}
}
}
}
}
}
}
}
}
return model.DashboardsInfo{
LogsBasedPanels: logsPanelCount,
TracesBasedPanels: tracesPanelCount,
MetricBasedPanels: metricsPanelCount,
}
}
func (r *ClickHouseReader) GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error) {
savedViewsInfo := model.SavedViewsInfo{}
savedViews, err := explorer.GetViews()
if err != nil {
zap.S().Debug("Error in fetching saved views info: ", err)
return &savedViewsInfo, err
}
savedViewsInfo.TotalSavedViews = len(savedViews)
for _, view := range savedViews {
if view.SourcePage == "traces" {
savedViewsInfo.TracesSavedViews += 1
} else if view.SourcePage == "logs" {
savedViewsInfo.LogsSavedViews += 1
}
}
return &savedViewsInfo, nil
}
func (r *ClickHouseReader) GetUsers(ctx context.Context) ([]model.UserPayload, error) {
users, apiErr := dao.DB().GetUsers(ctx)
if apiErr != nil {
return nil, apiErr.Err
}
return users, nil
}
func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError) {
// response will contain top level fields from the otel log model
response := model.GetFieldsResponse{
@@ -3513,7 +3268,7 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe
resources = removeUnderscoreDuplicateFields(resources)
statements := []model.ShowCreateTableStatement{}
query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable)
query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsTable)
err = r.db.Select(ctx, &statements, query)
if err != nil {
return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
@@ -3544,6 +3299,72 @@ func isSelectedField(tableStatement string, field model.LogField) bool {
return strings.Contains(tableStatement, name)
}
func (r *ClickHouseReader) UpdateLogFieldV2(ctx context.Context, field *model.UpdateField) *model.ApiError {
if !field.Selected {
return model.ForbiddenError(errors.New("removing a selected field is not allowed, please reach out to support"))
}
colname := utils.GetClickhouseColumnNameV2(field.Type, field.DataType, field.Name)
dataType := strings.ToLower(field.DataType)
if dataType == "int64" || dataType == "float64" {
dataType = "number"
}
attrColName := fmt.Sprintf("%s_%s", field.Type, dataType)
for _, table := range []string{r.logsLocalTableV2, r.logsTableV2} {
q := "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s` %s DEFAULT %s['%s'] CODEC(ZSTD(1))"
query := fmt.Sprintf(q,
r.logsDB, table,
r.cluster,
colname, field.DataType,
attrColName,
field.Name,
)
err := r.db.Exec(ctx, query)
if err != nil {
return &model.ApiError{Err: err, Typ: model.ErrorInternal}
}
query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s_exists` bool DEFAULT if(mapContains(%s, '%s') != 0, true, false) CODEC(ZSTD(1))",
r.logsDB, table,
r.cluster,
colname,
attrColName,
field.Name,
)
err = r.db.Exec(ctx, query)
if err != nil {
return &model.ApiError{Err: err, Typ: model.ErrorInternal}
}
}
// create the index
if strings.ToLower(field.DataType) == "bool" {
// there is no point in creating index for bool attributes as the cardinality is just 2
return nil
}
if field.IndexType == "" {
field.IndexType = constants.DefaultLogSkipIndexType
}
if field.IndexGranularity == 0 {
field.IndexGranularity = constants.DefaultLogSkipIndexGranularity
}
query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS `%s_idx` (`%s`) TYPE %s GRANULARITY %d",
r.logsDB, r.logsLocalTableV2,
r.cluster,
colname,
colname,
field.IndexType,
field.IndexGranularity,
)
err := r.db.Exec(ctx, query)
if err != nil {
return &model.ApiError{Err: err, Typ: model.ErrorInternal}
}
return nil
}
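
Filled in with hypothetical values, the first ALTER generated above takes this shape; the database, cluster, and column names below are illustrative, not taken from this diff.

package main

import "fmt"

func main() {
	q := "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s` %s DEFAULT %s['%s'] CODEC(ZSTD(1))"
	// hypothetical field: a string attribute named user_id
	fmt.Printf(q+"\n",
		"signoz_logs", "logs_v2", "cluster",
		"attribute_string_user_id", "String",
		"attributes_string", "user_id",
	)
}
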
func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.UpdateField) *model.ApiError {
// don't allow updating static fields
if field.Type == constants.Static {
@@ -3551,10 +3372,14 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
return &model.ApiError{Err: err, Typ: model.ErrorBadData}
}
colname := utils.GetClickhouseColumnName(field.Type, field.DataType, field.Name)
if r.useLogsNewSchema {
return r.UpdateLogFieldV2(ctx, field)
}
// if a field is selected it means that the field needs to be indexed
if field.Selected {
colname := utils.GetClickhouseColumnName(field.Type, field.DataType, field.Name)
keyColName := fmt.Sprintf("%s_%s_key", field.Type, strings.ToLower(field.DataType))
valueColName := fmt.Sprintf("%s_%s_value", field.Type, strings.ToLower(field.DataType))
@@ -4145,10 +3970,14 @@ func (r *ClickHouseReader) GetLatestReceivedMetric(
return result, nil
}
func isColumn(tableStatement, attrType, field, datType string) bool {
func isColumn(useLogsNewSchema bool, tableStatement, attrType, field, datType string) bool {
// value of attrType will be `resource` or `tag`, if `tag` change it to `attribute`
name := utils.GetClickhouseColumnName(attrType, datType, field)
var name string
if useLogsNewSchema {
name = utils.GetClickhouseColumnNameV2(attrType, datType, field)
} else {
name = utils.GetClickhouseColumnName(attrType, datType, field)
}
return strings.Contains(tableStatement, fmt.Sprintf("%s ", name))
}
@@ -4204,7 +4033,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v
defer rows.Close()
statements := []model.ShowCreateTableStatement{}
query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable)
query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsTable)
err = r.db.Select(ctx, &statements, query)
if err != nil {
return nil, fmt.Errorf("error while fetching logs schema: %s", err.Error())
@@ -4221,7 +4050,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v
Key: tagKey,
DataType: v3.AttributeKeyDataType(dataType),
Type: v3.AttributeKeyType(attType),
IsColumn: isColumn(statements[0].Statement, attType, tagKey, dataType),
IsColumn: isColumn(r.useLogsNewSchema, statements[0].Statement, attType, tagKey, dataType),
}
response.AttributeKeys = append(response.AttributeKeys, key)
}
@@ -4258,7 +4087,7 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
defer rows.Close()
statements := []model.ShowCreateTableStatement{}
query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable)
query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsTable)
err = r.db.Select(ctx, &statements, query)
if err != nil {
return nil, fmt.Errorf("error while fetching logs schema: %s", err.Error())
@@ -4276,7 +4105,7 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
Key: attributeKey,
DataType: v3.AttributeKeyDataType(attributeDataType),
Type: v3.AttributeKeyType(tagType),
IsColumn: isColumn(statements[0].Statement, tagType, attributeKey, attributeDataType),
IsColumn: isColumn(r.useLogsNewSchema, statements[0].Statement, tagType, attributeKey, attributeDataType),
}
response.AttributeKeys = append(response.AttributeKeys, key)
@@ -4310,7 +4139,7 @@ func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.Fi
}
// ignore autocomplete requests for body and __attrs
if req.FilterAttributeKey == "body" {
if req.FilterAttributeKey == "body" || req.FilterAttributeKey == "__attrs" {
return &v3.FilterAttributeValueResponse{}, nil
}
@@ -4884,7 +4713,7 @@ func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query stri
if err != nil {
zap.L().Error("error while reading time series result", zap.Error(err))
return nil, err
return nil, errors.New(err.Error())
}
defer rows.Close()
@@ -4931,7 +4760,7 @@ func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([
if err != nil {
zap.L().Error("error while reading time series result", zap.Error(err))
return nil, err
return nil, errors.New(err.Error())
}
defer rows.Close()
@@ -5257,7 +5086,60 @@ func (r *ClickHouseReader) GetSpanAttributeKeys(ctx context.Context) (map[string
return response, nil
}
func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *v3.LogsLiveTailClient) {
func (r *ClickHouseReader) LiveTailLogsV4(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClientV2) {
if timestampStart == 0 {
timestampStart = uint64(time.Now().UnixNano())
} else {
timestampStart = uint64(utils.GetEpochNanoSecs(int64(timestampStart)))
}
ticker := time.NewTicker(time.Duration(r.liveTailRefreshSeconds) * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
done := true
client.Done <- &done
zap.L().Debug("closing go routine : " + client.Name)
return
case <-ticker.C:
// fetch only the newest 100 logs, since anything older won't make sense in live tail
var tmpQuery string
bucketStart := (timestampStart / NANOSECOND) - 1800
// the query has to be formed differently when resource filters are used
if strings.Contains(query, r.logsResourceTableV2) {
tmpQuery = fmt.Sprintf("seen_at_ts_bucket_start >=%d)) AND ts_bucket_start >=%d AND timestamp >=%d", bucketStart, bucketStart, timestampStart)
} else {
tmpQuery = fmt.Sprintf("ts_bucket_start >=%d AND timestamp >=%d", bucketStart, timestampStart)
}
if idStart != "" {
tmpQuery = fmt.Sprintf("%s AND id > '%s'", tmpQuery, idStart)
}
// order by desc because we need the latest logs first
tmpQuery = query + tmpQuery + " order by timestamp desc, id desc limit 100"
// use the old struct since we can read the rows directly into it and use them
response := []model.SignozLogV2{}
err := r.db.Select(ctx, &response, tmpQuery)
if err != nil {
zap.L().Error("Error while getting logs", zap.Error(err))
client.Error <- err
return
}
for i := len(response) - 1; i >= 0; i-- {
client.Logs <- &response[i]
if i == 0 {
timestampStart = response[i].Timestamp
idStart = response[i].ID
}
}
}
}
}
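
The bucket arithmetic above in isolation: timestamps are tracked in nanoseconds, ts_bucket_start is in epoch seconds, and each poll looks back 30 minutes (1800 s) from the cursor so the bucket filter cannot miss logs near a bucket boundary.

package main

import (
	"fmt"
	"time"
)

const nanosecond = 1_000_000_000 // mirrors the NANOSECOND constant above

func main() {
	timestampStart := uint64(time.Now().UnixNano())
	bucketStart := timestampStart/nanosecond - 1800 // epoch seconds, 30 min behind
	fmt.Println(bucketStart)
}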
func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClient) {
if timestampStart == 0 {
timestampStart = uint64(time.Now().UnixNano())
} else {
@@ -5301,7 +5183,7 @@ func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, tim
}
}
func (r *ClickHouseReader) AddRuleStateHistory(ctx context.Context, ruleStateHistory []v3.RuleStateHistory) error {
func (r *ClickHouseReader) AddRuleStateHistory(ctx context.Context, ruleStateHistory []model.RuleStateHistory) error {
var statement driver.Batch
var err error
@@ -5332,11 +5214,11 @@ func (r *ClickHouseReader) AddRuleStateHistory(ctx context.Context, ruleStateHis
return nil
}
func (r *ClickHouseReader) GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]v3.RuleStateHistory, error) {
func (r *ClickHouseReader) GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]model.RuleStateHistory, error) {
query := fmt.Sprintf("SELECT * FROM %s.%s WHERE rule_id = '%s' AND state_changed = true ORDER BY unix_milli DESC LIMIT 1 BY fingerprint",
signozHistoryDBName, ruleStateHistoryTableName, ruleID)
history := []v3.RuleStateHistory{}
history := []model.RuleStateHistory{}
err := r.db.Select(ctx, &history, query)
if err != nil {
return nil, err
@@ -5345,7 +5227,7 @@ func (r *ClickHouseReader) GetLastSavedRuleStateHistory(ctx context.Context, rul
}
func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID(
ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.RuleStateTimeline, error) {
ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*model.RuleStateTimeline, error) {
var conditions []string
@@ -5408,7 +5290,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID(
query := fmt.Sprintf("SELECT * FROM %s.%s WHERE %s ORDER BY unix_milli %s LIMIT %d OFFSET %d",
signozHistoryDBName, ruleStateHistoryTableName, whereClause, params.Order, params.Limit, params.Offset)
history := []v3.RuleStateHistory{}
history := []model.RuleStateHistory{}
zap.L().Debug("rule state history query", zap.String("query", query))
err := r.db.Select(ctx, &history, query)
if err != nil {
@@ -5450,7 +5332,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID(
}
}
timeline := &v3.RuleStateTimeline{
timeline := &model.RuleStateTimeline{
Items: history,
Total: total,
Labels: labelsMap,
@@ -5460,7 +5342,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID(
}
func (r *ClickHouseReader) ReadRuleStateHistoryTopContributorsByRuleID(
ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateHistoryContributor, error) {
ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.RuleStateHistoryContributor, error) {
query := fmt.Sprintf(`SELECT
fingerprint,
any(labels) as labels,
@@ -5473,7 +5355,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryTopContributorsByRuleID(
signozHistoryDBName, ruleStateHistoryTableName, ruleID, model.StateFiring.String(), params.Start, params.End)
zap.L().Debug("rule state history top contributors query", zap.String("query", query))
contributors := []v3.RuleStateHistoryContributor{}
contributors := []model.RuleStateHistoryContributor{}
err := r.db.Select(ctx, &contributors, query)
if err != nil {
zap.L().Error("Error while reading rule state history", zap.Error(err))
@@ -5483,7 +5365,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryTopContributorsByRuleID(
return contributors, nil
}
func (r *ClickHouseReader) GetOverallStateTransitions(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.ReleStateItem, error) {
func (r *ClickHouseReader) GetOverallStateTransitions(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.ReleStateItem, error) {
tmpl := `WITH firing_events AS (
SELECT
@@ -5529,18 +5411,18 @@ ORDER BY firing_time ASC;`
zap.L().Debug("overall state transitions query", zap.String("query", query))
transitions := []v3.RuleStateTransition{}
transitions := []model.RuleStateTransition{}
err := r.db.Select(ctx, &transitions, query)
if err != nil {
return nil, err
}
stateItems := []v3.ReleStateItem{}
stateItems := []model.ReleStateItem{}
for idx, item := range transitions {
start := item.FiringTime
end := item.ResolutionTime
stateItems = append(stateItems, v3.ReleStateItem{
stateItems = append(stateItems, model.ReleStateItem{
State: item.State,
Start: start,
End: end,
@@ -5548,7 +5430,7 @@ ORDER BY firing_time ASC;`
if idx < len(transitions)-1 {
nextStart := transitions[idx+1].FiringTime
if nextStart > end {
stateItems = append(stateItems, v3.ReleStateItem{
stateItems = append(stateItems, model.ReleStateItem{
State: model.StateInactive,
Start: end,
End: nextStart,
@@ -5570,7 +5452,7 @@ ORDER BY firing_time ASC;`
if len(transitions) == 0 {
// no transitions found, it is either firing or inactive for whole time range
stateItems = append(stateItems, v3.ReleStateItem{
stateItems = append(stateItems, model.ReleStateItem{
State: state,
Start: params.Start,
End: params.End,
@@ -5578,7 +5460,7 @@ ORDER BY firing_time ASC;`
} else {
// there were some transitions, we need to add the last state at the end
if state == model.StateInactive {
stateItems = append(stateItems, v3.ReleStateItem{
stateItems = append(stateItems, model.ReleStateItem{
State: model.StateInactive,
Start: transitions[len(transitions)-1].ResolutionTime,
End: params.End,
@@ -5595,12 +5477,12 @@ ORDER BY firing_time ASC;`
if err := r.db.QueryRow(ctx, firingQuery).Scan(&firingTime); err != nil {
return nil, err
}
stateItems = append(stateItems, v3.ReleStateItem{
stateItems = append(stateItems, model.ReleStateItem{
State: model.StateInactive,
Start: transitions[len(transitions)-1].ResolutionTime,
End: firingTime,
})
stateItems = append(stateItems, v3.ReleStateItem{
stateItems = append(stateItems, model.ReleStateItem{
State: model.StateFiring,
Start: firingTime,
End: params.End,
@@ -5610,7 +5492,7 @@ ORDER BY firing_time ASC;`
return stateItems, nil
}
func (r *ClickHouseReader) GetAvgResolutionTime(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (float64, error) {
func (r *ClickHouseReader) GetAvgResolutionTime(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (float64, error) {
tmpl := `
WITH firing_events AS (
@@ -5665,7 +5547,7 @@ FROM matched_events;
return avgResolutionTime, nil
}
func (r *ClickHouseReader) GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error) {
func (r *ClickHouseReader) GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*v3.Series, error) {
step := common.MinAllowedStepInterval(params.Start, params.End)
@@ -5722,7 +5604,7 @@ ORDER BY ts ASC;`
return result[0], nil
}
func (r *ClickHouseReader) GetTotalTriggers(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (uint64, error) {
func (r *ClickHouseReader) GetTotalTriggers(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (uint64, error) {
query := fmt.Sprintf("SELECT count(*) FROM %s.%s WHERE rule_id = '%s' AND (state_changed = true) AND (state = '%s') AND unix_milli >= %d AND unix_milli <= %d",
signozHistoryDBName, ruleStateHistoryTableName, ruleID, model.StateFiring.String(), params.Start, params.End)
@@ -5736,7 +5618,7 @@ func (r *ClickHouseReader) GetTotalTriggers(ctx context.Context, ruleID string,
return totalTriggers, nil
}
func (r *ClickHouseReader) GetTriggersByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error) {
func (r *ClickHouseReader) GetTriggersByInterval(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*v3.Series, error) {
step := common.MinAllowedStepInterval(params.Start, params.End)
query := fmt.Sprintf("SELECT count(*), toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts FROM %s.%s WHERE rule_id = '%s' AND (state_changed = true) AND (state = '%s') AND unix_milli >= %d AND unix_milli <= %d GROUP BY ts ORDER BY ts ASC",
@@ -5783,6 +5665,6 @@ func (r *ClickHouseReader) ReportQueryStartForProgressTracking(
func (r *ClickHouseReader) SubscribeToQueryProgress(
queryId string,
) (<-chan v3.QueryProgress, func(), *model.ApiError) {
) (<-chan model.QueryProgress, func(), *model.ApiError) {
return r.queryProgressTracker.SubscribeToQueryProgress(queryId)
}

View File

@@ -5,19 +5,18 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/gosimple/slug"
"github.com/jmoiron/sqlx"
"github.com/mitchellh/mapstructure"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.uber.org/zap"
)
@@ -152,6 +151,8 @@ func InitDB(dataSourceName string) (*sqlx.DB, error) {
return nil, fmt.Errorf("error in adding column locked to dashboards table: %s", err.Error())
}
telemetry.GetInstance().SetDashboardsInfoCallback(GetDashboardsInfo)
return db, nil
}
@@ -216,14 +217,6 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
newCount, _ := countTraceAndLogsPanel(data)
if newCount > 0 {
fErr := checkFeatureUsage(fm, newCount)
if fErr != nil {
return nil, fErr
}
}
result, err := db.Exec("INSERT INTO dashboards (uuid, created_at, created_by, updated_at, updated_by, data) VALUES ($1, $2, $3, $4, $5, $6)",
dash.Uuid, dash.CreatedAt, userEmail, dash.UpdatedAt, userEmail, mapData)
@@ -237,11 +230,6 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf
}
dash.Id = int(lastInsertId)
traceAndLogsPanelUsage, _ := countTraceAndLogsPanel(data)
if traceAndLogsPanelUsage > 0 {
updateFeatureUsage(fm, traceAndLogsPanelUsage)
}
return dash, nil
}
@@ -287,11 +275,6 @@ func DeleteDashboard(ctx context.Context, uuid string, fm interfaces.FeatureLook
return &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no dashboard found with uuid: %s", uuid)}
}
traceAndLogsPanelUsage, _ := countTraceAndLogsPanel(dashboard.Data)
if traceAndLogsPanelUsage > 0 {
updateFeatureUsage(fm, -traceAndLogsPanelUsage)
}
return nil
}
@@ -329,28 +312,15 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
}
}
// check if the count of trace and logs QB panel has changed, if yes, then check feature flag count
existingCount, existingTotal := countTraceAndLogsPanel(dashboard.Data)
newCount, newTotal := countTraceAndLogsPanel(data)
if newCount > existingCount {
err := checkFeatureUsage(fm, newCount-existingCount)
if err != nil {
return nil, err
}
}
// if the total count of panels has reduced by more than 1,
// return error
existingIds := getWidgetIds(dashboard.Data)
newIds := getWidgetIds(data)
if existingTotal > newTotal && existingTotal-newTotal > 1 {
// if the total count of panels has reduced by more than 1,
// return error
existingIds := getWidgetIds(dashboard.Data)
newIds := getWidgetIds(data)
differenceIds := getIdDifference(existingIds, newIds)
if len(differenceIds) > 1 {
return nil, model.BadRequest(fmt.Errorf("deleting more than one panel is not supported"))
}
differenceIds := getIdDifference(existingIds, newIds)
if len(differenceIds) > 1 {
return nil, model.BadRequest(fmt.Errorf("deleting more than one panel is not supported"))
}
dashboard.UpdatedAt = time.Now()
@@ -364,10 +334,6 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface
zap.L().Error("Error in inserting dashboard data", zap.Any("data", data), zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
if existingCount != newCount {
// if the count of trace and logs panel has changed, we need to update feature flag count as well
updateFeatureUsage(fm, newCount-existingCount)
}
return dashboard, nil
}
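
A self-contained sketch of the guard above: compute which widget ids disappeared between the stored and incoming payloads and reject the update when more than one is gone. The helper below stands in for the getWidgetIds/getIdDifference pair used in this file.

package main

import "fmt"

// idDifference returns ids present in existing but missing from updated
func idDifference(existing, updated []string) []string {
	seen := make(map[string]bool, len(updated))
	for _, id := range updated {
		seen[id] = true
	}
	var removed []string
	for _, id := range existing {
		if !seen[id] {
			removed = append(removed, id)
		}
	}
	return removed
}

func main() {
	removed := idDifference([]string{"a", "b", "c"}, []string{"a"})
	if len(removed) > 1 {
		fmt.Println("deleting more than one panel is not supported:", removed)
	}
}
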
@@ -389,51 +355,6 @@ func LockUnlockDashboard(ctx context.Context, uuid string, lock bool) *model.Api
return nil
}
func updateFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiError {
feature, err := fm.GetFeatureFlag(model.QueryBuilderPanels)
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
default:
zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
}
}
feature.Usage += usage
if feature.Usage >= feature.UsageLimit && feature.UsageLimit != -1 {
feature.Active = false
}
if feature.Usage < feature.UsageLimit || feature.UsageLimit == -1 {
feature.Active = true
}
err = fm.UpdateFeatureFlag(feature)
if err != nil {
return model.BadRequest(err)
}
return nil
}
func checkFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiError {
feature, err := fm.GetFeatureFlag(model.QueryBuilderPanels)
if err != nil {
switch err.(type) {
case model.ErrFeatureUnavailable:
zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
default:
zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err))
return model.BadRequest(err)
}
}
if feature.UsageLimit-(feature.Usage+usage) < 0 && feature.UsageLimit != -1 {
return model.BadRequest(fmt.Errorf("feature usage exceeded"))
}
return nil
}
// UpdateSlug updates the slug
func (d *Dashboard) UpdateSlug() {
var title string
@@ -469,276 +390,6 @@ func SlugifyTitle(title string) string {
return s
}
func widgetFromPanel(panel model.Panels, idx int, variables map[string]model.Variable) *model.Widget {
widget := model.Widget{
Description: panel.Description,
ID: strconv.Itoa(idx),
IsStacked: false,
NullZeroValues: "zero",
Opacity: "1",
PanelTypes: "TIME_SERIES", // TODO: Need to figure out how to get this
Query: model.Query{
ClickHouse: []model.ClickHouseQueryDashboard{
{
Disabled: false,
Legend: "",
Name: "A",
Query: "",
},
},
MetricsBuilder: model.MetricsBuilder{
Formulas: []string{},
QueryBuilder: []model.QueryBuilder{
{
AggregateOperator: 1,
Disabled: false,
GroupBy: []string{},
Legend: "",
MetricName: "",
Name: "A",
ReduceTo: 1,
},
},
},
PromQL: []model.PromQueryDashboard{},
QueryType: int(model.PROM),
},
QueryData: model.QueryDataDashboard{
Data: model.Data{
QueryData: []interface{}{},
},
},
Title: panel.Title,
YAxisUnit: panel.FieldConfig.Defaults.Unit,
QueryType: int(model.PROM), // TODO: Support for multiple query types
}
for _, target := range panel.Targets {
if target.Expr != "" {
for name := range variables {
target.Expr = strings.ReplaceAll(target.Expr, "$"+name, "{{"+"."+name+"}}")
target.Expr = strings.ReplaceAll(target.Expr, "$"+"__rate_interval", "5m")
}
// prometheus receiver in collector maps job,instance as service_name,service_instance_id
target.Expr = instanceEQRE.ReplaceAllString(target.Expr, "service_instance_id=\"{{.instance}}\"")
target.Expr = nodeEQRE.ReplaceAllString(target.Expr, "service_instance_id=\"{{.node}}\"")
target.Expr = jobEQRE.ReplaceAllString(target.Expr, "service_name=\"{{.job}}\"")
target.Expr = instanceRERE.ReplaceAllString(target.Expr, "service_instance_id=~\"{{.instance}}\"")
target.Expr = nodeRERE.ReplaceAllString(target.Expr, "service_instance_id=~\"{{.node}}\"")
target.Expr = jobRERE.ReplaceAllString(target.Expr, "service_name=~\"{{.job}}\"")
widget.Query.PromQL = append(
widget.Query.PromQL,
model.PromQueryDashboard{
Disabled: false,
Legend: target.LegendFormat,
Name: target.RefID,
Query: target.Expr,
},
)
}
}
return &widget
}
func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.DashboardData {
var toReturn model.DashboardData
toReturn.Title = grafanaJSON.Title
toReturn.Tags = grafanaJSON.Tags
toReturn.Variables = make(map[string]model.Variable)
for templateIdx, template := range grafanaJSON.Templating.List {
var sort, typ, textboxValue, customValue, queryValue string
if template.Sort == 1 {
sort = "ASC"
} else if template.Sort == 2 {
sort = "DESC"
} else {
sort = "DISABLED"
}
if template.Type == "query" {
if template.Datasource == nil {
zap.L().Warn("Skipping panel as it has no datasource", zap.Int("templateIdx", templateIdx))
continue
}
// Skip if the source is not prometheus
source, stringOk := template.Datasource.(string)
if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") {
zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx))
continue
}
var result model.Datasource
var structOk bool
if reflect.TypeOf(template.Datasource).Kind() == reflect.Map {
err := mapstructure.Decode(template.Datasource, &result)
if err == nil {
structOk = true
}
}
if result.Type != "prometheus" && result.Type != "" {
zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx))
continue
}
if !stringOk && !structOk {
zap.L().Warn("Didn't recognize source, skipping")
continue
}
typ = "QUERY"
} else if template.Type == "custom" {
typ = "CUSTOM"
} else if template.Type == "textbox" {
typ = "TEXTBOX"
text, ok := template.Current.Text.(string)
if ok {
textboxValue = text
}
array, ok := template.Current.Text.([]string)
if ok {
textboxValue = strings.Join(array, ",")
}
} else {
continue
}
var selectedValue string
text, ok := template.Current.Value.(string)
if ok {
selectedValue = text
}
array, ok := template.Current.Value.([]string)
if ok {
selectedValue = strings.Join(array, ",")
}
toReturn.Variables[template.Name] = model.Variable{
AllSelected: false,
CustomValue: customValue,
Description: template.Label,
MultiSelect: template.Multi,
QueryValue: queryValue,
SelectedValue: selectedValue,
ShowALLOption: template.IncludeAll,
Sort: sort,
TextboxValue: textboxValue,
Type: typ,
}
}
row := 0
idx := 0
for _, panel := range grafanaJSON.Panels {
if panel.Type == "row" {
if len(panel.Panels) > 0 {
for _, innerPanel := range panel.Panels {
if idx%3 == 0 {
row++
}
toReturn.Layout = append(
toReturn.Layout,
model.Layout{
X: idx % 3 * 4,
Y: row * 3,
W: 4,
H: 3,
I: strconv.Itoa(idx),
},
)
toReturn.Widgets = append(toReturn.Widgets, *widgetFromPanel(innerPanel, idx, toReturn.Variables))
idx++
}
}
continue
}
if panel.Datasource == nil {
zap.L().Warn("Skipping panel as it has no datasource", zap.Int("idx", idx))
continue
}
// Skip if the datasource is not prometheus
source, stringOk := panel.Datasource.(string)
if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") {
zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx))
continue
}
var result model.Datasource
var structOk bool
if reflect.TypeOf(panel.Datasource).Kind() == reflect.Map {
err := mapstructure.Decode(panel.Datasource, &result)
if err == nil {
structOk = true
}
}
if result.Type != "prometheus" && result.Type != "" {
zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx))
continue
}
if !stringOk && !structOk {
zap.L().Warn("Didn't recognize source, skipping")
continue
}
// Create a panel from "gridPos"
if idx%3 == 0 {
row++
}
toReturn.Layout = append(
toReturn.Layout,
model.Layout{
X: idx % 3 * 4,
Y: row * 3,
W: 4,
H: 3,
I: strconv.Itoa(idx),
},
)
toReturn.Widgets = append(toReturn.Widgets, *widgetFromPanel(panel, idx, toReturn.Variables))
idx++
}
return toReturn
}
func countTraceAndLogsPanel(data map[string]interface{}) (int64, int64) {
count := int64(0)
totalPanels := int64(0)
if data != nil && data["widgets"] != nil {
widgets, ok := data["widgets"]
if ok {
data, ok := widgets.([]interface{})
if ok {
for _, widget := range data {
sData, ok := widget.(map[string]interface{})
if ok && sData["query"] != nil {
totalPanels++
query, ok := sData["query"].(map[string]interface{})
if ok && query["queryType"] == "builder" && query["builder"] != nil {
builderData, ok := query["builder"].(map[string]interface{})
if ok && builderData["queryData"] != nil {
builderQueryData, ok := builderData["queryData"].([]interface{})
if ok {
for _, queryData := range builderQueryData {
data, ok := queryData.(map[string]interface{})
if ok {
if data["dataSource"] == "traces" || data["dataSource"] == "logs" {
count++
}
}
}
}
}
}
}
}
}
}
}
return count, totalPanels
}
func getWidgetIds(data map[string]interface{}) []string {
widgetIds := []string{}
if data != nil && data["widgets"] != nil {
@@ -787,3 +438,126 @@ func getIdDifference(existingIds []string, newIds []string) []string {
return difference
}
// GetDashboardsInfo returns analytics data for dashboards
func GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) {
dashboardsInfo := model.DashboardsInfo{}
// fetch dashboards from dashboard db
query := "SELECT data FROM dashboards"
var dashboardsData []Dashboard
err := db.Select(&dashboardsData, query)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return &dashboardsInfo, err
}
totalDashboardsWithPanelAndName := 0
var dashboardNames []string
count := 0
for _, dashboard := range dashboardsData {
if isDashboardWithPanelAndName(dashboard.Data) {
totalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName + 1
}
dashboardName := extractDashboardName(dashboard.Data)
if dashboardName != "" {
dashboardNames = append(dashboardNames, dashboardName)
}
dashboardInfo := countPanelsInDashboard(dashboard.Data)
dashboardsInfo.LogsBasedPanels += dashboardInfo.LogsBasedPanels
dashboardsInfo.TracesBasedPanels += dashboardInfo.TracesBasedPanels
dashboardsInfo.MetricBasedPanels += dashboardInfo.MetricBasedPanels
if isDashboardWithTSV2(dashboard.Data) {
count = count + 1
}
}
dashboardsInfo.DashboardNames = dashboardNames
dashboardsInfo.TotalDashboards = len(dashboardsData)
dashboardsInfo.TotalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName
dashboardsInfo.QueriesWithTSV2 = count
return &dashboardsInfo, nil
}
func isDashboardWithTSV2(data map[string]interface{}) bool {
jsonData, err := json.Marshal(data)
if err != nil {
return false
}
return strings.Contains(string(jsonData), "time_series_v2")
}
func isDashboardWithPanelAndName(data map[string]interface{}) bool {
isDashboardName := false
isDashboardWithPanelAndName := false
if data != nil && data["title"] != nil && data["widgets"] != nil {
title, ok := data["title"].(string)
if ok && title != "Sample Title" {
isDashboardName = true
}
widgets, ok := data["widgets"]
if ok && isDashboardName {
data, ok := widgets.([]interface{})
if ok && len(data) > 0 {
isDashboardWithPanelAndName = true
}
}
}
return isDashboardWithPanelAndName
}
func extractDashboardName(data map[string]interface{}) string {
if data != nil && data["title"] != nil {
title, ok := data["title"].(string)
if ok {
return title
}
}
return ""
}
func countPanelsInDashboard(data map[string]interface{}) model.DashboardsInfo {
var logsPanelCount, tracesPanelCount, metricsPanelCount int
// totalPanels := 0
if data != nil && data["widgets"] != nil {
widgets, ok := data["widgets"]
if ok {
data, ok := widgets.([]interface{})
if ok {
for _, widget := range data {
sData, ok := widget.(map[string]interface{})
if ok && sData["query"] != nil {
// totalPanels++
query, ok := sData["query"].(map[string]interface{})
if ok && query["queryType"] == "builder" && query["builder"] != nil {
builderData, ok := query["builder"].(map[string]interface{})
if ok && builderData["queryData"] != nil {
builderQueryData, ok := builderData["queryData"].([]interface{})
if ok {
for _, queryData := range builderQueryData {
data, ok := queryData.(map[string]interface{})
if ok {
if data["dataSource"] == "traces" {
tracesPanelCount++
} else if data["dataSource"] == "metrics" {
metricsPanelCount++
} else if data["dataSource"] == "logs" {
logsPanelCount++
}
}
}
}
}
}
}
}
}
}
}
return model.DashboardsInfo{
LogsBasedPanels: logsPanelCount,
TracesBasedPanels: tracesPanelCount,
MetricBasedPanels: metricsPanelCount,
}
}
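
For reference, a minimal payload shape that countPanelsInDashboard above would classify, inferred from its type assertions; the title and values are hypothetical.

package main

import "fmt"

func main() {
	dash := map[string]interface{}{
		"title": "My dashboard",
		"widgets": []interface{}{
			map[string]interface{}{
				"query": map[string]interface{}{
					"queryType": "builder",
					"builder": map[string]interface{}{
						"queryData": []interface{}{
							map[string]interface{}{"dataSource": "logs"},
						},
					},
				},
			},
		},
	}
	// countPanelsInDashboard(dash) would report LogsBasedPanels: 1
	fmt.Println(len(dash["widgets"].([]interface{})), "widget(s)")
}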

View File

@@ -10,7 +10,10 @@ import (
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.uber.org/zap"
)
var db *sqlx.DB
@@ -57,6 +60,8 @@ func InitWithDSN(dataSourceName string) (*sqlx.DB, error) {
return nil, fmt.Errorf("error in creating saved views table: %s", err.Error())
}
telemetry.GetInstance().SetSavedViewsInfoCallback(GetSavedViewsInfo)
return db, nil
}
@@ -228,3 +233,21 @@ func DeleteView(uuid_ string) error {
}
return nil
}
func GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error) {
savedViewsInfo := model.SavedViewsInfo{}
savedViews, err := GetViews()
if err != nil {
zap.S().Debug("Error in fetching saved views info: ", err)
return &savedViewsInfo, err
}
savedViewsInfo.TotalSavedViews = len(savedViews)
for _, view := range savedViews {
if view.SourcePage == "traces" {
savedViewsInfo.TracesSavedViews += 1
} else if view.SourcePage == "logs" {
savedViewsInfo.LogsSavedViews += 1
}
}
return &savedViewsInfo, nil
}

View File

@@ -29,6 +29,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logs"
logsv3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsv4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
"go.signoz.io/signoz/pkg/query-service/app/metrics"
metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
"go.signoz.io/signoz/pkg/query-service/app/preferences"
@@ -105,6 +106,8 @@ type APIHandler struct {
// Websocket connection upgrader
Upgrader *websocket.Upgrader
UseLogsNewSchema bool
}
type APIHandlerOpts struct {
@@ -140,6 +143,9 @@ type APIHandlerOpts struct {
// Querier Influx Interval
FluxInterval time.Duration
// Use new logs schema
UseLogsNewSchema bool
}
// NewAPIHandler returns an APIHandler
@@ -151,19 +157,21 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
}
querierOpts := querier.QuerierOptions{
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
FeatureLookup: opts.FeatureFlags,
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
FeatureLookup: opts.FeatureFlags,
UseLogsNewSchema: opts.UseLogsNewSchema,
}
querierOptsV2 := querierV2.QuerierOptions{
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
FeatureLookup: opts.FeatureFlags,
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
FeatureLookup: opts.FeatureFlags,
UseLogsNewSchema: opts.UseLogsNewSchema,
}
querier := querier.NewQuerier(querierOpts)
@@ -185,12 +193,18 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
LogsParsingPipelineController: opts.LogsParsingPipelineController,
querier: querier,
querierV2: querierv2,
UseLogsNewSchema: opts.UseLogsNewSchema,
}
logsQueryBuilder := logsv3.PrepareLogsQuery
if opts.UseLogsNewSchema {
logsQueryBuilder = logsv4.PrepareLogsQuery
}
builderOpts := queryBuilder.QueryBuilderOptions{
BuildMetricQuery: metricsv3.PrepareMetricQuery,
BuildTraceQuery: tracesV3.PrepareTracesQuery,
BuildLogQuery: logsv3.PrepareLogsQuery,
BuildLogQuery: logsQueryBuilder,
}
aH.queryBuilder = queryBuilder.NewQueryBuilder(builderOpts, aH.featureFlags)
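What makes the one-line swap above possible is that both builders expose the same function shape; a sketch, assuming the v4 builder keeps the v3 signature (which the BuildLogQuery assignment implies):

// Both versions satisfy one function type, so the handler stays schema-agnostic.
type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType,
	panelType v3.PanelType, mq *v3.BuilderQuery, options v3.LogQBOptions) (string, error)

var _ prepareLogsQueryFunc = logsv3.PrepareLogsQuery
var _ prepareLogsQueryFunc = logsv4.PrepareLogsQuery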
@@ -382,11 +396,9 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
router.HandleFunc("/api/v1/dashboards", am.ViewAccess(aH.getDashboards)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/dashboards", am.EditAccess(aH.createDashboards)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/dashboards/grafana", am.EditAccess(aH.createDashboardsTransform)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/dashboards/{uuid}", am.ViewAccess(aH.getDashboard)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/dashboards/{uuid}", am.EditAccess(aH.updateDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{uuid}", am.EditAccess(aH.deleteDashboard)).Methods(http.MethodDelete)
router.HandleFunc("/api/v1/variables/query", am.ViewAccess(aH.queryDashboardVars)).Methods(http.MethodGet)
router.HandleFunc("/api/v2/variables/query", am.ViewAccess(aH.queryDashboardVarsV2)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/explorer/views", am.ViewAccess(aH.getSavedViews)).Methods(http.MethodGet)
@@ -657,7 +669,7 @@ func (aH *APIHandler) deleteDowntimeSchedule(w http.ResponseWriter, r *http.Requ
func (aH *APIHandler) getRuleStats(w http.ResponseWriter, r *http.Request) {
ruleID := mux.Vars(r)["id"]
params := v3.QueryRuleStateHistory{}
params := model.QueryRuleStateHistory{}
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
@@ -723,7 +735,7 @@ func (aH *APIHandler) getRuleStats(w http.ResponseWriter, r *http.Request) {
pastAvgResolutionTime = 0
}
stats := v3.Stats{
stats := model.Stats{
TotalCurrentTriggers: totalCurrentTriggers,
TotalPastTriggers: totalPastTriggers,
CurrentTriggersSeries: currentTriggersSeries,
@@ -739,7 +751,7 @@ func (aH *APIHandler) getRuleStats(w http.ResponseWriter, r *http.Request) {
func (aH *APIHandler) getOverallStateTransitions(w http.ResponseWriter, r *http.Request) {
ruleID := mux.Vars(r)["id"]
params := v3.QueryRuleStateHistory{}
params := model.QueryRuleStateHistory{}
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
@@ -757,7 +769,7 @@ func (aH *APIHandler) getOverallStateTransitions(w http.ResponseWriter, r *http.
func (aH *APIHandler) getRuleStateHistory(w http.ResponseWriter, r *http.Request) {
ruleID := mux.Vars(r)["id"]
params := v3.QueryRuleStateHistory{}
params := model.QueryRuleStateHistory{}
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
@@ -809,7 +821,7 @@ func (aH *APIHandler) getRuleStateHistory(w http.ResponseWriter, r *http.Request
func (aH *APIHandler) getRuleStateHistoryTopContributors(w http.ResponseWriter, r *http.Request) {
ruleID := mux.Vars(r)["id"]
params := v3.QueryRuleStateHistory{}
params := model.QueryRuleStateHistory{}
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
@@ -927,25 +939,6 @@ func (aH *APIHandler) deleteDashboard(w http.ResponseWriter, r *http.Request) {
}
func (aH *APIHandler) queryDashboardVars(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query().Get("query")
if query == "" {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("query is required")}, nil)
return
}
if strings.Contains(strings.ToLower(query), "alter table") {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("query shouldn't alter data")}, nil)
return
}
dashboardVars, err := aH.reader.QueryDashboardVars(r.Context(), query)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
aH.Respond(w, dashboardVars)
}
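For reference, a hedged sketch of calling this endpoint, which is registered above as GET /api/v1/variables/query; the base URL and the sample query are assumptions, and note the handler rejects anything containing "alter table":

// Hypothetical client call (illustration only).
q := url.QueryEscape("SELECT DISTINCT serviceName FROM signoz_traces.distributed_signoz_index_v2")
resp, err := http.Get("http://localhost:8080/api/v1/variables/query?query=" + q)
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()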
func prepareQuery(r *http.Request) (string, error) {
var postData *model.DashboardVars
@@ -1075,27 +1068,6 @@ func (aH *APIHandler) saveAndReturn(w http.ResponseWriter, r *http.Request, sign
aH.Respond(w, dashboard)
}
func (aH *APIHandler) createDashboardsTransform(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
b, err := io.ReadAll(r.Body)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading request body")
return
}
var importData model.GrafanaJSON
err = json.Unmarshal(b, &importData)
if err == nil {
signozDashboard := dashboards.TransformGrafanaJSONToSignoz(importData)
aH.saveAndReturn(w, r, signozDashboard)
return
}
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, "Error while creating dashboard from grafana json")
}
func (aH *APIHandler) createDashboards(w http.ResponseWriter, r *http.Request) {
var postData map[string]interface{}
@@ -2513,7 +2485,7 @@ func (aH *APIHandler) getNetworkData(
var result []*v3.Result
var errQueriesByName map[string]error
result, errQueriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil)
result, errQueriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQueriesByName)
@@ -2548,7 +2520,7 @@ func (aH *APIHandler) getNetworkData(
return
}
resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil)
resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQueriesByNameFetchLatency)
@@ -2609,7 +2581,7 @@ func (aH *APIHandler) getProducerData(
var result []*v3.Result
var errQuriesByName map[string]error
result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil)
result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
@@ -2650,7 +2622,7 @@ func (aH *APIHandler) getConsumerData(
var result []*v3.Result
var errQuriesByName map[string]error
result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil)
result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
@@ -3012,7 +2984,7 @@ func (aH *APIHandler) calculateLogsConnectionStatus(
},
}
queryRes, _, err := aH.querier.QueryRange(
ctx, qrParams, map[string]v3.AttributeKey{},
ctx, qrParams,
)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
@@ -3662,13 +3634,14 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
RespondError(w, apiErrObj, errQuriesByName)
return
}
tracesV3.Enrich(queryRangeParams, spanKeys)
}
// WARN: Only works for AND operator in traces query
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
// check if traceID is used as a filter (with equal/similar operator) in the traces query; if yes, add a timestamp filter to the queryRange params
isUsed, traceIDs := tracesV3.TraceIdFilterUsedWithEqual(queryRangeParams)
if isUsed == true && len(traceIDs) > 0 {
if isUsed && len(traceIDs) > 0 {
zap.L().Debug("traceID used as filter in traces query")
// query signoz_spans table with traceID to get min and max timestamp
min, max, err := aH.reader.GetMinAndMaxTimestampForTraceID(ctx, traceIDs)
@@ -3698,11 +3671,15 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
}
}
result, errQuriesByName, err = aH.querier.QueryRange(ctx, queryRangeParams, spanKeys)
result, errQuriesByName, err = aH.querier.QueryRange(ctx, queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
queryErrors := map[string]string{}
for name, err := range errQuriesByName {
queryErrors[fmt.Sprintf("Query-%s", name)] = err.Error()
}
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, queryErrors)
return
}
@@ -3918,7 +3895,93 @@ func (aH *APIHandler) GetQueryProgressUpdates(w http.ResponseWriter, r *http.Req
}
}
func (aH *APIHandler) liveTailLogsV2(w http.ResponseWriter, r *http.Request) {
// get the param from url and add it to body
stringReader := strings.NewReader(r.URL.Query().Get("q"))
r.Body = io.NopCloser(stringReader)
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
zap.L().Error(apiErrorObj.Err.Error())
RespondError(w, apiErrorObj, nil)
return
}
var err error
var queryString string
switch queryRangeParams.CompositeQuery.QueryType {
case v3.QueryTypeBuilder:
// check if any enrichment is required for logs; if yes, enrich them
if logsv3.EnrichmentRequired(queryRangeParams) {
// get the fields if any logs query is present
var fields map[string]v3.AttributeKey
fields, err = aH.getLogFieldsV3(r.Context(), queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, nil)
return
}
logsv3.Enrich(queryRangeParams, fields)
}
queryString, err = aH.queryBuilder.PrepareLiveTailQuery(queryRangeParams)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
default:
err = fmt.Errorf("invalid query type")
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
w.Header().Set("Connection", "keep-alive")
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(200)
flusher, ok := w.(http.Flusher)
if !ok {
err := model.ApiError{Typ: model.ErrorStreamingNotSupported, Err: nil}
RespondError(w, &err, "streaming is not supported")
return
}
// flush the headers
flusher.Flush()
// create the client
client := &model.LogsLiveTailClientV2{Name: r.RemoteAddr, Logs: make(chan *model.SignozLogV2, 1000), Done: make(chan *bool), Error: make(chan error)}
go aH.reader.LiveTailLogsV4(r.Context(), queryString, uint64(queryRangeParams.Start), "", client)
for {
select {
case log := <-client.Logs:
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
enc.Encode(log)
fmt.Fprintf(w, "data: %v\n\n", buf.String())
flusher.Flush()
case <-client.Done:
zap.L().Debug("done!")
return
case err := <-client.Error:
zap.L().Error("error occurred", zap.Error(err))
fmt.Fprintf(w, "event: error\ndata: %v\n\n", err.Error())
flusher.Flush()
return
}
}
}
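Because liveTailLogsV2 speaks server-sent events, a consumer only needs to read "data: " lines off a long-lived response. A minimal Go sketch, assuming the endpoint takes the JSON-encoded query range params in the q parameter as the handler above expects; the route path is left as a parameter since it is not part of this hunk:

// Hypothetical SSE consumer for the live-tail endpoint (illustration only).
import (
	"bufio"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// endpoint is the live-tail route registered by the API handler.
func tailLogs(endpoint, qJSON string) error {
	resp, err := http.Get(endpoint + "?q=" + url.QueryEscape(qJSON))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		if line := sc.Text(); strings.HasPrefix(line, "data: ") {
			fmt.Println(strings.TrimPrefix(line, "data: ")) // one log record as JSON
		}
	}
	return sc.Err()
}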
func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
if aH.UseLogsNewSchema {
aH.liveTailLogsV2(w, r)
return
}
// get the param from url and add it to body
stringReader := strings.NewReader(r.URL.Query().Get("q"))
@@ -3961,7 +4024,7 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
}
// create the client
client := &v3.LogsLiveTailClient{Name: r.RemoteAddr, Logs: make(chan *model.SignozLog, 1000), Done: make(chan *bool), Error: make(chan error)}
client := &model.LogsLiveTailClient{Name: r.RemoteAddr, Logs: make(chan *model.SignozLog, 1000), Done: make(chan *bool), Error: make(chan error)}
go aH.reader.LiveTailLogsV3(r.Context(), queryString, uint64(queryRangeParams.Start), "", client)
w.Header().Set("Connection", "keep-alive")
@@ -3996,6 +4059,7 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
return
}
}
}
func (aH *APIHandler) getMetricMetadata(w http.ResponseWriter, r *http.Request) {
@@ -4036,13 +4100,14 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
RespondError(w, apiErrObj, errQuriesByName)
return
}
tracesV3.Enrich(queryRangeParams, spanKeys)
}
// WARN: Only works for AND operator in traces query
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
// check if traceID is used as a filter (with equal/similar operator) in the traces query; if yes, add a timestamp filter to the queryRange params
isUsed, traceIDs := tracesV3.TraceIdFilterUsedWithEqual(queryRangeParams)
if isUsed == true && len(traceIDs) > 0 {
if isUsed && len(traceIDs) > 0 {
zap.L().Debug("traceID used as filter in traces query")
// query signoz_spans table with traceID to get min and max timestamp
min, max, err := aH.reader.GetMinAndMaxTimestampForTraceID(ctx, traceIDs)
@@ -4054,11 +4119,15 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
}
}
result, errQuriesByName, err = aH.querierV2.QueryRange(ctx, queryRangeParams, spanKeys)
result, errQuriesByName, err = aH.querierV2.QueryRange(ctx, queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, errQuriesByName)
queryErrors := map[string]string{}
for name, err := range errQuriesByName {
queryErrors[fmt.Sprintf("Query-%s", name)] = err.Error()
}
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, queryErrors)
return
}

@@ -20,7 +20,7 @@ const (
NGRAM_SIZE = 4
)
var dataTypeMapping = map[string]string{
var DataTypeMapping = map[string]string{
"string": STRING,
"int64": INT64,
"float64": FLOAT64,
@@ -31,7 +31,7 @@ var dataTypeMapping = map[string]string{
"array(bool)": ARRAY_BOOL,
}
var arrayValueTypeMapping = map[string]string{
var ArrayValueTypeMapping = map[string]string{
"array(string)": "string",
"array(int64)": "int64",
"array(float64)": "float64",
@@ -59,7 +59,7 @@ var jsonLogOperators = map[v3.FilterOperator]string{
v3.FilterOperatorNotHas: "NOT has(%s, %s)",
}
func getPath(keyArr []string) string {
func GetPath(keyArr []string) string {
path := []string{}
for i := 0; i < len(keyArr); i++ {
if strings.HasSuffix(keyArr[i], "[*]") {
@@ -71,7 +71,7 @@ func getPath(keyArr []string) string {
return strings.Join(path, ".")
}
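Based on the expected filters in the v4 tests later in this changeset, GetPath quotes each segment and preserves the [*] suffix:

// Input/output pairs implied by the test expectations below:
// GetPath([]string{"requestor_list[*]"})              -> "requestor_list"[*]
// GetPath([]string{"nested_num[*]", "float_nums[*]"}) -> "nested_num"[*]."float_nums"[*]
// GetPath([]string{"message"})                        -> "message"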
func getJSONFilterKey(key v3.AttributeKey, op v3.FilterOperator, isArray bool) (string, error) {
func GetJSONFilterKey(key v3.AttributeKey, op v3.FilterOperator, isArray bool) (string, error) {
keyArr := strings.Split(key.Key, ".")
// i.e. it should be at least body.name, and not just body
if len(keyArr) < 2 {
@@ -89,11 +89,11 @@ func getJSONFilterKey(key v3.AttributeKey, op v3.FilterOperator, isArray bool) (
var dataType string
var ok bool
if dataType, ok = dataTypeMapping[string(key.DataType)]; !ok {
if dataType, ok = DataTypeMapping[string(key.DataType)]; !ok {
return "", fmt.Errorf("unsupported dataType for JSON: %s", key.DataType)
}
path := getPath(keyArr[1:])
path := GetPath(keyArr[1:])
if isArray {
return fmt.Sprintf("JSONExtract(JSON_QUERY(%s, '$.%s'), '%s')", keyArr[0], path, dataType), nil
@@ -109,7 +109,7 @@ func getJSONFilterKey(key v3.AttributeKey, op v3.FilterOperator, isArray bool) (
}
// takes the path and the values and generates where clauses for better usage of index
func getPathIndexFilter(path string) string {
func GetPathIndexFilter(path string) string {
filters := []string{}
keyArr := strings.Split(path, ".")
if len(keyArr) < 2 {
@@ -136,7 +136,7 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
dataType := item.Key.DataType
isArray := false
// check if its an array and handle it
if val, ok := arrayValueTypeMapping[string(item.Key.DataType)]; ok {
if val, ok := ArrayValueTypeMapping[string(item.Key.DataType)]; ok {
if item.Operator != v3.FilterOperatorHas && item.Operator != v3.FilterOperatorNotHas {
return "", fmt.Errorf("only has operator is supported for array")
}
@@ -144,7 +144,7 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
dataType = v3.AttributeKeyDataType(val)
}
key, err := getJSONFilterKey(item.Key, item.Operator, isArray)
key, err := GetJSONFilterKey(item.Key, item.Operator, isArray)
if err != nil {
return "", err
}
@@ -164,7 +164,7 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
if logsOp, ok := jsonLogOperators[op]; ok {
switch op {
case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
filter = fmt.Sprintf(logsOp, key, getPath(strings.Split(item.Key.Key, ".")[1:]))
filter = fmt.Sprintf(logsOp, key, GetPath(strings.Split(item.Key.Key, ".")[1:]))
case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex, v3.FilterOperatorHas, v3.FilterOperatorNotHas:
fmtVal := utils.ClickHouseFormattedValue(value)
filter = fmt.Sprintf(logsOp, key, fmtVal)
@@ -181,7 +181,7 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
filters := []string{}
pathFilter := getPathIndexFilter(item.Key.Key)
pathFilter := GetPathIndexFilter(item.Key.Key)
if pathFilter != "" {
filters = append(filters, pathFilter)
}
@@ -196,7 +196,7 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
// add exists check for non-array items as default values of int/float/bool will corrupt the results
if !isArray && !(item.Operator == v3.FilterOperatorExists || item.Operator == v3.FilterOperatorNotExists) {
existsFilter := fmt.Sprintf("JSON_EXISTS(body, '$.%s')", getPath(strings.Split(item.Key.Key, ".")[1:]))
existsFilter := fmt.Sprintf("JSON_EXISTS(body, '$.%s')", GetPath(strings.Split(item.Key.Key, ".")[1:]))
filter = fmt.Sprintf("%s AND %s", existsFilter, filter)
}

@@ -140,7 +140,7 @@ var testGetJSONFilterKeyData = []struct {
func TestGetJSONFilterKey(t *testing.T) {
for _, tt := range testGetJSONFilterKeyData {
Convey("testgetKey", t, func() {
columnName, err := getJSONFilterKey(tt.Key, tt.Operator, tt.IsArray)
columnName, err := GetJSONFilterKey(tt.Key, tt.Operator, tt.IsArray)
if tt.Error {
So(err, ShouldNotBeNil)
} else {

@@ -9,7 +9,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/utils"
)
var aggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
var AggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
v3.AggregateOperatorP05: 0.05,
v3.AggregateOperatorP10: 0.10,
v3.AggregateOperatorP20: 0.20,
@@ -21,7 +21,7 @@ var aggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
v3.AggregateOperatorP99: 0.99,
}
var aggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{
var AggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{
v3.AggregateOperatorAvg: "avg",
v3.AggregateOperatorMax: "max",
v3.AggregateOperatorMin: "min",
@@ -53,7 +53,7 @@ var logOperators = map[v3.FilterOperator]string{
const BODY = "body"
func getClickhouseLogsColumnType(columnType v3.AttributeKeyType) string {
func GetClickhouseLogsColumnType(columnType v3.AttributeKeyType) string {
if columnType == v3.AttributeKeyTypeTag {
return "attributes"
}
@@ -83,7 +83,7 @@ func getClickhouseColumnName(key v3.AttributeKey) string {
// if the key is present in topLevelColumn, it will only be searched in those columns,
// regardless of whether it is indexed/present again in resource or column attributes
if !key.IsColumn {
columnType := getClickhouseLogsColumnType(key.Type)
columnType := GetClickhouseLogsColumnType(key.Type)
columnDataType := getClickhouseLogsColumnDataType(key.DataType)
clickhouseColumn = fmt.Sprintf("%s_%s_value[indexOf(%s_%s_key, '%s')]", columnType, columnDataType, columnType, columnDataType, key.Key)
return clickhouseColumn
@@ -114,7 +114,7 @@ func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attri
return selectLabels
}
func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
func GetSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
var selectLabels []string
if aggregatorOperator == v3.AggregateOperatorNoOp {
return ""
@@ -154,7 +154,7 @@ func GetExistsNexistsFilter(op v3.FilterOperator, item v3.FilterItem) string {
}
return fmt.Sprintf("%s_exists`=%v", strings.TrimSuffix(getClickhouseColumnName(item.Key), "`"), val)
}
columnType := getClickhouseLogsColumnType(item.Key.Type)
columnType := GetClickhouseLogsColumnType(item.Key.Type)
columnDataType := getClickhouseLogsColumnDataType(item.Key.DataType)
return fmt.Sprintf(logOperators[op], columnType, columnDataType, item.Key.Key)
}
@@ -224,7 +224,7 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey,
// add group by conditions to filter out log lines which don't have the key
for _, attr := range groupBy {
if !attr.IsColumn {
columnType := getClickhouseLogsColumnType(attr.Type)
columnType := GetClickhouseLogsColumnType(attr.Type)
columnDataType := getClickhouseLogsColumnDataType(attr.DataType)
conditions = append(conditions, fmt.Sprintf("has(%s_%s_key, '%s')", columnType, columnDataType, attr.Key))
} else if attr.Type != v3.AttributeKeyTypeUnspecified {
@@ -258,7 +258,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
having := having(mq.Having)
having := Having(mq.Having)
if having != "" {
having = " having " + having
}
@@ -288,10 +288,10 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
// we dont need value for first query
// going with this route as for a cleaner approach on implementation
if graphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
queryTmpl = "SELECT " + GetSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
}
groupBy := groupByAttributeKeyTags(panelType, graphLimitQtype, mq.GroupBy...)
groupBy := GroupByAttributeKeyTags(panelType, graphLimitQtype, mq.GroupBy...)
if panelType != v3.PanelTypeList && groupBy != "" {
groupBy = " group by " + groupBy
}
@@ -301,7 +301,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
}
if graphLimitQtype == constants.SecondQueryGraphLimit {
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", getSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)"
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)"
}
aggregationKey := ""
@@ -329,7 +329,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
rate = rate / 60.0
}
op := fmt.Sprintf("%s(%s)/%f", aggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate)
op := fmt.Sprintf("%s(%s)/%f", AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case
@@ -342,11 +342,11 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
v3.AggregateOperatorP90,
v3.AggregateOperatorP95,
v3.AggregateOperatorP99:
op := fmt.Sprintf("quantile(%v)(%s)", aggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey)
op := fmt.Sprintf("quantile(%v)(%s)", AggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
op := fmt.Sprintf("%s(%s)", aggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey)
op := fmt.Sprintf("%s(%s)", AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorCount:
@@ -394,7 +394,7 @@ func groupBy(panelType v3.PanelType, graphLimitQtype string, tags ...string) str
return strings.Join(tags, ",")
}
func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
func GroupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
groupTags := []string{}
for _, tag := range tags {
groupTags = append(groupTags, "`"+tag.Key+"`")
@@ -446,7 +446,7 @@ func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []
return str
}
func having(items []v3.Having) string {
func Having(items []v3.Having) string {
// aggregate something and filter on that aggregate
var having []string
for _, item := range items {
@@ -455,7 +455,7 @@ func having(items []v3.Having) string {
return strings.Join(having, " AND ")
}
func reduceQuery(query string, reduceTo v3.ReduceToOperator, aggregateOperator v3.AggregateOperator) (string, error) {
func ReduceQuery(query string, reduceTo v3.ReduceToOperator, aggregateOperator v3.AggregateOperator) (string, error) {
// the timestamp picked is not relevant here since the final value is used to show the single
// chart with just the query value.
switch reduceTo {
@@ -475,24 +475,18 @@ func reduceQuery(query string, reduceTo v3.ReduceToOperator, aggregateOperator v
return query, nil
}
func addLimitToQuery(query string, limit uint64) string {
func AddLimitToQuery(query string, limit uint64) string {
if limit == 0 {
return query
}
return fmt.Sprintf("%s LIMIT %d", query, limit)
}
func addOffsetToQuery(query string, offset uint64) string {
func AddOffsetToQuery(query string, offset uint64) string {
return fmt.Sprintf("%s OFFSET %d", query, offset)
}
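A quick illustration of the two exported helpers (query text illustrative):

q := "SELECT body FROM signoz_logs.distributed_logs"
q = AddLimitToQuery(q, 100) // ... LIMIT 100 (a limit of 0 leaves the query unchanged)
q = AddOffsetToQuery(q, 20) // ... LIMIT 100 OFFSET 20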
type Options struct {
GraphLimitQtype string
IsLivetailQuery bool
PreferRPM bool
}
func isOrderByTs(orderBy []v3.OrderBy) bool {
func IsOrderByTs(orderBy []v3.OrderBy) bool {
if len(orderBy) == 1 && (orderBy[0].Key == constants.TIMESTAMP || orderBy[0].ColumnName == constants.TIMESTAMP) {
return true
}
@@ -502,7 +496,7 @@ func isOrderByTs(orderBy []v3.OrderBy) bool {
// PrepareLogsQuery prepares the query for logs
// start and end are in epoch millisecond
// step is in seconds
func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options Options) (string, error) {
func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.LogQBOptions) (string, error) {
// adjust the start and end time to the step interval
// NOTE: Disabling this as it's creating confusion between charts and actual data
@@ -523,7 +517,7 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
if err != nil {
return "", err
}
query = addLimitToQuery(query, mq.Limit)
query = AddLimitToQuery(query, mq.Limit)
return query, nil
} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
@@ -539,7 +533,7 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
return "", err
}
if panelType == v3.PanelTypeValue {
query, err = reduceQuery(query, mq.ReduceTo, mq.AggregateOperator)
query, err = ReduceQuery(query, mq.ReduceTo, mq.AggregateOperator)
}
if panelType == v3.PanelTypeList {
@@ -550,21 +544,21 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
if mq.PageSize > 0 {
if mq.Limit > 0 && mq.Offset+mq.PageSize > mq.Limit {
query = addLimitToQuery(query, mq.Limit-mq.Offset)
query = AddLimitToQuery(query, mq.Limit-mq.Offset)
} else {
query = addLimitToQuery(query, mq.PageSize)
query = AddLimitToQuery(query, mq.PageSize)
}
// add offset to the query only if it is not ordered by timestamp.
if !isOrderByTs(mq.OrderBy) {
query = addOffsetToQuery(query, mq.Offset)
if !IsOrderByTs(mq.OrderBy) {
query = AddOffsetToQuery(query, mq.Offset)
}
} else {
query = addLimitToQuery(query, mq.Limit)
query = AddLimitToQuery(query, mq.Limit)
}
} else if panelType == v3.PanelTypeTable {
query = addLimitToQuery(query, mq.Limit)
query = AddLimitToQuery(query, mq.Limit)
}
return query, err
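A worked example of the paging arithmetic above: with Limit=100, Offset=95 and PageSize=10, Offset+PageSize exceeds Limit, so the page is clamped; and since timestamp-ordered list queries paginate by timestamp ranges instead, OFFSET is only appended when the order is not by ts.

// mq.Limit = 100, mq.Offset = 95, mq.PageSize = 10
// Offset+PageSize (105) > Limit (100)  =>  AddLimitToQuery(query, 100-95) => LIMIT 5
// order by timestamp                   =>  no OFFSET appended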

@@ -1201,7 +1201,7 @@ var testPrepLogsQueryData = []struct {
TableName string
AggregateOperator v3.AggregateOperator
ExpectedQuery string
Options Options
Options v3.LogQBOptions
}{
{
Name: "Test TS with limit- first",
@@ -1223,7 +1223,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by value DESC) LIMIT 10",
Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
},
{
Name: "Test TS with limit- first - with order by value",
@@ -1246,7 +1246,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by value ASC) LIMIT 10",
Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
},
{
Name: "Test TS with limit- first - with order by attribute",
@@ -1269,7 +1269,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by `method` ASC) LIMIT 10",
Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true},
},
{
Name: "Test TS with limit- second",
@@ -1291,7 +1291,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') AND (`method`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `method`,ts order by value DESC",
Options: Options{GraphLimitQtype: constants.SecondQueryGraphLimit},
Options: v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit},
},
{
Name: "Test TS with limit- second - with order by",
@@ -1314,7 +1314,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') AND (`method`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `method`,ts order by `method` ASC",
Options: Options{GraphLimitQtype: constants.SecondQueryGraphLimit},
Options: v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit},
},
// Live tail
{
@@ -1334,7 +1334,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND ",
Options: Options{IsLivetailQuery: true},
Options: v3.LogQBOptions{IsLivetailQuery: true},
},
{
Name: "Live Tail Query with contains",
@@ -1353,7 +1353,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where attributes_string_value[indexOf(attributes_string_key, 'method')] ILIKE '%GET%' AND ",
Options: Options{IsLivetailQuery: true},
Options: v3.LogQBOptions{IsLivetailQuery: true},
},
{
Name: "Live Tail Query W/O filter",
@@ -1369,7 +1369,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where ",
Options: Options{IsLivetailQuery: true},
Options: v3.LogQBOptions{IsLivetailQuery: true},
},
{
Name: "Table query w/o limit",
@@ -1385,7 +1385,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by value DESC",
Options: Options{},
Options: v3.LogQBOptions{},
},
{
Name: "Table query with limit",
@@ -1402,7 +1402,7 @@ var testPrepLogsQueryData = []struct {
},
TableName: "logs",
ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by value DESC LIMIT 10",
Options: Options{},
Options: v3.LogQBOptions{},
},
{
Name: "Ignore offset if order by is timestamp in list queries",
@@ -1488,7 +1488,7 @@ var testPrepLogsQueryLimitOffsetData = []struct {
TableName string
AggregateOperator v3.AggregateOperator
ExpectedQuery string
Options Options
Options v3.LogQBOptions
}{
{
Name: "Test limit less than pageSize - order by ts",

@@ -0,0 +1,105 @@
package v4
import (
"fmt"
"strings"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils"
)
var jsonLogOperators = map[v3.FilterOperator]string{
v3.FilterOperatorEqual: "=",
v3.FilterOperatorNotEqual: "!=",
v3.FilterOperatorLessThan: "<",
v3.FilterOperatorLessThanOrEq: "<=",
v3.FilterOperatorGreaterThan: ">",
v3.FilterOperatorGreaterThanOrEq: ">=",
v3.FilterOperatorLike: "LIKE",
v3.FilterOperatorNotLike: "NOT LIKE",
v3.FilterOperatorContains: "LIKE",
v3.FilterOperatorNotContains: "NOT LIKE",
v3.FilterOperatorRegex: "match(%s, %s)",
v3.FilterOperatorNotRegex: "NOT match(%s, %s)",
v3.FilterOperatorIn: "IN",
v3.FilterOperatorNotIn: "NOT IN",
v3.FilterOperatorExists: "JSON_EXISTS(%s, '$.%s')",
v3.FilterOperatorNotExists: "NOT JSON_EXISTS(%s, '$.%s')",
v3.FilterOperatorHas: "has(%s, %s)",
v3.FilterOperatorNotHas: "NOT has(%s, %s)",
}
func GetJSONFilter(item v3.FilterItem) (string, error) {
dataType := item.Key.DataType
isArray := false
// check if its an array and handle it
if val, ok := logsV3.ArrayValueTypeMapping[string(item.Key.DataType)]; ok {
if item.Operator != v3.FilterOperatorHas && item.Operator != v3.FilterOperatorNotHas {
return "", fmt.Errorf("only has operator is supported for array")
}
isArray = true
dataType = v3.AttributeKeyDataType(val)
}
key, err := logsV3.GetJSONFilterKey(item.Key, item.Operator, isArray)
if err != nil {
return "", err
}
// non array
op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
var value interface{}
if op != v3.FilterOperatorExists && op != v3.FilterOperatorNotExists {
value, err = utils.ValidateAndCastValue(item.Value, dataType)
if err != nil {
return "", fmt.Errorf("failed to validate and cast value for %s: %v", item.Key.Key, err)
}
}
var filter string
if logsOp, ok := jsonLogOperators[op]; ok {
switch op {
case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
filter = fmt.Sprintf(logsOp, key, logsV3.GetPath(strings.Split(item.Key.Key, ".")[1:]))
case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex, v3.FilterOperatorHas, v3.FilterOperatorNotHas:
fmtVal := utils.ClickHouseFormattedValue(value)
filter = fmt.Sprintf(logsOp, key, fmtVal)
case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
val := utils.QuoteEscapedString(fmt.Sprintf("%v", item.Value))
filter = fmt.Sprintf("%s %s '%%%s%%'", key, logsOp, val)
default:
fmtVal := utils.ClickHouseFormattedValue(value)
filter = fmt.Sprintf("%s %s %s", key, logsOp, fmtVal)
}
} else {
return "", fmt.Errorf("unsupported operator: %s", op)
}
filters := []string{}
pathFilter := logsV3.GetPathIndexFilter(item.Key.Key)
if pathFilter != "" {
filters = append(filters, pathFilter)
}
if op == v3.FilterOperatorContains ||
op == v3.FilterOperatorEqual ||
op == v3.FilterOperatorHas {
val, ok := item.Value.(string)
if ok && len(val) >= logsV3.NGRAM_SIZE {
filters = append(filters, fmt.Sprintf("lower(body) like lower('%%%s%%')", utils.QuoteEscapedString(strings.ToLower(val))))
}
}
// add exists check for non-array items as default values of int/float/bool will corrupt the results
if !isArray && !(item.Operator == v3.FilterOperatorExists || item.Operator == v3.FilterOperatorNotExists) {
existsFilter := fmt.Sprintf("JSON_EXISTS(body, '$.%s')", logsV3.GetPath(strings.Split(item.Key.Key, ".")[1:]))
filter = fmt.Sprintf("%s AND %s", existsFilter, filter)
}
filters = append(filters, filter)
return strings.Join(filters, " AND "), nil
}

@@ -0,0 +1,200 @@
package v4
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
var testGetJSONFilterData = []struct {
Name string
FilterItem v3.FilterItem
Filter string
Error bool
}{
{
Name: "Array membership string",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.requestor_list[*]",
DataType: "array(string)",
IsJSON: true,
},
Operator: "has",
Value: "index_service",
},
Filter: "lower(body) like lower('%requestor_list%') AND lower(body) like lower('%index_service%') AND has(JSONExtract(JSON_QUERY(body, '$.\"requestor_list\"[*]'), 'Array(String)'), 'index_service')",
},
{
Name: "Array membership int64",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.int_numbers[*]",
DataType: "array(int64)",
IsJSON: true,
},
Operator: "has",
Value: 2,
},
Filter: "lower(body) like lower('%int_numbers%') AND has(JSONExtract(JSON_QUERY(body, '$.\"int_numbers\"[*]'), '" + logsV3.ARRAY_INT64 + "'), 2)",
},
{
Name: "Array membership float64",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.nested_num[*].float_nums[*]",
DataType: "array(float64)",
IsJSON: true,
},
Operator: "nhas",
Value: 2.2,
},
Filter: "lower(body) like lower('%nested_num%float_nums%') AND NOT has(JSONExtract(JSON_QUERY(body, '$.\"nested_num\"[*].\"float_nums\"[*]'), '" + logsV3.ARRAY_FLOAT64 + "'), 2.200000)",
},
{
Name: "Array membership bool",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.bool[*]",
DataType: "array(bool)",
IsJSON: true,
},
Operator: "has",
Value: true,
},
Filter: "lower(body) like lower('%bool%') AND has(JSONExtract(JSON_QUERY(body, '$.\"bool\"[*]'), '" + logsV3.ARRAY_BOOL + "'), true)",
},
{
Name: "eq operator",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.message",
DataType: "string",
IsJSON: true,
},
Operator: "=",
Value: "hello",
},
Filter: "lower(body) like lower('%message%') AND lower(body) like lower('%hello%') AND JSON_EXISTS(body, '$.\"message\"') AND JSON_VALUE(body, '$.\"message\"') = 'hello'",
},
{
Name: "eq operator number",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.status",
DataType: "int64",
IsJSON: true,
},
Operator: "=",
Value: 1,
},
Filter: "lower(body) like lower('%status%') AND JSON_EXISTS(body, '$.\"status\"') AND JSONExtract(JSON_VALUE(body, '$.\"status\"'), '" + logsV3.INT64 + "') = 1",
},
{
Name: "neq operator number",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.status",
DataType: "float64",
IsJSON: true,
},
Operator: "=",
Value: 1.1,
},
Filter: "lower(body) like lower('%status%') AND JSON_EXISTS(body, '$.\"status\"') AND JSONExtract(JSON_VALUE(body, '$.\"status\"'), '" + logsV3.FLOAT64 + "') = 1.100000",
},
{
Name: "eq operator bool",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.boolkey",
DataType: "bool",
IsJSON: true,
},
Operator: "=",
Value: true,
},
Filter: "lower(body) like lower('%boolkey%') AND JSON_EXISTS(body, '$.\"boolkey\"') AND JSONExtract(JSON_VALUE(body, '$.\"boolkey\"'), '" + logsV3.BOOL + "') = true",
},
{
Name: "greater than operator",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.status",
DataType: "int64",
IsJSON: true,
},
Operator: ">",
Value: 1,
},
Filter: "lower(body) like lower('%status%') AND JSON_EXISTS(body, '$.\"status\"') AND JSONExtract(JSON_VALUE(body, '$.\"status\"'), '" + logsV3.INT64 + "') > 1",
},
{
Name: "regex operator",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.message",
DataType: "string",
IsJSON: true,
},
Operator: "regex",
Value: "a*",
},
Filter: "lower(body) like lower('%message%') AND JSON_EXISTS(body, '$.\"message\"') AND match(JSON_VALUE(body, '$.\"message\"'), 'a*')",
},
{
Name: "contains operator",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.message",
DataType: "string",
IsJSON: true,
},
Operator: "contains",
Value: "a",
},
Filter: "lower(body) like lower('%message%') AND JSON_EXISTS(body, '$.\"message\"') AND JSON_VALUE(body, '$.\"message\"') LIKE '%a%'",
},
{
Name: "contains operator with quotes",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.message",
DataType: "string",
IsJSON: true,
},
Operator: "contains",
Value: "hello 'world'",
},
Filter: "lower(body) like lower('%message%') AND lower(body) like lower('%hello \\'world\\'%') AND JSON_EXISTS(body, '$.\"message\"') AND JSON_VALUE(body, '$.\"message\"') LIKE '%hello \\'world\\'%'",
},
{
Name: "exists",
FilterItem: v3.FilterItem{
Key: v3.AttributeKey{
Key: "body.message",
DataType: "string",
IsJSON: true,
},
Operator: "exists",
Value: "",
},
Filter: "lower(body) like lower('%message%') AND JSON_EXISTS(body, '$.\"message\"')",
},
}
func TestGetJSONFilter(t *testing.T) {
for _, tt := range testGetJSONFilterData {
Convey("testGetJSONFilter", t, func() {
filter, err := GetJSONFilter(tt.FilterItem)
if tt.Error {
So(err, ShouldNotBeNil)
} else {
So(err, ShouldBeNil)
So(filter, ShouldEqual, tt.Filter)
}
})
}
}

@@ -1,7 +1,13 @@
package v4
import (
"fmt"
"strings"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils"
)
var logOperators = map[v3.FilterOperator]string{
@@ -29,3 +35,508 @@ const (
DISTRIBUTED_LOGS_V2_RESOURCE = "distributed_logs_v2_resource"
NANOSECOND = 1000000000
)
func getClickhouseLogsColumnDataType(columnDataType v3.AttributeKeyDataType) string {
if columnDataType == v3.AttributeKeyDataTypeFloat64 || columnDataType == v3.AttributeKeyDataTypeInt64 {
return "number"
}
if columnDataType == v3.AttributeKeyDataTypeBool {
return "bool"
}
return "string"
}
func getClickhouseKey(key v3.AttributeKey) string {
// check if it is a top level static field
if _, ok := constants.StaticFieldsLogsV3[key.Key]; ok && key.Type == v3.AttributeKeyTypeUnspecified {
return key.Key
}
// if the key is present in topLevelColumn, it will only be searched in those columns,
// regardless of whether it is indexed/present again in resource or column attributes
if !key.IsColumn {
columnType := logsV3.GetClickhouseLogsColumnType(key.Type)
columnDataType := getClickhouseLogsColumnDataType(key.DataType)
return fmt.Sprintf("%s_%s['%s']", columnType, columnDataType, key.Key)
}
// materialized column created from query
// https://github.com/SigNoz/signoz/pull/4775
return "`" + utils.GetClickhouseColumnNameV2(string(key.Type), string(key.DataType), key.Key) + "`"
}
func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
var selectLabels string
if aggregatorOperator == v3.AggregateOperatorNoOp {
selectLabels = ""
} else {
for _, tag := range groupBy {
columnName := getClickhouseKey(tag)
selectLabels += fmt.Sprintf(" %s as `%s`,", columnName, tag.Key)
}
}
return selectLabels
}
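For a non-NoOp aggregation grouped on a non-column string tag, the labels produced above use the v2 map syntax from getClickhouseKey, e.g.:

// groupBy = [{Key: "method", DataType: string, Type: tag, IsColumn: false}]
// getSelectLabels(...) -> " attributes_string['method'] as `method`,"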
func getExistsNexistsFilter(op v3.FilterOperator, item v3.FilterItem) string {
if _, ok := constants.StaticFieldsLogsV3[item.Key.Key]; ok && item.Key.Type == v3.AttributeKeyTypeUnspecified {
// no exists filter for static fields as they exist everywhere
// TODO(nitya): Think what we can do here
return ""
} else if item.Key.IsColumn {
// get filter for materialized columns
val := true
if op == v3.FilterOperatorNotExists {
val = false
}
return fmt.Sprintf("%s_exists`=%v", strings.TrimSuffix(getClickhouseKey(item.Key), "`"), val)
}
// filter for non materialized attributes
columnType := logsV3.GetClickhouseLogsColumnType(item.Key.Type)
columnDataType := getClickhouseLogsColumnDataType(item.Key.DataType)
return fmt.Sprintf(logOperators[op], columnType, columnDataType, item.Key.Key)
}
func buildAttributeFilter(item v3.FilterItem) (string, error) {
// check if the user is searching for a value in all attributes
key := item.Key.Key
op := v3.FilterOperator(strings.ToLower(string(item.Operator)))
var value interface{}
var err error
if op != v3.FilterOperatorExists && op != v3.FilterOperatorNotExists {
value, err = utils.ValidateAndCastValue(item.Value, item.Key.DataType)
if err != nil {
return "", fmt.Errorf("failed to validate and cast value for %s: %v", item.Key.Key, err)
}
}
// TODO(nitya): as of now __attrs only supports attributes_string. Discuss more on this
// also for eq and contains, as for now it does an exact match
if key == "__attrs" {
if (op != v3.FilterOperatorEqual && op != v3.FilterOperatorContains) || item.Key.DataType != v3.AttributeKeyDataTypeString {
return "", fmt.Errorf("only = operator and string data type is supported for __attrs")
}
val := utils.ClickHouseFormattedValue(item.Value)
return fmt.Sprintf("has(mapValues(attributes_string), %s)", val), nil
}
keyName := getClickhouseKey(item.Key)
fmtVal := utils.ClickHouseFormattedValue(value)
if logsOp, ok := logOperators[op]; ok {
switch op {
case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
return getExistsNexistsFilter(op, item), nil
case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
return fmt.Sprintf(logsOp, keyName, fmtVal), nil
case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value))
// for body the contains is case insensitive
if keyName == BODY {
return fmt.Sprintf("lower(%s) %s lower('%%%s%%')", keyName, logsOp, val), nil
} else {
return fmt.Sprintf("%s %s '%%%s%%'", keyName, logsOp, val), nil
}
default:
// use lower for like and ilike on body
if op == v3.FilterOperatorLike || op == v3.FilterOperatorNotLike {
if keyName == BODY {
keyName = fmt.Sprintf("lower(%s)", keyName)
fmtVal = fmt.Sprintf("lower(%s)", fmtVal)
}
}
return fmt.Sprintf("%s %s %s", keyName, logsOp, fmtVal), nil
}
} else {
return "", fmt.Errorf("unsupported operator: %s", op)
}
}
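Sample fragments this function produces, following the branches above; the operator spellings come from the elided logOperators table, so they are assumptions here:

// contains on body (case-insensitive branch):
//   lower(body) LIKE lower('%error%')
// equality on a non-column string tag:
//   attributes_string['method'] = 'GET'
// exists/nexists are delegated to getExistsNexistsFilter above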
func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey) (string, error) {
var conditions []string
if fs == nil || len(fs.Items) == 0 {
return "", nil
}
for _, item := range fs.Items {
// skip if it's a resource attribute
if item.Key.Type == v3.AttributeKeyTypeResource {
continue
}
// if the filter is json filter
if item.Key.IsJSON {
filter, err := GetJSONFilter(item)
if err != nil {
return "", err
}
conditions = append(conditions, filter)
continue
}
// generate the filter
filter, err := buildAttributeFilter(item)
if err != nil {
return "", err
}
conditions = append(conditions, filter)
// add an extra condition for map contains
// by default clickhouse is not able to utilize the map indexes for all operators.
// mapContains forces the use of the index.
op := v3.FilterOperator(strings.ToLower(string(item.Operator)))
if !item.Key.IsColumn && op != v3.FilterOperatorExists && op != v3.FilterOperatorNotExists {
conditions = append(conditions, getExistsNexistsFilter(v3.FilterOperatorExists, item))
}
}
// add group by conditions to filter out log lines which don't have the key
for _, attr := range groupBy {
// skip if it's a resource attribute
if attr.Type == v3.AttributeKeyTypeResource {
continue
}
if !attr.IsColumn {
columnType := logsV3.GetClickhouseLogsColumnType(attr.Type)
columnDataType := getClickhouseLogsColumnDataType(attr.DataType)
conditions = append(conditions, fmt.Sprintf("mapContains(%s_%s, '%s')", columnType, columnDataType, attr.Key))
} else if attr.Type != v3.AttributeKeyTypeUnspecified {
// for materialized columns and not the top level static fields
name := utils.GetClickhouseColumnNameV2(string(attr.Type), string(attr.DataType), attr.Key)
conditions = append(conditions, fmt.Sprintf("`%s_exists`=true", name))
}
}
// add conditions for aggregate attribute
if aggregateAttribute.Key != "" && aggregateAttribute.Type != v3.AttributeKeyTypeResource {
existsFilter := getExistsNexistsFilter(v3.FilterOperatorExists, v3.FilterItem{Key: aggregateAttribute})
conditions = append(conditions, existsFilter)
}
queryString := strings.Join(conditions, " AND ")
return queryString, nil
}
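Putting it together, filtering on one tag and grouping by another yields a conjunction along these lines; the exact exists-filter text comes from the elided logOperators table, assumed here to be a mapContains check per the comment above:

// filter: method = 'GET' (string tag), group by: status (string tag)
// attributes_string['method'] = 'GET'
//   AND mapContains(attributes_string, 'method')  // index hint for the filter key
//   AND mapContains(attributes_string, 'status')  // drop rows missing the group-by key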
// orderBy returns a string of comma separated tags for order by clause
// if there are remaining items which are not present in tags they are also added
// if the order is not specified, it defaults to ASC
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string {
var orderBy []string
for _, item := range items {
if item.ColumnName == constants.SigNozOrderByValue {
orderBy = append(orderBy, fmt.Sprintf("value %s", item.Order))
} else if _, ok := tagLookup[item.ColumnName]; ok {
orderBy = append(orderBy, fmt.Sprintf("`%s` %s", item.ColumnName, item.Order))
} else if panelType == v3.PanelTypeList {
attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
name := getClickhouseKey(attr)
if item.IsColumn {
name = "`" + name + "`"
}
orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order))
}
}
return orderBy
}
func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string {
tagLookup := map[string]struct{}{}
for _, v := range tags {
tagLookup[v.Key] = struct{}{}
}
orderByArray := orderBy(panelType, items, tagLookup)
if len(orderByArray) == 0 {
if panelType == v3.PanelTypeList {
orderByArray = append(orderByArray, constants.TIMESTAMP+" DESC")
} else {
orderByArray = append(orderByArray, "value DESC")
}
}
str := strings.Join(orderByArray, ",")
return str
}
func generateAggregateClause(aggOp v3.AggregateOperator,
aggKey string,
step int64,
preferRPM bool,
timeFilter string,
whereClause string,
groupBy string,
having string,
orderBy string,
) (string, error) {
queryTmpl := " %s as value from signoz_logs." + DISTRIBUTED_LOGS_V2 +
" where " + timeFilter + "%s" +
"%s%s" +
"%s"
switch aggOp {
case v3.AggregateOperatorRate:
rate := float64(step)
if preferRPM {
rate = rate / 60.0
}
op := fmt.Sprintf("count(%s)/%f", aggKey, rate)
query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
return query, nil
case
v3.AggregateOperatorRateSum,
v3.AggregateOperatorRateMax,
v3.AggregateOperatorRateAvg,
v3.AggregateOperatorRateMin:
rate := float64(step)
if preferRPM {
rate = rate / 60.0
}
op := fmt.Sprintf("%s(%s)/%f", logsV3.AggregateOperatorToSQLFunc[aggOp], aggKey, rate)
query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
return query, nil
case
v3.AggregateOperatorP05,
v3.AggregateOperatorP10,
v3.AggregateOperatorP20,
v3.AggregateOperatorP25,
v3.AggregateOperatorP50,
v3.AggregateOperatorP75,
v3.AggregateOperatorP90,
v3.AggregateOperatorP95,
v3.AggregateOperatorP99:
op := fmt.Sprintf("quantile(%v)(%s)", logsV3.AggregateOperatorToPercentile[aggOp], aggKey)
query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
op := fmt.Sprintf("%s(%s)", logsV3.AggregateOperatorToSQLFunc[aggOp], aggKey)
query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorCount:
op := "toFloat64(count(*))"
query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorCountDistinct:
op := fmt.Sprintf("toFloat64(count(distinct(%s)))", aggKey)
query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
return query, nil
default:
return "", fmt.Errorf("unsupported aggregate operator")
}
}
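A worked example of the rate branch: %f prints six decimals, and preferRPM divides the step by 60.

// step = 60, preferRPM = true  => rate = 60/60 = 1.0 => "count(aggKey)/1.000000" (per minute)
// step = 60, preferRPM = false => rate = 60.0        => "count(aggKey)/60.000000" (per second)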
func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.BuilderQuery, graphLimitQtype string, preferRPM bool) (string, error) {
// the time range is sent in epoch milliseconds
logsStart := utils.GetEpochNanoSecs(start)
logsEnd := utils.GetEpochNanoSecs(end)
// 1800 seconds are subtracted so that the bucket start covers all the fingerprints.
bucketStart := logsStart/NANOSECOND - 1800
bucketEnd := logsEnd / NANOSECOND
// timestamp filter; the ts_bucket_start filter is added to make use of the primary key
timeFilter := fmt.Sprintf("(timestamp >= %d AND timestamp <= %d) AND (ts_bucket_start >= %d AND ts_bucket_start <= %d)", logsStart, logsEnd, bucketStart, bucketEnd)
// build the where clause for main table
filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy, mq.AggregateAttribute)
if err != nil {
return "", err
}
if filterSubQuery != "" {
filterSubQuery = " AND " + filterSubQuery
}
// build the where clause for resource table
resourceSubQuery, err := buildResourceSubQuery(bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false)
if err != nil {
return "", err
}
// join both the filter clauses
if resourceSubQuery != "" {
filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery + ")"
}
// get the select labels
selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
// get the order by clause
orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy)
if panelType != v3.PanelTypeList && orderBy != "" {
orderBy = " order by " + orderBy
}
// if noop create the query and return
if mq.AggregateOperator == v3.AggregateOperatorNoOp {
// with noop, any filter or any order by other than ts will use the new table
sqlSelect := constants.LogsSQLSelectV2
queryTmpl := sqlSelect + "from signoz_logs.%s where %s%s order by %s"
query := fmt.Sprintf(queryTmpl, DISTRIBUTED_LOGS_V2, timeFilter, filterSubQuery, orderBy)
return query, nil
// ---- NOOP ends here ----
}
// ---- FOR aggregation queries ----
// get the having conditions
having := logsV3.Having(mq.Having)
if having != "" {
having = " having " + having
}
// get the group by clause
groupBy := logsV3.GroupByAttributeKeyTags(panelType, graphLimitQtype, mq.GroupBy...)
if panelType != v3.PanelTypeList && groupBy != "" {
groupBy = " group by " + groupBy
}
// get the aggregation key
aggregationKey := ""
if mq.AggregateAttribute.Key != "" {
aggregationKey = getClickhouseKey(mq.AggregateAttribute)
}
// for limit queries, two queries are formed
// in the second query we add a placeholder into which the first query is substituted
if graphLimitQtype == constants.SecondQueryGraphLimit {
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", logsV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)"
}
aggClause, err := generateAggregateClause(mq.AggregateOperator, aggregationKey, step, preferRPM, timeFilter, filterSubQuery, groupBy, having, orderBy)
if err != nil {
return "", err
}
var queryTmplPrefix string
if graphLimitQtype == constants.FirstQueryGraphLimit {
queryTmplPrefix = "SELECT"
} else if panelType == v3.PanelTypeTable {
queryTmplPrefix =
"SELECT"
// the step (aggregate interval) is the whole time period in the case of a table panel
step = (utils.GetEpochNanoSecs(end) - utils.GetEpochNanoSecs(start)) / NANOSECOND
} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
// Select the aggregate value for interval
queryTmplPrefix =
fmt.Sprintf("SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL %d SECOND) AS ts,", step)
}
query := queryTmplPrefix + selectLabels + aggClause
// for the limit query this is the first query;
// we don't need the aggregation value here as we are only concerned with the group by names
// for applying the limit
if graphLimitQtype == constants.FirstQueryGraphLimit {
query = "SELECT " + logsV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + query + ")"
}
return query, nil
}
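As a rough sketch of the noop path, a plain list query (assuming no filters; `start`/`end` are placeholders, and the select list comes from constants.LogsSQLSelectV2, which is not reproduced here) would look like:

mq := &v3.BuilderQuery{
	AggregateOperator: v3.AggregateOperatorNoOp,
	OrderBy:           []v3.OrderBy{{ColumnName: "timestamp", Order: "desc", IsColumn: true}},
}
q, _ := buildLogsQuery(v3.PanelTypeList, start, end, 60, mq, "", false)
// q ≈ "<LogsSQLSelectV2> from signoz_logs.distributed_logs_v2 where
//       (timestamp >= <ns> AND timestamp <= <ns>) AND
//       (ts_bucket_start >= <s> AND ts_bucket_start <= <s>) order by `timestamp` desc"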
func buildLogsLiveTailQuery(mq *v3.BuilderQuery) (string, error) {
filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy, v3.AttributeKey{})
if err != nil {
return "", err
}
// no values for bucket start and end
resourceSubQuery, err := buildResourceSubQuery(0, 0, mq.Filters, mq.GroupBy, mq.AggregateAttribute, true)
if err != nil {
return "", err
}
// join both the filter clauses
if resourceSubQuery != "" {
if filterSubQuery != "" {
filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery
} else {
filterSubQuery = "(resource_fingerprint GLOBAL IN " + resourceSubQuery
}
}
// the reader will add the timestamp and id filters
switch mq.AggregateOperator {
case v3.AggregateOperatorNoOp:
query := constants.LogsSQLSelectV2 + "from signoz_logs." + DISTRIBUTED_LOGS_V2 + " where "
if len(filterSubQuery) > 0 {
query = query + filterSubQuery + " AND "
}
return query, nil
default:
return "", fmt.Errorf("unsupported aggregate operator in live tail")
}
}
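The live tail query is intentionally left open-ended; a hypothetical call makes the contract visible:

q, _ := buildLogsLiveTailQuery(&v3.BuilderQuery{AggregateOperator: v3.AggregateOperatorNoOp})
// q ends with "where " (or "... AND " when filters are present) on purpose:
// the live tail reader appends its own timestamp and id conditions and closes
// any parenthesis opened by the resource subquery.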
// PrepareLogsQuery prepares the query for logs
func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.LogQBOptions) (string, error) {
// adjust the start and end time to the step interval
// NOTE: Disabling this as it's creating confusion between charts and actual data
// if panelType != v3.PanelTypeList {
// start = start - (start % (mq.StepInterval * 1000))
// end = end - (end % (mq.StepInterval * 1000))
// }
if options.IsLivetailQuery {
query, err := buildLogsLiveTailQuery(mq)
if err != nil {
return "", err
}
return query, nil
} else if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
// give me just the group_by names (no values)
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype, options.PreferRPM)
if err != nil {
return "", err
}
query = logsV3.AddLimitToQuery(query, mq.Limit)
return query, nil
} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype, options.PreferRPM)
if err != nil {
return "", err
}
return query, nil
}
query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype, options.PreferRPM)
if err != nil {
return "", err
}
if panelType == v3.PanelTypeValue {
query, err = logsV3.ReduceQuery(query, mq.ReduceTo, mq.AggregateOperator)
}
if panelType == v3.PanelTypeList {
// check if limit exceeded
if mq.Limit > 0 && mq.Offset >= mq.Limit {
return "", fmt.Errorf("max limit exceeded")
}
if mq.PageSize > 0 {
if mq.Limit > 0 && mq.Offset+mq.PageSize > mq.Limit {
query = logsV3.AddLimitToQuery(query, mq.Limit-mq.Offset)
} else {
query = logsV3.AddLimitToQuery(query, mq.PageSize)
}
// add the offset to the query only if it is not ordered by timestamp.
if !logsV3.IsOrderByTs(mq.OrderBy) {
query = logsV3.AddOffsetToQuery(query, mq.Offset)
}
} else {
query = logsV3.AddLimitToQuery(query, mq.Limit)
}
} else if panelType == v3.PanelTypeTable {
query = logsV3.AddLimitToQuery(query, mq.Limit)
}
return query, err
}
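A hypothetical caller, for illustration only (start/end in epoch milliseconds, per the comment above; `mq` is an already-built builder query):

end := time.Now().UnixMilli()
start := end - (15 * time.Minute).Milliseconds()
query, err := PrepareLogsQuery(start, end, v3.QueryTypeBuilder, v3.PanelTypeList, mq, v3.LogQBOptions{})
if err != nil {
	return err
}
// hand `query` off to the ClickHouse reader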

File diff suppressed because it is too large

View File

@@ -164,7 +164,7 @@ func buildResourceFiltersFromAggregateAttribute(aggregateAttribute v3.AttributeK
return ""
}
func buildResourceSubQuery(bucketStart, bucketEnd int64, fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey) (string, error) {
func buildResourceSubQuery(bucketStart, bucketEnd int64, fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey, isLiveTail bool) (string, error) {
// BUILD THE WHERE CLAUSE
var conditions []string
@@ -193,9 +193,14 @@ func buildResourceSubQuery(bucketStart, bucketEnd int64, fs *v3.FilterSet, group
conditionStr := strings.Join(conditions, " AND ")
// BUILD THE FINAL QUERY
query := fmt.Sprintf("SELECT fingerprint FROM signoz_logs.%s WHERE (seen_at_ts_bucket_start >= %d) AND (seen_at_ts_bucket_start <= %d) AND ", DISTRIBUTED_LOGS_V2_RESOURCE, bucketStart, bucketEnd)
query = "(" + query + conditionStr + ")"
var query string
if isLiveTail {
query = fmt.Sprintf("SELECT fingerprint FROM signoz_logs.%s WHERE ", DISTRIBUTED_LOGS_V2_RESOURCE)
query = "(" + query + conditionStr
} else {
query = fmt.Sprintf("SELECT fingerprint FROM signoz_logs.%s WHERE (seen_at_ts_bucket_start >= %d) AND (seen_at_ts_bucket_start <= %d) AND ", DISTRIBUTED_LOGS_V2_RESOURCE, bucketStart, bucketEnd)
query = "(" + query + conditionStr + ")"
}
return query, nil
}
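Note the deliberately unbalanced parenthesis in the live tail branch: the subquery opens with "(" but is never closed here, because the live tail pipeline appends its own timestamp and id conditions before closing the expression. Schematically, under the assumptions of this diff:

// live tail resource subquery (unclosed on purpose):
//   "(SELECT fingerprint FROM signoz_logs.<DISTRIBUTED_LOGS_V2_RESOURCE> WHERE <resource conditions>"
// assembled prefix handed to the reader:
//   "<select> from signoz_logs.distributed_logs_v2 where
//      <filters> AND (resource_fingerprint GLOBAL IN <subquery> AND "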

View File

@@ -469,7 +469,7 @@ func Test_buildResourceSubQuery(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := buildResourceSubQuery(tt.args.bucketStart, tt.args.bucketEnd, tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute)
got, err := buildResourceSubQuery(tt.args.bucketStart, tt.args.bucketEnd, tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute, false)
if (err != nil) != tt.wantErr {
t.Errorf("buildResourceSubQuery() error = %v, wantErr %v", err, tt.wantErr)
return

View File

@@ -9,6 +9,7 @@ import (
"time"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/cache/status"
@@ -19,6 +20,7 @@ import (
)
func prepareLogsQuery(_ context.Context,
useLogsNewSchema bool,
start,
end int64,
builderQuery *v3.BuilderQuery,
@@ -27,30 +29,35 @@ func prepareLogsQuery(_ context.Context,
) (string, error) {
query := ""
logsQueryBuilder := logsV3.PrepareLogsQuery
if useLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
if params == nil || builderQuery == nil {
return query, fmt.Errorf("params and builderQuery cannot be nil")
}
// for a ts query with limit, the combined query is formed here and used as-is since it is already complete
if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
limitQuery, err := logsV3.PrepareLogsQuery(
limitQuery, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
return query, err
}
placeholderQuery, err := logsV3.PrepareLogsQuery(
placeholderQuery, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
return query, err
@@ -59,13 +66,13 @@ func prepareLogsQuery(_ context.Context,
return query, err
}
query, err := logsV3.PrepareLogsQuery(
query, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{PreferRPM: preferRPM},
v3.LogQBOptions{PreferRPM: preferRPM},
)
if err != nil {
return query, err
@@ -77,7 +84,6 @@ func (q *querier) runBuilderQuery(
ctx context.Context,
builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3,
keys map[string]v3.AttributeKey,
cacheKeys map[string]string,
ch chan channelResult,
wg *sync.WaitGroup,
@@ -102,7 +108,7 @@ func (q *querier) runBuilderQuery(
var query string
var err error
if _, ok := cacheKeys[queryName]; !ok {
query, err = prepareLogsQuery(ctx, start, end, builderQuery, params, preferRPM)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params, preferRPM)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -126,7 +132,7 @@ func (q *querier) runBuilderQuery(
missedSeries := make([]*v3.Series, 0)
cachedSeries := make([]*v3.Series, 0)
for _, miss := range misses {
query, err = prepareLogsQuery(ctx, miss.start, miss.end, builderQuery, params, preferRPM)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.start, miss.end, builderQuery, params, preferRPM)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -196,7 +202,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
@@ -208,7 +213,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
@@ -222,7 +226,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{PreferRPM: preferRPM},
)
if err != nil {
@@ -333,7 +336,6 @@ func (q *querier) runBuilderExpression(
ctx context.Context,
builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3,
keys map[string]v3.AttributeKey,
cacheKeys map[string]string,
ch chan channelResult,
wg *sync.WaitGroup,
@@ -342,7 +344,7 @@ func (q *querier) runBuilderExpression(
queryName := builderQuery.QueryName
queries, err := q.builder.PrepareQueries(params, keys)
queries, err := q.builder.PrepareQueries(params)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: "", Series: nil}
return
@@ -377,7 +379,7 @@ func (q *querier) runBuilderExpression(
NoCache: params.NoCache,
CompositeQuery: params.CompositeQuery,
Variables: params.Variables,
}, keys)
})
query := missQueries[queryName]
series, err := q.execClickHouseQuery(ctx, query)
if err != nil {

View File

@@ -11,10 +11,12 @@ import (
"time"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
chErrors "go.signoz.io/signoz/pkg/query-service/errors"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/interfaces"
@@ -54,6 +56,8 @@ type querier struct {
timeRanges [][]int
returnedSeries []*v3.Series
returnedErr error
UseLogsNewSchema bool
}
type QuerierOptions struct {
@@ -64,12 +68,18 @@ type QuerierOptions struct {
FeatureLookup interfaces.FeatureLookup
// used for testing
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
UseLogsNewSchema bool
}
func NewQuerier(opts QuerierOptions) interfaces.Querier {
logsQueryBuilder := logsV3.PrepareLogsQuery
if opts.UseLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
return &querier{
cache: opts.Cache,
reader: opts.Reader,
@@ -78,14 +88,15 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
builder: queryBuilder.NewQueryBuilder(queryBuilder.QueryBuilderOptions{
BuildTraceQuery: tracesV3.PrepareTracesQuery,
BuildLogQuery: logsV3.PrepareLogsQuery,
BuildLogQuery: logsQueryBuilder,
BuildMetricQuery: metricsV3.PrepareMetricQuery,
}, opts.FeatureLookup),
featureLookUp: opts.FeatureLookup,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
UseLogsNewSchema: opts.UseLogsNewSchema,
}
}
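A hypothetical wiring of the new option (only field names visible in this diff are used; the reader, cache, and feature-manager values are placeholders):

q := NewQuerier(QuerierOptions{
	Reader:           reader, // placeholder
	Cache:            c,      // placeholder
	FeatureLookup:    fm,     // placeholder
	UseLogsNewSchema: true,   // routes BuildLogQuery to logsV4.PrepareLogsQuery
})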
@@ -293,7 +304,7 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
return mergedSeries
}
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
cacheKeys := q.keyGenerator.GenerateKeys(params)
@@ -306,9 +317,9 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
}
wg.Add(1)
if queryName == builderQuery.Expression {
go q.runBuilderQuery(ctx, builderQuery, params, keys, cacheKeys, ch, &wg)
go q.runBuilderQuery(ctx, builderQuery, params, cacheKeys, ch, &wg)
} else {
go q.runBuilderExpression(ctx, builderQuery, params, keys, cacheKeys, ch, &wg)
go q.runBuilderExpression(ctx, builderQuery, params, cacheKeys, ch, &wg)
}
}
@@ -466,9 +477,80 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang
return results, errQueriesByName, err
}
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) {
res := make([]*v3.Result, 0)
qName := ""
pageSize := uint64(0)
queries, err := q.builder.PrepareQueries(params, keys)
// since only one query is considered, capture its name and page size
for name, v := range params.CompositeQuery.BuilderQueries {
qName = name
pageSize = v.PageSize
}
data := []*v3.Row{}
for _, v := range tsRanges {
params.Start = v.Start
params.End = v.End
params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data))
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
}
// this will run only once
for name, query := range queries {
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
errs := []error{err}
errQueriesByName := map[string]error{
name: err,
}
return nil, errQueriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
}
data = append(data, rowList...)
}
// append an id filter so the next window continues after the last returned row
if len(data) > 0 {
params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{
Key: "id",
IsColumn: true,
DataType: "string",
},
Operator: v3.FilterOperatorLessThan,
Value: data[len(data)-1].Data["id"],
})
}
if uint64(len(data)) >= pageSize {
break
}
}
res = append(res, &v3.Result{
QueryName: qName,
List: data,
})
return res, nil, nil
}
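runLogsListQuery above walks these windows newest-first, stops as soon as the page is full, and keyset-paginates with the id filter added after each window. utils.GetLogsListTsRanges itself is not part of this diff; a speculative sketch of the shape it could take:

// Speculative sketch only: the real utils.GetLogsListTsRanges is not shown in
// this diff, and the 1h/doubling parameters are assumptions for illustration.
type tsRange struct{ Start, End int64 } // mirrors utils.LogsListTsRange

func sketchTsRanges(start, end int64) []tsRange {
	var ranges []tsRange
	window := (1 * time.Hour).Milliseconds() // hypothetical first window
	for cur := end; cur > start; {
		lo := cur - window
		if lo < start {
			lo = start
		}
		ranges = append(ranges, tsRange{Start: lo, End: cur})
		cur = lo
		window *= 2 // widen backwards; older data costs fewer queries
	}
	return ranges
}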
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
// The list view supports only a single query.
if q.UseLogsNewSchema && params.CompositeQuery != nil && len(params.CompositeQuery.BuilderQueries) == 1 {
for _, v := range params.CompositeQuery.BuilderQueries {
// only allow logs queries ordered by timestamp desc
if v.DataSource == v3.DataSourceLogs && len(v.OrderBy) == 1 && v.OrderBy[0].ColumnName == "timestamp" && v.OrderBy[0].Order == "desc" {
startEndArr := utils.GetLogsListTsRanges(params.Start, params.End)
if len(startEndArr) > 0 {
return q.runLogsListQuery(ctx, params, startEndArr)
}
}
}
}
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
@@ -484,7 +566,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query}
ch <- channelResult{Err: err, Name: name, Query: query}
return
}
ch <- channelResult{List: rowList, Name: name, Query: query}
@@ -515,7 +597,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
return res, nil, nil
}
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
var results []*v3.Result
var err error
var errQueriesByName map[string]error
@@ -523,9 +605,9 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3,
switch params.CompositeQuery.QueryType {
case v3.QueryTypeBuilder:
if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params, keys)
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
} else {
results, errQueriesByName, err = q.runBuilderQueries(ctx, params, keys)
results, errQueriesByName, err = q.runBuilderQueries(ctx, params)
}
// in builder queries, the only errors we expose are the ones that exceed the resource limits
// everything else is an internal error, as it is not actionable by the user

View File

@@ -8,6 +8,7 @@ import (
"time"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/cache/inmemory"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
@@ -584,7 +585,8 @@ func TestQueryRange(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -693,7 +695,8 @@ func TestQueryRangeValueType(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -746,7 +749,8 @@ func TestQueryRangeTimeShift(t *testing.T) {
expectedTimeRangeInQueryString := fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000)
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -844,7 +848,8 @@ func TestQueryRangeTimeShiftWithCache(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -944,7 +949,8 @@ func TestQueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -1033,7 +1039,8 @@ func TestQueryRangeValueTypePromQL(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}

View File

@@ -9,6 +9,7 @@ import (
"time"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
@@ -19,12 +20,17 @@ import (
)
func prepareLogsQuery(_ context.Context,
useLogsNewSchema bool,
start,
end int64,
builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3,
preferRPM bool,
) (string, error) {
logsQueryBuilder := logsV3.PrepareLogsQuery
if useLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
query := ""
if params == nil || builderQuery == nil {
@@ -33,24 +39,24 @@ func prepareLogsQuery(_ context.Context,
// for a ts query with limit, the combined query is formed here and used as-is since it is already complete
if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 {
limitQuery, err := logsV3.PrepareLogsQuery(
limitQuery, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
return query, err
}
placeholderQuery, err := logsV3.PrepareLogsQuery(
placeholderQuery, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
return query, err
@@ -59,13 +65,13 @@ func prepareLogsQuery(_ context.Context,
return query, err
}
query, err := logsV3.PrepareLogsQuery(
query, err := logsQueryBuilder(
start,
end,
params.CompositeQuery.QueryType,
params.CompositeQuery.PanelType,
builderQuery,
logsV3.Options{PreferRPM: preferRPM},
v3.LogQBOptions{PreferRPM: preferRPM},
)
if err != nil {
return query, err
@@ -77,7 +83,6 @@ func (q *querier) runBuilderQuery(
ctx context.Context,
builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3,
keys map[string]v3.AttributeKey,
cacheKeys map[string]string,
ch chan channelResult,
wg *sync.WaitGroup,
@@ -104,7 +109,7 @@ func (q *querier) runBuilderQuery(
var query string
var err error
if _, ok := cacheKeys[queryName]; !ok {
query, err = prepareLogsQuery(ctx, start, end, builderQuery, params, preferRPM)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params, preferRPM)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -127,7 +132,7 @@ func (q *querier) runBuilderQuery(
missedSeries := make([]*v3.Series, 0)
cachedSeries := make([]*v3.Series, 0)
for _, miss := range misses {
query, err = prepareLogsQuery(ctx, miss.start, miss.end, builderQuery, params, preferRPM)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.start, miss.end, builderQuery, params, preferRPM)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -195,7 +200,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
@@ -207,7 +211,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM},
)
if err != nil {
@@ -221,7 +224,6 @@ func (q *querier) runBuilderQuery(
end,
params.CompositeQuery.PanelType,
builderQuery,
keys,
tracesV3.Options{PreferRPM: preferRPM},
)
if err != nil {

View File

@@ -11,10 +11,12 @@ import (
"time"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
chErrors "go.signoz.io/signoz/pkg/query-service/errors"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/interfaces"
@@ -51,9 +53,10 @@ type querier struct {
testingMode bool
queriesExecuted []string
// tuple of start and end time in milliseconds
timeRanges [][]int
returnedSeries []*v3.Series
returnedErr error
timeRanges [][]int
returnedSeries []*v3.Series
returnedErr error
UseLogsNewSchema bool
}
type QuerierOptions struct {
@@ -64,12 +67,18 @@ type QuerierOptions struct {
FeatureLookup interfaces.FeatureLookup
// used for testing
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
UseLogsNewSchema bool
}
func NewQuerier(opts QuerierOptions) interfaces.Querier {
logsQueryBuilder := logsV3.PrepareLogsQuery
if opts.UseLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
return &querier{
cache: opts.Cache,
reader: opts.Reader,
@@ -78,14 +87,15 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
builder: queryBuilder.NewQueryBuilder(queryBuilder.QueryBuilderOptions{
BuildTraceQuery: tracesV3.PrepareTracesQuery,
BuildLogQuery: logsV3.PrepareLogsQuery,
BuildLogQuery: logsQueryBuilder,
BuildMetricQuery: metricsV4.PrepareMetricQuery,
}, opts.FeatureLookup),
featureLookUp: opts.FeatureLookup,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
UseLogsNewSchema: opts.UseLogsNewSchema,
}
}
@@ -308,7 +318,7 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
return mergedSeries
}
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
cacheKeys := q.keyGenerator.GenerateKeys(params)
@@ -318,7 +328,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa
for queryName, builderQuery := range params.CompositeQuery.BuilderQueries {
if queryName == builderQuery.Expression {
wg.Add(1)
go q.runBuilderQuery(ctx, builderQuery, params, keys, cacheKeys, ch, &wg)
go q.runBuilderQuery(ctx, builderQuery, params, cacheKeys, ch, &wg)
}
}
@@ -475,9 +485,80 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang
return results, errQueriesByName, err
}
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) {
res := make([]*v3.Result, 0)
qName := ""
pageSize := uint64(0)
queries, err := q.builder.PrepareQueries(params, keys)
// since only one query is considered, capture its name and page size
for name, v := range params.CompositeQuery.BuilderQueries {
qName = name
pageSize = v.PageSize
}
data := []*v3.Row{}
for _, v := range tsRanges {
params.Start = v.Start
params.End = v.End
params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data))
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
}
// this will run only once
for name, query := range queries {
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
errs := []error{err}
errQueriesByName := map[string]error{
name: err,
}
return nil, errQueriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
}
data = append(data, rowList...)
}
// append an id filter so the next window continues after the last returned row
if len(data) > 0 {
params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{
Key: "id",
IsColumn: true,
DataType: "string",
},
Operator: v3.FilterOperatorLessThan,
Value: data[len(data)-1].Data["id"],
})
}
if uint64(len(data)) >= pageSize {
break
}
}
res = append(res, &v3.Result{
QueryName: qName,
List: data,
})
return res, nil, nil
}
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
// The list view supports only a single query.
if q.UseLogsNewSchema && params.CompositeQuery != nil && len(params.CompositeQuery.BuilderQueries) == 1 {
for _, v := range params.CompositeQuery.BuilderQueries {
// only allow logs queries ordered by timestamp desc
if v.DataSource == v3.DataSourceLogs && len(v.OrderBy) == 1 && v.OrderBy[0].ColumnName == "timestamp" && v.OrderBy[0].Order == "desc" {
startEndArr := utils.GetLogsListTsRanges(params.Start, params.End)
if len(startEndArr) > 0 {
return q.runLogsListQuery(ctx, params, startEndArr)
}
}
}
}
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
@@ -493,7 +574,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query}
ch <- channelResult{Err: err, Name: name, Query: query}
return
}
ch <- channelResult{List: rowList, Name: name, Query: query}
@@ -526,7 +607,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
// QueryRange is the main function that runs the queries
// and returns the results
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) {
func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
var results []*v3.Result
var err error
var errQueriesByName map[string]error
@@ -534,9 +615,9 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3,
switch params.CompositeQuery.QueryType {
case v3.QueryTypeBuilder:
if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace {
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params, keys)
results, errQueriesByName, err = q.runBuilderListQueries(ctx, params)
} else {
results, errQueriesByName, err = q.runBuilderQueries(ctx, params, keys)
results, errQueriesByName, err = q.runBuilderQueries(ctx, params)
}
// in builder queries, the only errors we expose are the ones that exceed the resource limits
// everything else is an internal error, as it is not actionable by the user

View File

@@ -8,6 +8,7 @@ import (
"time"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/cache/inmemory"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
@@ -593,7 +594,8 @@ func TestV2QueryRangePanelGraph(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -738,7 +740,8 @@ func TestV2QueryRangeValueType(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -792,7 +795,8 @@ func TestV2QueryRangeTimeShift(t *testing.T) {
expectedTimeRangeInQueryString := fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000)
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -892,7 +896,8 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -994,7 +999,8 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}
@@ -1085,7 +1091,8 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) {
}
for i, param := range params {
_, errByName, err := q.QueryRange(context.Background(), param, nil)
tracesV3.Enrich(param, map[string]v3.AttributeKey{})
_, errByName, err := q.QueryRange(context.Background(), param)
if err != nil {
t.Errorf("expected no error, got %s", err)
}

View File

@@ -5,7 +5,6 @@ import (
"strings"
"github.com/SigNoz/govaluate"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/cache"
@@ -43,8 +42,8 @@ var SupportedFunctions = []string{
var EvalFuncs = map[string]govaluate.ExpressionFunction{}
type prepareTracesQueryFunc func(start, end int64, panelType v3.PanelType, bq *v3.BuilderQuery, keys map[string]v3.AttributeKey, options tracesV3.Options) (string, error)
type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options logsV3.Options) (string, error)
type prepareTracesQueryFunc func(start, end int64, panelType v3.PanelType, bq *v3.BuilderQuery, options tracesV3.Options) (string, error)
type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options v3.LogQBOptions) (string, error)
type prepareMetricQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options metricsV3.Options) (string, error)
type QueryBuilder struct {
@@ -162,7 +161,7 @@ func (qb *QueryBuilder) PrepareLiveTailQuery(params *v3.QueryRangeParamsV3) (str
}
for queryName, query := range compositeQuery.BuilderQueries {
if query.Expression == queryName {
queryStr, err = qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{IsLivetailQuery: true})
queryStr, err = qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{IsLivetailQuery: true})
if err != nil {
return "", err
}
@@ -173,7 +172,7 @@ func (qb *QueryBuilder) PrepareLiveTailQuery(params *v3.QueryRangeParamsV3) (str
return queryStr, nil
}
func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...interface{}) (map[string]string, error) {
func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) (map[string]string, error) {
queries := make(map[string]string)
compositeQuery := params.CompositeQuery
@@ -193,19 +192,15 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
if query.Expression == queryName {
switch query.DataSource {
case v3.DataSourceTraces:
keys := map[string]v3.AttributeKey{}
if len(args) > 0 {
keys = args[0].(map[string]v3.AttributeKey)
}
// for a ts query with group by and limit, form two queries
if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
limitQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, query,
keys, tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
placeholderQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType,
query, keys, tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
query, tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
@@ -213,7 +208,7 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
queries[queryName] = query
} else {
queryString, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType,
query, keys, tracesV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
query, tracesV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
if err != nil {
return nil, err
}
@@ -222,18 +217,18 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in
case v3.DataSourceLogs:
// for a ts query with limit, the combined query is formed here and used as-is since it is already complete
if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
if err != nil {
return nil, err
}
query := fmt.Sprintf(placeholderQuery, limitQuery)
queries[queryName] = query
} else {
queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
if err != nil {
return nil, err
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/stretchr/testify/require"
logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
"go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/featureManager"
@@ -585,6 +586,217 @@ func TestLogsQueryWithFormula(t *testing.T) {
}
var testLogsWithFormulaV2 = []struct {
Name string
Query *v3.QueryRangeParamsV3
ExpectedQuery string
}{
{
Name: "test formula without dot in filter and group by attribute",
Query: &v3.QueryRangeParamsV3{
Start: 1702979275000000000,
End: 1702981075000000000,
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeGraph,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "A",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
},
},
"B": {
QueryName: "B",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key_2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "B",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
},
},
"C": {
QueryName: "C",
Expression: "A + B",
},
},
},
},
ExpectedQuery: "SELECT A.`key_1` as `key_1`, A.`ts` as `ts`, A.value + B.value as value FROM " +
"(SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_bool['key_1'] as `key_1`, toFloat64(count(*)) as value from " +
"signoz_logs.distributed_logs_v2 where (timestamp >= 1702979275000000000 AND timestamp <= 1702981075000000000) AND (ts_bucket_start >= 1702977475 AND ts_bucket_start <= 1702981075) " +
"AND attributes_bool['key_1'] = true AND mapContains(attributes_bool, 'key_1') AND mapContains(attributes_bool, 'key_1') group by `key_1`,ts order by value DESC) as A INNER JOIN (SELECT " +
"toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_bool['key_1'] as `key_1`, toFloat64(count(*)) as value " +
"from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979275000000000 AND timestamp <= 1702981075000000000) AND (ts_bucket_start >= 1702977475 AND ts_bucket_start <= 1702981075) " +
"AND attributes_bool['key_2'] = true AND mapContains(attributes_bool, 'key_2') AND mapContains(attributes_bool, 'key_1') group by `key_1`,ts order by value DESC) as B ON A.`key_1` = B.`key_1` AND A.`ts` = B.`ts`",
},
{
Name: "test formula with dot in filter and group by attribute",
Query: &v3.QueryRangeParamsV3{
Start: 1702979056000000000,
End: 1702982656000000000,
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeTable,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "A",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
},
},
"B": {
QueryName: "B",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key1.2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "B",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag},
},
},
"C": {
QueryName: "C",
Expression: "A + B",
},
},
},
},
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT attributes_bool['key1.1'] as `key1.1`, " +
"toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) " +
"AND attributes_bool['key1.1'] = true AND mapContains(attributes_bool, 'key1.1') AND mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as A INNER JOIN (SELECT " +
"attributes_bool['key1.1'] as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) " +
"AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) AND attributes_bool['key1.2'] = true AND mapContains(attributes_bool, 'key1.2') AND " +
"mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
},
{
Name: "test formula with dot in filter and group by materialized attribute",
Query: &v3.QueryRangeParamsV3{
Start: 1702980884000000000,
End: 1702984484000000000,
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeGraph,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key_2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "A",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true},
},
},
"B": {
QueryName: "B",
StepInterval: 60,
DataSource: v3.DataSourceLogs,
Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual},
}},
AggregateOperator: v3.AggregateOperatorCount,
Expression: "B",
OrderBy: []v3.OrderBy{
{
ColumnName: "timestamp",
Order: "desc",
},
},
GroupBy: []v3.AttributeKey{
{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true},
},
},
"C": {
QueryName: "C",
Expression: "A - B",
},
},
},
},
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value - B.value as value FROM (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, " +
"`attribute_bool_key1$$1` as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702980884000000000 AND timestamp <= 1702984484000000000) AND " +
"(ts_bucket_start >= 1702979084 AND ts_bucket_start <= 1702984484) AND `attribute_bool_key_2` = true AND `attribute_bool_key1$$1_exists`=true group by `key1.1`,ts order by value DESC) as " +
"A INNER JOIN (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, `attribute_bool_key1$$1` as `key1.1`, toFloat64(count(*)) as value from " +
"signoz_logs.distributed_logs_v2 where (timestamp >= 1702980884000000000 AND timestamp <= 1702984484000000000) AND (ts_bucket_start >= 1702979084 AND ts_bucket_start <= 1702984484) AND " +
"attributes_bool['key_1'] = true AND mapContains(attributes_bool, 'key_1') AND `attribute_bool_key1$$1_exists`=true group by `key1.1`,ts order by value DESC) as B " +
"ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
},
}
func TestLogsQueryWithFormulaV2(t *testing.T) {
t.Parallel()
qbOptions := QueryBuilderOptions{
BuildLogQuery: logsV4.PrepareLogsQuery,
}
fm := featureManager.StartManager()
qb := NewQueryBuilder(qbOptions, fm)
for _, test := range testLogsWithFormulaV2 {
t.Run(test.Name, func(t *testing.T) {
queries, err := qb.PrepareQueries(test.Query)
require.NoError(t, err)
require.Equal(t, test.ExpectedQuery, queries["C"])
})
}
}
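All three cases above reduce to the same join shape; schematically (operator and group-by keys vary per case):

// SELECT A.`key` as `key`, A.`ts` as `ts`, A.value <op> B.value as value
// FROM (<query A>) as A
// INNER JOIN (<query B>) as B
//   ON A.`key` = B.`key` AND A.`ts` = B.`ts`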
func TestGenerateCacheKeysMetricsBuilder(t *testing.T) {
testCases := []struct {
name string

View File

@@ -66,6 +66,7 @@ type ServerOptions struct {
CacheConfigPath string
FluxInterval string
Cluster string
UseLogsNewSchema bool
}
// Server runs HTTP, Mux and a grpc server
@@ -128,6 +129,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.MaxOpenConns,
serverOptions.DialTimeout,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
)
go clickhouseReader.Start(readerReady)
reader = clickhouseReader
@@ -144,7 +146,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
<-readerReady
rm, err := makeRulesManager(serverOptions.PromConfigPath, constants.GetAlertManagerApiPrefix(), serverOptions.RuleRepoURL, localDB, reader, serverOptions.DisableRules, fm)
rm, err := makeRulesManager(serverOptions.PromConfigPath, constants.GetAlertManagerApiPrefix(), serverOptions.RuleRepoURL, localDB, reader, serverOptions.DisableRules, fm, serverOptions.UseLogsNewSchema)
if err != nil {
return nil, err
}
@@ -197,6 +199,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
LogsParsingPipelineController: logParsingPipelineController,
Cache: c,
FluxInterval: fluxInterval,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
})
if err != nil {
return nil, err
@@ -707,13 +710,14 @@ func (s *Server) Stop() error {
}
func makeRulesManager(
promConfigPath,
_,
alertManagerURL string,
ruleRepoURL string,
db *sqlx.DB,
ch interfaces.Reader,
disableRules bool,
fm interfaces.FeatureLookup) (*rules.Manager, error) {
fm interfaces.FeatureLookup,
useLogsNewSchema bool) (*rules.Manager, error) {
// create engine
pqle, err := pqle.FromReader(ch)
@@ -730,16 +734,17 @@ func makeRulesManager(
// create manager opts
managerOpts := &rules.ManagerOptions{
NotifierOpts: notifierOpts,
PqlEngine: pqle,
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: nil,
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
EvalDelay: constants.GetEvalDelay(),
NotifierOpts: notifierOpts,
PqlEngine: pqle,
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: nil,
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
EvalDelay: constants.GetEvalDelay(),
UseLogsNewSchema: useLogsNewSchema,
}
// create Manager

View File

@@ -58,8 +58,7 @@ var tracesOperatorMappingV3 = map[v3.FilterOperator]string{
v3.FilterOperatorNotExists: "NOT has(%s%s, '%s')",
}
func getColumnName(key v3.AttributeKey, keys map[string]v3.AttributeKey) string {
key = enrichKeyWithMetadata(key, keys)
func getColumnName(key v3.AttributeKey) string {
if key.IsColumn {
return key.Key
}
@@ -102,13 +101,13 @@ func enrichKeyWithMetadata(key v3.AttributeKey, keys map[string]v3.AttributeKey)
}
// getSelectLabels returns the select labels for the query based on groupBy and aggregateOperator
func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey, keys map[string]v3.AttributeKey) string {
func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
var selectLabels string
if aggregatorOperator == v3.AggregateOperatorNoOp {
selectLabels = ""
} else {
for _, tag := range groupBy {
filterName := getColumnName(tag, keys)
filterName := getColumnName(tag)
selectLabels += fmt.Sprintf(" %s as `%s`,", filterName, tag.Key)
}
}
@@ -127,10 +126,10 @@ func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attribu
return strings.Join(selectLabels, ",")
}
func getSelectColumns(sc []v3.AttributeKey, keys map[string]v3.AttributeKey) string {
func getSelectColumns(sc []v3.AttributeKey) string {
var columns []string
for _, tag := range sc {
columnName := getColumnName(tag, keys)
columnName := getColumnName(tag)
columns = append(columns, fmt.Sprintf("%s as `%s` ", columnName, tag.Key))
}
return strings.Join(columns, ",")
@@ -150,20 +149,19 @@ func getZerosForEpochNano(epoch int64) int64 {
return int64(math.Pow(10, float64(19-count)))
}
func buildTracesFilterQuery(fs *v3.FilterSet, keys map[string]v3.AttributeKey) (string, error) {
func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
var conditions []string
if fs != nil && len(fs.Items) != 0 {
for _, item := range fs.Items {
val := item.Value
// generate the key
columnName := getColumnName(item.Key, keys)
columnName := getColumnName(item.Key)
var fmtVal string
key := enrichKeyWithMetadata(item.Key, keys)
item.Operator = v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
if item.Operator != v3.FilterOperatorExists && item.Operator != v3.FilterOperatorNotExists {
var err error
val, err = utils.ValidateAndCastValue(val, key.DataType)
val, err = utils.ValidateAndCastValue(val, item.Key.DataType)
if err != nil {
return "", fmt.Errorf("invalid value for key %s: %v", item.Key.Key, err)
}
@@ -179,15 +177,15 @@ func buildTracesFilterQuery(fs *v3.FilterSet, keys map[string]v3.AttributeKey) (
case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
if key.IsColumn {
subQuery, err := existsSubQueryForFixedColumn(key, item.Operator)
if item.Key.IsColumn {
subQuery, err := existsSubQueryForFixedColumn(item.Key, item.Operator)
if err != nil {
return "", err
}
conditions = append(conditions, subQuery)
} else {
columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(key)
conditions = append(conditions, fmt.Sprintf(operator, columnDataType, columnType, key.Key))
columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(item.Key)
conditions = append(conditions, fmt.Sprintf(operator, columnDataType, columnType, item.Key.Key))
}
default:
@@ -218,12 +216,11 @@ func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (st
}
}
func handleEmptyValuesInGroupBy(keys map[string]v3.AttributeKey, groupBy []v3.AttributeKey) (string, error) {
func handleEmptyValuesInGroupBy(groupBy []v3.AttributeKey) (string, error) {
filterItems := []v3.FilterItem{}
if len(groupBy) != 0 {
for _, item := range groupBy {
key := enrichKeyWithMetadata(item, keys)
if !key.IsColumn {
if !item.IsColumn {
filterItems = append(filterItems, v3.FilterItem{
Key: item,
Operator: v3.FilterOperatorExists,
@@ -236,21 +233,21 @@ func handleEmptyValuesInGroupBy(keys map[string]v3.AttributeKey, groupBy []v3.At
Operator: "AND",
Items: filterItems,
}
return buildTracesFilterQuery(&filterSet, keys)
return buildTracesFilterQuery(&filterSet)
}
return "", nil
}
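The effect of handleEmptyValuesInGroupBy: grouping on a non-column attribute implicitly adds an EXISTS filter so spans missing that attribute are excluded. A sketch with a hypothetical group-by key:

groupBy := []v3.AttributeKey{{Key: "http.method", Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString}}
// IsColumn is false, so this expands to a FilterItem{Operator: v3.FilterOperatorExists}
// and is rendered through buildTracesFilterQuery like any user-supplied filter
filterSubQuery, err := handleEmptyValuesInGroupBy(groupBy)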
func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName string, keys map[string]v3.AttributeKey, panelType v3.PanelType, options Options) (string, error) {
func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, panelType v3.PanelType, options Options) (string, error) {
filterSubQuery, err := buildTracesFilterQuery(mq.Filters, keys)
filterSubQuery, err := buildTracesFilterQuery(mq.Filters)
if err != nil {
return "", err
}
// timerange will be sent in epoch milliseconds
spanIndexTableTimeFilter := fmt.Sprintf("(timestamp >= '%d' AND timestamp <= '%d')", start*getZerosForEpochNano(start), end*getZerosForEpochNano(end))
selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy, keys)
selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
having := having(mq.Having)
if having != "" {
@@ -283,7 +280,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
}
emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(keys, mq.GroupBy)
emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy)
if err != nil {
return "", err
}
@@ -293,8 +290,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
if groupBy != "" {
groupBy = " group by " + groupBy
}
enrichedOrderBy := enrichOrderBy(mq.OrderBy, keys)
orderBy := orderByAttributeKeyTags(panelType, enrichedOrderBy, mq.GroupBy, keys)
orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy)
if orderBy != "" {
orderBy = " order by " + orderBy
}
@@ -305,7 +301,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
aggregationKey := ""
if mq.AggregateAttribute.Key != "" {
aggregationKey = getColumnName(mq.AggregateAttribute, keys)
aggregationKey = getColumnName(mq.AggregateAttribute)
}
switch mq.AggregateOperator {
@@ -342,14 +338,13 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
return query, nil
case v3.AggregateOperatorCount:
if mq.AggregateAttribute.Key != "" {
key := enrichKeyWithMetadata(mq.AggregateAttribute, keys)
if key.IsColumn {
subQuery, err := existsSubQueryForFixedColumn(key, v3.FilterOperatorExists)
if mq.AggregateAttribute.IsColumn {
subQuery, err := existsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
if err == nil {
filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery)
}
} else {
columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(key)
columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(mq.AggregateAttribute)
filterSubQuery = fmt.Sprintf("%s AND has(%s%s, '%s')", filterSubQuery, columnDataType, columnType, mq.AggregateAttribute.Key)
}
}
@@ -373,7 +368,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str
if len(mq.SelectColumns) == 0 {
return "", fmt.Errorf("select columns cannot be empty for panelType %s", panelType)
}
selectColumns := getSelectColumns(mq.SelectColumns, keys)
selectColumns := getSelectColumns(mq.SelectColumns)
queryNoOpTmpl := fmt.Sprintf("SELECT timestamp as timestamp_datetime, spanID, traceID, "+"%s ", selectColumns) + "from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_SPAN_INDEX_TABLENAME + " where %s %s" + "%s"
query = fmt.Sprintf(queryNoOpTmpl, spanIndexTableTimeFilter, filterSubQuery, orderBy)
} else {
@@ -423,7 +418,7 @@ func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tag
// orderBy returns the order by expressions for the given items
// remaining items that are not present in tags are also added
// if the order is not specified, it defaults to ASC
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}, keys map[string]v3.AttributeKey) []string {
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string {
var orderBy []string
for _, item := range items {
@@ -433,7 +428,7 @@ func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]st
orderBy = append(orderBy, fmt.Sprintf("`%s` %s", item.ColumnName, item.Order))
} else if panelType == v3.PanelTypeList {
attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
name := getColumnName(attr, keys)
name := getColumnName(attr)
if item.IsColumn {
orderBy = append(orderBy, fmt.Sprintf("`%s` %s", name, item.Order))
} else {
@@ -445,13 +440,13 @@ func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]st
return orderBy
}
func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey, keys map[string]v3.AttributeKey) string {
func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string {
tagLookup := map[string]struct{}{}
for _, v := range tags {
tagLookup[v.Key] = struct{}{}
}
orderByArray := orderBy(panelType, items, tagLookup, keys)
orderByArray := orderBy(panelType, items, tagLookup)
if len(orderByArray) == 0 {
if panelType == v3.PanelTypeList {
@@ -474,7 +469,7 @@ func having(items []v3.Having) string {
return strings.Join(having, " AND ")
}
func reduceToQuery(query string, reduceTo v3.ReduceToOperator, aggregateOperator v3.AggregateOperator) (string, error) {
func reduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOperator) (string, error) {
var groupBy string
switch reduceTo {
@@ -508,13 +503,13 @@ func addOffsetToQuery(query string, offset uint64) string {
// PrepareTracesQuery returns the query string for traces
// start and end are in epoch milliseconds
// step is in seconds
func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, keys map[string]v3.AttributeKey, options Options) (string, error) {
func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options Options) (string, error) {
// adjust the start and end time to the step interval
start = start - (start % (mq.StepInterval * 1000))
end = end - (end % (mq.StepInterval * 1000))
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
// give me just the group by names
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, keys, panelType, options)
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, panelType, options)
if err != nil {
return "", err
}
@@ -522,14 +517,14 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder
return query, nil
} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, keys, panelType, options)
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, panelType, options)
if err != nil {
return "", err
}
return query, nil
}
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, keys, panelType, options)
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, panelType, options)
if err != nil {
return "", err
}
@@ -545,3 +540,30 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder
}
return query, err
}
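The start/end adjustment at the top of PrepareTracesQuery snaps the requested window to the step interval; a worked example with hypothetical values:

start := int64(1694586121234) // epoch ms
stepInterval := int64(60)     // seconds
start = start - (start % (stepInterval * 1000))
// 1694586121234 % 60000 == 1234, so start becomes 1694586120000,
// i.e. the window is aligned down to a whole minute boundary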
func Enrich(params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) {
if params.CompositeQuery.QueryType == v3.QueryTypeBuilder {
for _, query := range params.CompositeQuery.BuilderQueries {
if query.DataSource == v3.DataSourceTraces {
// enrich aggregate attribute
query.AggregateAttribute = enrichKeyWithMetadata(query.AggregateAttribute, keys)
// enrich filter items
if query.Filters != nil && len(query.Filters.Items) > 0 {
for idx, filter := range query.Filters.Items {
query.Filters.Items[idx].Key = enrichKeyWithMetadata(filter.Key, keys)
}
}
// enrich group by
for idx, groupBy := range query.GroupBy {
query.GroupBy[idx] = enrichKeyWithMetadata(groupBy, keys)
}
// enrich order by
query.OrderBy = enrichOrderBy(query.OrderBy, keys)
// enrich select columns
for idx, selectColumn := range query.SelectColumns {
query.SelectColumns[idx] = enrichKeyWithMetadata(selectColumn, keys)
}
}
}
}
}
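A sketch of how a caller is expected to wire the new flow together, assuming this package is imported as tracesV3 and that keys (map[string]v3.AttributeKey) was fetched from the attribute metadata store:

// enrich every traces builder query in place, exactly once
tracesV3.Enrich(params, keys)
for _, builderQuery := range params.CompositeQuery.BuilderQueries {
	query, err := tracesV3.PrepareTracesQuery(start, end, panelType, builderQuery, tracesV3.Options{})
	if err != nil {
		return err
	}
	_ = query // hand the SQL off to the ClickHouse reader
}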


@@ -133,7 +133,7 @@ var buildFilterQueryData = []struct {
func TestBuildTracesFilterQuery(t *testing.T) {
for _, tt := range buildFilterQueryData {
Convey("TestBuildTracesFilterQuery", t, func() {
query, err := buildTracesFilterQuery(tt.FilterSet, map[string]v3.AttributeKey{})
query, err := buildTracesFilterQuery(tt.FilterSet)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedFilter)
})
@@ -169,7 +169,7 @@ var handleEmptyValuesInGroupByData = []struct {
func TestBuildTracesHandleEmptyValuesInGroupBy(t *testing.T) {
for _, tt := range handleEmptyValuesInGroupByData {
Convey("TestBuildTracesHandleEmptyValuesInGroupBy", t, func() {
query, err := handleEmptyValuesInGroupBy(map[string]v3.AttributeKey{}, tt.GroupBy)
query, err := handleEmptyValuesInGroupBy(tt.GroupBy)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedFilter)
})
@@ -220,8 +220,9 @@ var testColumnName = []struct {
func TestColumnName(t *testing.T) {
for _, tt := range testColumnName {
tt.AttributeKey = enrichKeyWithMetadata(tt.AttributeKey, map[string]v3.AttributeKey{})
Convey("testColumnName", t, func() {
Column := getColumnName(tt.AttributeKey, map[string]v3.AttributeKey{})
Column := getColumnName(tt.AttributeKey)
So(Column, ShouldEqual, tt.ExpectedColumn)
})
}
@@ -265,7 +266,7 @@ var testGetSelectLabelsData = []struct {
func TestGetSelectLabels(t *testing.T) {
for _, tt := range testGetSelectLabelsData {
Convey("testGetSelectLabelsData", t, func() {
selectLabels := getSelectLabels(tt.AggregateOperator, tt.GroupByTags, map[string]v3.AttributeKey{})
selectLabels := getSelectLabels(tt.AggregateOperator, tt.GroupByTags)
So(selectLabels, ShouldEqual, tt.SelectLabels)
})
}
@@ -304,7 +305,7 @@ var testGetSelectColumnsData = []struct {
func TestGetSelectColumns(t *testing.T) {
for _, tt := range testGetSelectColumnsData {
Convey("testGetSelectColumnsData", t, func() {
selectColumns := getSelectColumns(tt.sc, map[string]v3.AttributeKey{})
selectColumns := getSelectColumns(tt.sc)
So(selectColumns, ShouldEqual, tt.SelectColumns)
})
}
@@ -464,13 +465,15 @@ var testOrderBy = []struct {
}
func TestOrderBy(t *testing.T) {
keys := map[string]v3.AttributeKey{
"name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
"bytes": {Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
"response_time": {Key: "response_time", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: false},
}
for _, tt := range testOrderBy {
Convey("testOrderBy", t, func() {
res := orderByAttributeKeyTags(tt.PanelType, tt.Items, tt.Tags, map[string]v3.AttributeKey{
"name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
"bytes": {Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
"response_time": {Key: "response_time", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: false},
})
tt.Items = enrichOrderBy(tt.Items, keys)
res := orderByAttributeKeyTags(tt.PanelType, tt.Items, tt.Tags)
So(res, ShouldResemble, tt.Result)
})
}
@@ -1171,11 +1174,24 @@ var testBuildTracesQueryData = []struct {
}
func TestBuildTracesQuery(t *testing.T) {
keys := map[string]v3.AttributeKey{
"name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
}
for _, tt := range testBuildTracesQueryData {
tt.BuilderQuery.DataSource = v3.DataSourceTraces
params := &v3.QueryRangeParamsV3{
Version: "v4",
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": tt.BuilderQuery,
},
},
}
Enrich(params, keys)
Convey("TestBuildTracesQuery", t, func() {
query, err := buildTracesQuery(tt.Start, tt.End, tt.BuilderQuery.StepInterval, tt.BuilderQuery, tt.TableName, map[string]v3.AttributeKey{
"name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
}, tt.PanelType, tt.Options)
query, err := buildTracesQuery(tt.Start, tt.End, tt.BuilderQuery.StepInterval, tt.BuilderQuery, tt.TableName, tt.PanelType, tt.Options)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedQuery)
})
@@ -1400,7 +1416,7 @@ var testPrepTracesQueryData = []struct {
func TestPrepareTracesQuery(t *testing.T) {
for _, tt := range testPrepTracesQueryData {
Convey("TestPrepareTracesQuery", t, func() {
query, err := PrepareTracesQuery(tt.Start, tt.End, tt.PanelType, tt.BuilderQuery, tt.Keys, tt.Options)
query, err := PrepareTracesQuery(tt.Start, tt.End, tt.PanelType, tt.BuilderQuery, tt.Options)
So(err, ShouldBeNil)
So(query, ShouldEqual, tt.ExpectedQuery)
})


@@ -316,6 +316,12 @@ const (
"CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," +
"CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool," +
"CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string "
LogsSQLSelectV2 = "SELECT " +
"timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, " +
"attributes_string, " +
"attributes_number, " +
"attributes_bool, " +
"resources_string "
TracesExplorerViewSQLSelectWithSubQuery = "WITH subQuery AS (SELECT distinct on (traceID) traceID, durationNano, " +
"serviceName, name FROM %s.%s WHERE parentSpanID = '' AND %s %s ORDER BY durationNano DESC "
TracesExplorerViewSQLSelectQuery = "SELECT subQuery.serviceName, subQuery.name, count() AS " +
@@ -380,6 +386,12 @@ var StaticFieldsLogsV3 = map[string]v3.AttributeKey{
Type: v3.AttributeKeyTypeUnspecified,
IsColumn: true,
},
"__attrs": {
Key: "__attrs",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeUnspecified,
IsColumn: true,
},
}
const SigNozOrderByValue = "#SIGNOZ_VALUE"


@@ -105,6 +105,7 @@ func InitDB(dataSourceName string) (*ModelDaoSqlite, error) {
telemetry.GetInstance().SetUserCountCallback(mds.GetUserCount)
telemetry.GetInstance().SetUserRoleCallback(mds.GetUserRole)
telemetry.GetInstance().SetGetUsersCallback(mds.GetUsers)
return mds, nil
}


@@ -58,8 +58,6 @@ type Reader interface {
SetTTL(ctx context.Context, ttlParams *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError)
FetchTemporality(ctx context.Context, metricNames []string) (map[string]map[v3.Temporality]bool, error)
GetMetricResult(ctx context.Context, query string) ([]*model.Series, error)
GetMetricResultEE(ctx context.Context, query string) ([]*model.Series, string, error)
GetMetricAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error)
GetMetricAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
GetMetricAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
@@ -70,10 +68,9 @@ type Reader interface {
// QB V3 metrics/traces/logs
GetTimeSeriesResultV3(ctx context.Context, query string) ([]*v3.Series, error)
GetListResultV3(ctx context.Context, query string) ([]*v3.Row, error)
LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *v3.LogsLiveTailClient)
LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClient)
LiveTailLogsV4(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClientV2)
GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error)
GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error)
GetTotalSpans(ctx context.Context) (uint64, error)
GetTotalLogs(ctx context.Context) (uint64, error)
GetTotalSamples(ctx context.Context) (uint64, error)
@@ -92,7 +89,6 @@ type Reader interface {
GetLogAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
GetLogAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
GetLogAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error)
GetUsers(ctx context.Context) ([]model.UserPayload, error)
GetQBFilterSuggestionsForLogs(
ctx context.Context,
req *v3.QBFilterSuggestionsRequest,
@@ -108,25 +104,25 @@ type Reader interface {
GetMetricMetadata(context.Context, string, string) (*v3.MetricMetadataResponse, error)
AddRuleStateHistory(ctx context.Context, ruleStateHistory []v3.RuleStateHistory) error
GetOverallStateTransitions(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.ReleStateItem, error)
ReadRuleStateHistoryByRuleID(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.RuleStateTimeline, error)
GetTotalTriggers(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (uint64, error)
GetTriggersByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error)
GetAvgResolutionTime(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (float64, error)
GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error)
ReadRuleStateHistoryTopContributorsByRuleID(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateHistoryContributor, error)
GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]v3.RuleStateHistory, error)
AddRuleStateHistory(ctx context.Context, ruleStateHistory []model.RuleStateHistory) error
GetOverallStateTransitions(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.ReleStateItem, error)
ReadRuleStateHistoryByRuleID(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*model.RuleStateTimeline, error)
GetTotalTriggers(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (uint64, error)
GetTriggersByInterval(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*v3.Series, error)
GetAvgResolutionTime(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (float64, error)
GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*v3.Series, error)
ReadRuleStateHistoryTopContributorsByRuleID(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.RuleStateHistoryContributor, error)
GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]model.RuleStateHistory, error)
GetMinAndMaxTimestampForTraceID(ctx context.Context, traceID []string) (int64, int64, error)
// Query Progress tracking helpers.
ReportQueryStartForProgressTracking(queryId string) (reportQueryFinished func(), err *model.ApiError)
SubscribeToQueryProgress(queryId string) (<-chan v3.QueryProgress, func(), *model.ApiError)
SubscribeToQueryProgress(queryId string) (<-chan model.QueryProgress, func(), *model.ApiError)
}
type Querier interface {
QueryRange(context.Context, *v3.QueryRangeParamsV3, map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error)
QueryRange(context.Context, *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error)
// test helpers
QueriesExecuted() []string


@@ -33,6 +33,7 @@ func main() {
// disables rule execution but allows changes to the rule definition
var disableRules bool
var useLogsNewSchema bool
// the url used to build links in the alert messages in Slack and other systems
var ruleRepoURL, cacheConfigPath, fluxInterval string
var cluster string
@@ -43,6 +44,7 @@ func main() {
var maxOpenConns int
var dialTimeout time.Duration
flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
@@ -79,6 +81,7 @@ func main() {
CacheConfigPath: cacheConfigPath,
FluxInterval: fluxInterval,
Cluster: cluster,
UseLogsNewSchema: useLogsNewSchema,
}
// Read the jwt secret key


@@ -3,8 +3,10 @@ package model
import (
"database/sql/driver"
"encoding/json"
"fmt"
"github.com/pkg/errors"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
// AlertState denotes the state of an active alert.
@@ -88,3 +90,104 @@ func (s *AlertState) Scan(value interface{}) error {
func (s *AlertState) Value() (driver.Value, error) {
return s.String(), nil
}
type LabelsString string
func (l *LabelsString) MarshalJSON() ([]byte, error) {
lbls := make(map[string]string)
err := json.Unmarshal([]byte(*l), &lbls)
if err != nil {
return nil, err
}
return json.Marshal(lbls)
}
func (l *LabelsString) Scan(src interface{}) error {
if data, ok := src.(string); ok {
*l = LabelsString(data)
}
return nil
}
func (l LabelsString) String() string {
return string(l)
}
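LabelsString keeps the label set as the raw JSON string ClickHouse returns; MarshalJSON decodes and re-encodes it so API responses carry a JSON object instead of an escaped string. A round-trip sketch:

l := model.LabelsString(`{"service":"frontend","severity":"error"}`)
out, err := json.Marshal(&l) // MarshalJSON has a pointer receiver, so pass &l
if err != nil {
	return err
}
// string(out) == `{"service":"frontend","severity":"error"}` — an object, not a quoted string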
type RuleStateTimeline struct {
Items []RuleStateHistory `json:"items"`
Total uint64 `json:"total"`
Labels map[string][]string `json:"labels"`
}
type RuleStateHistory struct {
RuleID string `json:"ruleID" ch:"rule_id"`
RuleName string `json:"ruleName" ch:"rule_name"`
// One of ["normal", "firing"]
OverallState AlertState `json:"overallState" ch:"overall_state"`
OverallStateChanged bool `json:"overallStateChanged" ch:"overall_state_changed"`
// One of ["normal", "firing", "no_data", "muted"]
State AlertState `json:"state" ch:"state"`
StateChanged bool `json:"stateChanged" ch:"state_changed"`
UnixMilli int64 `json:"unixMilli" ch:"unix_milli"`
Labels LabelsString `json:"labels" ch:"labels"`
Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"`
Value float64 `json:"value" ch:"value"`
RelatedTracesLink string `json:"relatedTracesLink"`
RelatedLogsLink string `json:"relatedLogsLink"`
}
type QueryRuleStateHistory struct {
Start int64 `json:"start"`
End int64 `json:"end"`
State string `json:"state"`
Filters *v3.FilterSet `json:"filters"`
Offset int64 `json:"offset"`
Limit int64 `json:"limit"`
Order string `json:"order"`
}
func (r *QueryRuleStateHistory) Validate() error {
if r.Start == 0 || r.End == 0 {
return fmt.Errorf("start and end are required")
}
if r.Offset < 0 || r.Limit < 0 {
return fmt.Errorf("offset and limit must be greater than 0")
}
if r.Order != "asc" && r.Order != "desc" {
return fmt.Errorf("order must be asc or desc")
}
return nil
}
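Validate is presumably called by the rule-state-history handlers before querying; a sketch with hypothetical values:

q := model.QueryRuleStateHistory{
	Start: 1694586000000, // epoch ms, required
	End:   1694589600000, // epoch ms, required
	Limit: 20,
	Order: "desc", // must be "asc" or "desc"
}
if err := q.Validate(); err != nil {
	return model.BadRequest(err) // assumption: the handler maps this to a 400
}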
type RuleStateHistoryContributor struct {
Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"`
Labels LabelsString `json:"labels" ch:"labels"`
Count uint64 `json:"count" ch:"count"`
RelatedTracesLink string `json:"relatedTracesLink"`
RelatedLogsLink string `json:"relatedLogsLink"`
}
type RuleStateTransition struct {
RuleID string `json:"ruleID" ch:"rule_id"`
State AlertState `json:"state" ch:"state"`
FiringTime int64 `json:"firingTime" ch:"firing_time"`
ResolutionTime int64 `json:"resolutionTime" ch:"resolution_time"`
}
type ReleStateItem struct {
State AlertState `json:"state"`
Start int64 `json:"start"`
End int64 `json:"end"`
}
type Stats struct {
TotalCurrentTriggers uint64 `json:"totalCurrentTriggers"`
TotalPastTriggers uint64 `json:"totalPastTriggers"`
CurrentTriggersSeries *v3.Series `json:"currentTriggersSeries"`
PastTriggersSeries *v3.Series `json:"pastTriggersSeries"`
CurrentAvgResolutionTime string `json:"currentAvgResolutionTime"`
PastAvgResolutionTime string `json:"pastAvgResolutionTime"`
CurrentAvgResolutionTimeSeries *v3.Series `json:"currentAvgResolutionTimeSeries"`
PastAvgResolutionTimeSeries *v3.Series `json:"pastAvgResolutionTimeSeries"`
}


@@ -0,0 +1,23 @@
package model
type LogsLiveTailClientV2 struct {
Name string
Logs chan *SignozLogV2
Done chan *bool
Error chan error
}
type LogsLiveTailClient struct {
Name string
Logs chan *SignozLog
Done chan *bool
Error chan error
}
type QueryProgress struct {
ReadRows uint64 `json:"read_rows"`
ReadBytes uint64 `json:"read_bytes"`
ElapsedMs uint64 `json:"elapsed_ms"`
}
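These fields mirror what ClickHouse reports while a query executes; the Reader interface above exposes them through SubscribeToQueryProgress. A consumer sketch, assuming reader implements that interface and queryId was registered via ReportQueryStartForProgressTracking:

ch, unsubscribe, apiErr := reader.SubscribeToQueryProgress(queryId)
if apiErr != nil {
	return apiErr
}
defer unsubscribe()
for p := range ch {
	zap.L().Debug("query progress", zap.Uint64("rows", p.ReadRows), zap.Uint64("bytes", p.ReadBytes), zap.Uint64("elapsedMs", p.ElapsedMs))
}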


@@ -18,110 +18,6 @@ type QueryRangeParams struct {
Stats string
}
type MetricQuery struct {
QueryName string `json:"queryName"`
MetricName string `json:"metricName"`
TagFilters *FilterSet `json:"tagFilters,omitempty"`
GroupingTags []string `json:"groupBy,omitempty"`
AggregateOperator AggregateOperator `json:"aggregateOperator"`
Expression string `json:"expression"`
Disabled bool `json:"disabled"`
ReduceTo ReduceToOperator `json:"reduceTo,omitempty"`
}
type ReduceToOperator int
const (
_ ReduceToOperator = iota
RLAST
RSUM
RAVG
RMAX
RMIN
)
type QueryType int
const (
_ QueryType = iota
QUERY_BUILDER
CLICKHOUSE
PROM
)
type PromQuery struct {
Query string `json:"query"`
Stats string `json:"stats,omitempty"`
Disabled bool `json:"disabled"`
}
type ClickHouseQuery struct {
Query string `json:"query"`
Disabled bool `json:"disabled"`
}
type PanelType int
const (
_ PanelType = iota
TIME_SERIES
QUERY_VALUE
)
type CompositeMetricQuery struct {
BuilderQueries map[string]*MetricQuery `json:"builderQueries,omitempty"`
ClickHouseQueries map[string]*ClickHouseQuery `json:"chQueries,omitempty"`
PromQueries map[string]*PromQuery `json:"promQueries,omitempty"`
PanelType PanelType `json:"panelType"`
QueryType QueryType `json:"queryType"`
}
type AggregateOperator int
const (
_ AggregateOperator = iota
NOOP
COUNT
COUNT_DISTINCT
SUM
AVG
MAX
MIN
P05
P10
P20
P25
P50
P75
P90
P95
P99
RATE
SUM_RATE
// leave blank space for possible {AVG, X}_RATE
_
_
_
RATE_SUM
RATE_AVG
RATE_MAX
RATE_MIN
HIST_QUANTILE_50
HIST_QUANTILE_75
HIST_QUANTILE_90
HIST_QUANTILE_95
HIST_QUANTILE_99
)
type DataSource int
const (
_ DataSource = iota
METRICS
TRACES
LOGS
)
const (
StringTagMapCol = "stringTagMap"
NumberTagMapCol = "numberTagMap"
@@ -129,16 +25,6 @@ const (
ResourceTagMapCol = "resourceTagsMap"
)
type QueryRangeParamsV2 struct {
DataSource DataSource `json:"dataSource"`
Start int64 `json:"start"`
End int64 `json:"end"`
Step int64 `json:"step"`
CompositeMetricQuery *CompositeMetricQuery `json:"compositeMetricQuery"`
Variables map[string]interface{} `json:"variables,omitempty"`
NoCache bool `json:"noCache"`
}
type DashboardVars struct {
Query string `json:"query"`
Variables map[string]interface{} `json:"variables,omitempty"`


@@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"math"
"sort"
"strconv"
"time"
@@ -79,7 +78,7 @@ func BadRequest(err error) *ApiError {
func BadRequestStr(s string) *ApiError {
return &ApiError{
Typ: ErrorBadData,
Err: fmt.Errorf(s),
Err: errors.New(s),
}
}
@@ -500,46 +499,12 @@ type NextPrevErrorIDs struct {
GroupID string `json:"groupID"`
}
type Series struct {
QueryName string `json:"queryName"`
Labels map[string]string `json:"metric"`
Points []MetricPoint `json:"values"`
}
func (s *Series) SortPoints() {
sort.Slice(s.Points, func(i, j int) bool {
return s.Points[i].Timestamp < s.Points[j].Timestamp
})
}
type MetricPoint struct {
Timestamp int64
Value float64
}
type MetricStatus struct {
MetricName string
LastReceivedTsMillis int64
LastReceivedLabels map[string]string
}
// MarshalJSON implements json.Marshaler.
func (p *MetricPoint) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(p.Value, 'f', -1, 64)
return json.Marshal([...]interface{}{float64(p.Timestamp) / 1000, v})
}
// UnmarshalJSON implements json.Unmarshaler.
func (p *MetricPoint) UnmarshalJSON(b []byte) error {
var a [2]interface{}
if err := json.Unmarshal(b, &a); err != nil {
return err
}
p.Timestamp = int64(a[0].(float64) * 1000)
p.Value, _ = strconv.ParseFloat(a[1].(string), 64)
return nil
}
type ShowCreateTableStatement struct {
Statement string `json:"statement" ch:"statement"`
}
@@ -572,6 +537,21 @@ type SignozLog struct {
Attributes_bool map[string]bool `json:"attributes_bool" ch:"attributes_bool"`
}
type SignozLogV2 struct {
Timestamp uint64 `json:"timestamp" ch:"timestamp"`
ID string `json:"id" ch:"id"`
TraceID string `json:"trace_id" ch:"trace_id"`
SpanID string `json:"span_id" ch:"span_id"`
TraceFlags uint32 `json:"trace_flags" ch:"trace_flags"`
SeverityText string `json:"severity_text" ch:"severity_text"`
SeverityNumber uint8 `json:"severity_number" ch:"severity_number"`
Body string `json:"body" ch:"body"`
Resources_string map[string]string `json:"resources_string" ch:"resources_string"`
Attributes_string map[string]string `json:"attributes_string" ch:"attributes_string"`
Attributes_number map[string]float64 `json:"attributes_float" ch:"attributes_number"`
Attributes_bool map[string]bool `json:"attributes_bool" ch:"attributes_bool"`
}
type LogsTailClient struct {
Name string
Logs chan *SignozLog
@@ -650,6 +630,7 @@ type AlertsInfo struct {
SpanMetricsPrometheusQueries int `json:"spanMetricsPrometheusQueries"`
AlertNames []string `json:"alertNames"`
AlertsWithTSV2 int `json:"alertsWithTSv2"`
AlertsWithLogsChQuery int `json:"alertsWithLogsChQuery"`
}
type SavedViewsInfo struct {
@@ -666,6 +647,7 @@ type DashboardsInfo struct {
TracesBasedPanels int `json:"tracesBasedPanels"`
DashboardNames []string `json:"dashboardNames"`
QueriesWithTSV2 int `json:"queriesWithTSV2"`
DashboardsWithLogsChQuery int `json:"dashboardsWithLogsChQuery"`
}
type TagTelemetryData struct {


@@ -11,7 +11,6 @@ import (
"github.com/google/uuid"
"github.com/pkg/errors"
"go.signoz.io/signoz/pkg/query-service/model"
)
type DataSource string
@@ -1029,17 +1028,12 @@ type Table struct {
}
type Result struct {
QueryName string `json:"queryName,omitempty"`
Series []*Series `json:"series,omitempty"`
List []*Row `json:"list,omitempty"`
Table *Table `json:"table,omitempty"`
}
type LogsLiveTailClient struct {
Name string
Logs chan *model.SignozLog
Done chan *bool
Error chan error
QueryName string `json:"queryName,omitempty"`
Series []*Series `json:"series,omitempty"`
PredictedSeries []*Series `json:"predictedSeries,omitempty"`
AnomalyScores []*Series `json:"anomalyScores,omitempty"`
List []*Row `json:"list,omitempty"`
Table *Table `json:"table,omitempty"`
}
type Series struct {
@@ -1160,115 +1154,6 @@ type MetricMetadataResponse struct {
Temporality string `json:"temporality"`
}
type LabelsString string
func (l *LabelsString) MarshalJSON() ([]byte, error) {
lbls := make(map[string]string)
err := json.Unmarshal([]byte(*l), &lbls)
if err != nil {
return nil, err
}
return json.Marshal(lbls)
}
func (l *LabelsString) Scan(src interface{}) error {
if data, ok := src.(string); ok {
*l = LabelsString(data)
}
return nil
}
func (l LabelsString) String() string {
return string(l)
}
type RuleStateTimeline struct {
Items []RuleStateHistory `json:"items"`
Total uint64 `json:"total"`
Labels map[string][]string `json:"labels"`
}
type RuleStateHistory struct {
RuleID string `json:"ruleID" ch:"rule_id"`
RuleName string `json:"ruleName" ch:"rule_name"`
// One of ["normal", "firing"]
OverallState model.AlertState `json:"overallState" ch:"overall_state"`
OverallStateChanged bool `json:"overallStateChanged" ch:"overall_state_changed"`
// One of ["normal", "firing", "no_data", "muted"]
State model.AlertState `json:"state" ch:"state"`
StateChanged bool `json:"stateChanged" ch:"state_changed"`
UnixMilli int64 `json:"unixMilli" ch:"unix_milli"`
Labels LabelsString `json:"labels" ch:"labels"`
Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"`
Value float64 `json:"value" ch:"value"`
RelatedTracesLink string `json:"relatedTracesLink"`
RelatedLogsLink string `json:"relatedLogsLink"`
}
type QueryRuleStateHistory struct {
Start int64 `json:"start"`
End int64 `json:"end"`
State string `json:"state"`
Filters *FilterSet `json:"filters"`
Offset int64 `json:"offset"`
Limit int64 `json:"limit"`
Order string `json:"order"`
}
func (r *QueryRuleStateHistory) Validate() error {
if r.Start == 0 || r.End == 0 {
return fmt.Errorf("start and end are required")
}
if r.Offset < 0 || r.Limit < 0 {
return fmt.Errorf("offset and limit must be greater than 0")
}
if r.Order != "asc" && r.Order != "desc" {
return fmt.Errorf("order must be asc or desc")
}
return nil
}
type RuleStateHistoryContributor struct {
Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"`
Labels LabelsString `json:"labels" ch:"labels"`
Count uint64 `json:"count" ch:"count"`
RelatedTracesLink string `json:"relatedTracesLink"`
RelatedLogsLink string `json:"relatedLogsLink"`
}
type RuleStateTransition struct {
RuleID string `json:"ruleID" ch:"rule_id"`
State model.AlertState `json:"state" ch:"state"`
FiringTime int64 `json:"firingTime" ch:"firing_time"`
ResolutionTime int64 `json:"resolutionTime" ch:"resolution_time"`
}
type ReleStateItem struct {
State model.AlertState `json:"state"`
Start int64 `json:"start"`
End int64 `json:"end"`
}
type Stats struct {
TotalCurrentTriggers uint64 `json:"totalCurrentTriggers"`
TotalPastTriggers uint64 `json:"totalPastTriggers"`
CurrentTriggersSeries *Series `json:"currentTriggersSeries"`
PastTriggersSeries *Series `json:"pastTriggersSeries"`
CurrentAvgResolutionTime string `json:"currentAvgResolutionTime"`
PastAvgResolutionTime string `json:"pastAvgResolutionTime"`
CurrentAvgResolutionTimeSeries *Series `json:"currentAvgResolutionTimeSeries"`
PastAvgResolutionTimeSeries *Series `json:"pastAvgResolutionTimeSeries"`
}
type QueryProgress struct {
ReadRows uint64 `json:"read_rows"`
ReadBytes uint64 `json:"read_bytes"`
ElapsedMs uint64 `json:"elapsed_ms"`
}
type URLShareableTimeRange struct {
Start int64 `json:"start"`
End int64 `json:"end"`
@@ -1290,3 +1175,9 @@ type URLShareableOptions struct {
Format string `json:"format"`
SelectColumns []AttributeKey `json:"selectColumns"`
}
type LogQBOptions struct {
GraphLimitQtype string
IsLivetailQuery bool
PreferRPM bool
}


@@ -109,6 +109,7 @@ func NewBaseRule(id string, p *PostableRule, reader interfaces.Reader, opts ...R
id: id,
name: p.AlertName,
source: p.Source,
typ: p.AlertType,
ruleCondition: p.RuleCondition,
evalWindow: time.Duration(p.EvalWindow),
labels: qslabels.FromMap(p.Labels),
@@ -471,9 +472,9 @@ func (r *BaseRule) shouldAlert(series v3.Series) (Sample, bool) {
return alertSmpl, shouldAlert
}
func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []v3.RuleStateHistory) error {
func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []model.RuleStateHistory) error {
zap.L().Debug("recording rule state history", zap.String("ruleid", r.ID()), zap.Any("prevState", prevState), zap.Any("currentState", currentState), zap.Any("itemsToAdd", itemsToAdd))
revisedItemsToAdd := map[uint64]v3.RuleStateHistory{}
revisedItemsToAdd := map[uint64]model.RuleStateHistory{}
lastSavedState, err := r.reader.GetLastSavedRuleStateHistory(ctx, r.ID())
if err != nil {
@@ -483,7 +484,7 @@ func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, curren
// the state would reset so we need to add the corresponding state changes to previously saved states
if !r.handledRestart && len(lastSavedState) > 0 {
zap.L().Debug("handling restart", zap.String("ruleid", r.ID()), zap.Any("lastSavedState", lastSavedState))
l := map[uint64]v3.RuleStateHistory{}
l := map[uint64]model.RuleStateHistory{}
for _, item := range itemsToAdd {
l[item.Fingerprint] = item
}
@@ -552,7 +553,7 @@ func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, curren
if len(revisedItemsToAdd) > 0 && r.reader != nil {
zap.L().Debug("writing rule state history", zap.String("ruleid", r.ID()), zap.Any("revisedItemsToAdd", revisedItemsToAdd))
entries := make([]v3.RuleStateHistory, 0, len(revisedItemsToAdd))
entries := make([]model.RuleStateHistory, 0, len(revisedItemsToAdd))
for _, item := range revisedItemsToAdd {
entries = append(entries, item)
}


@@ -319,6 +319,10 @@ func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) {
if strings.Contains(alert, "time_series_v2") {
alertsInfo.AlertsWithTSV2 = alertsInfo.AlertsWithTSV2 + 1
}
if strings.Contains(alert, "signoz_logs.distributed_logs") ||
strings.Contains(alert, "signoz_logs.logs") {
alertsInfo.AlertsWithLogsChQuery = alertsInfo.AlertsWithLogsChQuery + 1
}
err = json.Unmarshal([]byte(alert), &rule)
if err != nil {
zap.L().Error("invalid rule data", zap.Error(err))


@@ -35,6 +35,8 @@ type PrepareTaskOptions struct {
FF interfaces.FeatureLookup
ManagerOpts *ManagerOptions
NotifyFunc NotifyFunc
UseLogsNewSchema bool
}
const taskNamesuffix = "webAppEditor"
@@ -75,6 +77,8 @@ type ManagerOptions struct {
EvalDelay time.Duration
PrepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
UseLogsNewSchema bool
}
// The Manager manages recording and alerting rules.
@@ -96,6 +100,8 @@ type Manager struct {
reader interfaces.Reader
prepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
UseLogsNewSchema bool
}
func defaultOptions(o *ManagerOptions) *ManagerOptions {
@@ -130,6 +136,7 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) {
opts.Rule,
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
WithEvalDelay(opts.ManagerOpts.EvalDelay),
)
@@ -333,6 +340,8 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error {
FF: m.featureFlags,
ManagerOpts: m.opts,
NotifyFunc: m.prepareNotifyFunc(),
UseLogsNewSchema: m.opts.UseLogsNewSchema,
})
if err != nil {
@@ -452,6 +461,8 @@ func (m *Manager) addTask(rule *PostableRule, taskName string) error {
FF: m.featureFlags,
ManagerOpts: m.opts,
NotifyFunc: m.prepareNotifyFunc(),
UseLogsNewSchema: m.opts.UseLogsNewSchema,
})
for _, r := range newTask.Rules() {
@@ -794,6 +805,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
parsedRule,
m.featureFlags,
m.reader,
m.opts.UseLogsNewSchema,
WithSendAlways(),
WithSendUnmatched(),
)


@@ -219,7 +219,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
}
itemsToAdd := []v3.RuleStateHistory{}
itemsToAdd := []model.RuleStateHistory{}
// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
for fp, a := range r.active {
@@ -236,13 +236,13 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
if a.State != model.StateInactive {
a.State = model.StateInactive
a.ResolvedAt = ts
itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: model.StateInactive,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: v3.LabelsString(labelsJSON),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
})
}
@@ -256,13 +256,13 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
if a.Missing {
state = model.StateNoData
}
itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: state,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: v3.LabelsString(labelsJSON),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})


@@ -13,7 +13,7 @@ import (
func TestPromRuleShouldAlert(t *testing.T) {
postableRule := PostableRule{
AlertName: "Test Rule",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeProm,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),


@@ -5,7 +5,6 @@ import (
"time"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
)
@@ -35,7 +34,7 @@ type Rule interface {
SetEvaluationTimestamp(time.Time)
GetEvaluationTimestamp() time.Time
RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []v3.RuleStateHistory) error
RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []model.RuleStateHistory) error
SendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc)
}


@@ -60,6 +60,7 @@ func NewThresholdRule(
p *PostableRule,
featureFlags interfaces.FeatureLookup,
reader interfaces.Reader,
useLogsNewSchema bool,
opts ...RuleOption,
) (*ThresholdRule, error) {
@@ -77,17 +78,19 @@ func NewThresholdRule(
}
querierOption := querier.QuerierOptions{
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FeatureLookup: featureFlags,
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FeatureLookup: featureFlags,
UseLogsNewSchema: useLogsNewSchema,
}
querierOptsV2 := querierV2.QuerierOptions{
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FeatureLookup: featureFlags,
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FeatureLookup: featureFlags,
UseLogsNewSchema: useLogsNewSchema,
}
t.querier = querier.NewQuerier(querierOption)
@@ -501,9 +504,9 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (Vec
var queryErrors map[string]error
if r.version == "v4" {
results, queryErrors, err = r.querierV2.QueryRange(ctx, params, map[string]v3.AttributeKey{})
results, queryErrors, err = r.querierV2.QueryRange(ctx, params)
} else {
results, queryErrors, err = r.querier.QueryRange(ctx, params, map[string]v3.AttributeKey{})
results, queryErrors, err = r.querier.QueryRange(ctx, params)
}
if err != nil {
@@ -700,7 +703,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
r.active[h] = a
}
itemsToAdd := []v3.RuleStateHistory{}
itemsToAdd := []model.RuleStateHistory{}
// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
for fp, a := range r.active {
@@ -717,13 +720,13 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
if a.State != model.StateInactive {
a.State = model.StateInactive
a.ResolvedAt = ts
itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: model.StateInactive,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: v3.LabelsString(labelsJSON),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})
@@ -738,13 +741,13 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er
if a.Missing {
state = model.StateNoData
}
itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{
itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
RuleID: r.ID(),
RuleName: r.Name(),
State: state,
StateChanged: true,
UnixMilli: ts.UnixMilli(),
Labels: v3.LabelsString(labelsJSON),
Labels: model.LabelsString(labelsJSON),
Fingerprint: a.QueryResultLables.Hash(),
Value: a.Value,
})


@@ -18,7 +18,7 @@ import (
func TestThresholdRuleShouldAlert(t *testing.T) {
postableRule := PostableRule{
AlertName: "Tricky Condition Tests",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -685,7 +685,7 @@ func TestThresholdRuleShouldAlert(t *testing.T) {
postableRule.RuleCondition.MatchType = MatchType(c.matchType)
postableRule.RuleCondition.Target = &c.target
rule, err := NewThresholdRule("69", &postableRule, fm, nil, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -774,7 +774,7 @@ func TestPrepareLinksToLogs(t *testing.T) {
}
fm := featureManager.StartManager()
rule, err := NewThresholdRule("69", &postableRule, fm, nil, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -788,7 +788,7 @@ func TestPrepareLinksToLogs(t *testing.T) {
func TestPrepareLinksToTraces(t *testing.T) {
postableRule := PostableRule{
AlertName: "Links to traces test",
AlertType: "TRACES_BASED_ALERT",
AlertType: AlertTypeTraces,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -816,7 +816,7 @@ func TestPrepareLinksToTraces(t *testing.T) {
}
fm := featureManager.StartManager()
rule, err := NewThresholdRule("69", &postableRule, fm, nil, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -830,7 +830,7 @@ func TestPrepareLinksToTraces(t *testing.T) {
func TestThresholdRuleLabelNormalization(t *testing.T) {
postableRule := PostableRule{
AlertName: "Tricky Condition Tests",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -892,7 +892,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
postableRule.RuleCondition.MatchType = MatchType(c.matchType)
postableRule.RuleCondition.Target = &c.target
rule, err := NewThresholdRule("69", &postableRule, fm, nil, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -914,7 +914,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
func TestThresholdRuleEvalDelay(t *testing.T) {
postableRule := PostableRule{
AlertName: "Test Eval Delay",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -945,7 +945,7 @@ func TestThresholdRuleEvalDelay(t *testing.T) {
fm := featureManager.StartManager()
for idx, c := range cases {
rule, err := NewThresholdRule("69", &postableRule, fm, nil) // no eval delay
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true) // no eval delay
if err != nil {
assert.NoError(t, err)
}
@@ -963,7 +963,7 @@ func TestThresholdRuleEvalDelay(t *testing.T) {
func TestThresholdRuleClickHouseTmpl(t *testing.T) {
postableRule := PostableRule{
AlertName: "Tricky Condition Tests",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -994,7 +994,7 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) {
fm := featureManager.StartManager()
for idx, c := range cases {
rule, err := NewThresholdRule("69", &postableRule, fm, nil, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -1019,7 +1019,7 @@ func (m *queryMatcherAny) Match(string, string) error {
func TestThresholdRuleUnitCombinations(t *testing.T) {
postableRule := PostableRule{
AlertName: "Units test",
AlertType: "METRIC_BASED_ALERT",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -1135,9 +1135,9 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
}
options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
rule, err := NewThresholdRule("69", &postableRule, fm, reader)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
rule.temporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
@@ -1170,8 +1170,8 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
func TestThresholdRuleNoData(t *testing.T) {
postableRule := PostableRule{
AlertName: "Units test",
AlertType: "METRIC_BASED_ALERT",
AlertName: "No data test",
AlertType: AlertTypeMetric,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
@@ -1234,9 +1234,9 @@ func TestThresholdRuleNoData(t *testing.T) {
}
options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
rule, err := NewThresholdRule("69", &postableRule, fm, reader)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
rule.temporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
@@ -1261,3 +1261,201 @@ func TestThresholdRuleNoData(t *testing.T) {
}
}
}
func TestThresholdRuleTracesLink(t *testing.T) {
postableRule := PostableRule{
AlertName: "Traces link test",
AlertType: AlertTypeTraces,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
RuleCondition: &RuleCondition{
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
StepInterval: 60,
AggregateAttribute: v3.AttributeKey{
Key: "durationNano",
},
AggregateOperator: v3.AggregateOperatorP95,
DataSource: v3.DataSourceTraces,
Expression: "A",
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "httpMethod", IsColumn: true, Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString},
Value: "GET",
Operator: v3.FilterOperatorEqual,
},
},
},
},
},
},
},
}
fm := featureManager.StartManager()
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{})
if err != nil {
t.Errorf("an error '%s' was not expected when opening a stub database connection", err)
}
cols := make([]cmock.ColumnType, 0)
cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"})
cols = append(cols, cmock.ColumnType{Name: "attr", Type: "String"})
cols = append(cols, cmock.ColumnType{Name: "timestamp", Type: "String"})
for idx, c := range testCases {
rows := cmock.NewRows(cols, c.values)
// We are testing the eval logic after the query is run
// so we don't care about the query string here
queryString := "SELECT any"
mock.
ExpectQuery(queryString).
WillReturnRows(rows)
postableRule.RuleCondition.CompareOp = CompareOp(c.compareOp)
postableRule.RuleCondition.MatchType = MatchType(c.matchType)
postableRule.RuleCondition.Target = &c.target
postableRule.RuleCondition.CompositeQuery.Unit = c.yAxisUnit
postableRule.RuleCondition.TargetUnit = c.targetUnit
postableRule.Annotations = map[string]string{
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "The rule threshold is set to {{$threshold}}, and the observed metric value is {{$value}}",
}
options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
rule.temporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
},
}
if err != nil {
assert.NoError(t, err)
}
retVal, err := rule.Eval(context.Background(), time.Now())
if err != nil {
assert.NoError(t, err)
}
if c.expectAlerts == 0 {
assert.Equal(t, 0, retVal.(int), "case %d", idx)
} else {
assert.Equal(t, c.expectAlerts, retVal.(int), "case %d", idx)
for _, item := range rule.active {
for name, value := range item.Annotations.Map() {
if name == "related_traces" {
assert.NotEmpty(t, value, "case %d", idx)
assert.Contains(t, value, "GET")
}
}
}
}
}
}
func TestThresholdRuleLogsLink(t *testing.T) {
postableRule := PostableRule{
AlertName: "Logs link test",
AlertType: AlertTypeLogs,
RuleType: RuleTypeThreshold,
EvalWindow: Duration(5 * time.Minute),
Frequency: Duration(1 * time.Minute),
RuleCondition: &RuleCondition{
CompositeQuery: &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
StepInterval: 60,
AggregateAttribute: v3.AttributeKey{
Key: "component",
},
AggregateOperator: v3.AggregateOperatorCountDistinct,
DataSource: v3.DataSourceLogs,
Expression: "A",
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "k8s.container.name", IsColumn: false, Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString},
Value: "testcontainer",
Operator: v3.FilterOperatorEqual,
},
},
},
},
},
},
},
}
fm := featureManager.StartManager()
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{})
if err != nil {
t.Errorf("an error '%s' was not expected when opening a stub database connection", err)
}
cols := make([]cmock.ColumnType, 0)
cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"})
cols = append(cols, cmock.ColumnType{Name: "attr", Type: "String"})
cols = append(cols, cmock.ColumnType{Name: "timestamp", Type: "String"})
for idx, c := range testCases {
rows := cmock.NewRows(cols, c.values)
// We are testing the eval logic after the query is run
// so we don't care about the query string here
queryString := "SELECT any"
mock.
ExpectQuery(queryString).
WillReturnRows(rows)
postableRule.RuleCondition.CompareOp = CompareOp(c.compareOp)
postableRule.RuleCondition.MatchType = MatchType(c.matchType)
postableRule.RuleCondition.Target = &c.target
postableRule.RuleCondition.CompositeQuery.Unit = c.yAxisUnit
postableRule.RuleCondition.TargetUnit = c.targetUnit
postableRule.Annotations = map[string]string{
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "The rule threshold is set to {{$threshold}}, and the observed metric value is {{$value}}",
}
options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true)
rule.temporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
},
}
if err != nil {
assert.NoError(t, err)
}
retVal, err := rule.Eval(context.Background(), time.Now())
if err != nil {
assert.NoError(t, err)
}
if c.expectAlerts == 0 {
assert.Equal(t, 0, retVal.(int), "case %d", idx)
} else {
assert.Equal(t, c.expectAlerts, retVal.(int), "case %d", idx)
for _, item := range rule.active {
for name, value := range item.Annotations.Map() {
if name == "related_logs" {
assert.NotEmpty(t, value, "case %d", idx)
assert.Contains(t, value, "testcontainer")
}
}
}
}
}
}


@@ -0,0 +1,68 @@
package rules

import "time"

// testCases drives the table-driven threshold-rule test above: raw sample
// values arrive in yAxisUnit and are compared against a target expressed
// in targetUnit.
var (
	testCases = []struct {
		targetUnit   string
		yAxisUnit    string
		values       [][]interface{}
		expectAlerts int
		compareOp    string
		matchType    string
		target       float64
		summaryAny   []string
	}{
		{
			targetUnit: "s",
			yAxisUnit:  "ns",
			values: [][]interface{}{
				{float64(572588400), "attr", time.Now()},                              // 0.57 seconds
				{float64(572386400), "attr", time.Now().Add(1 * time.Second)},         // 0.57 seconds
				{float64(300947400), "attr", time.Now().Add(2 * time.Second)},         // 0.3 seconds
				{float64(299316000), "attr", time.Now().Add(3 * time.Second)},         // 0.3 seconds
				{float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 0.06 seconds
			},
			expectAlerts: 0,
			compareOp:    "1", // Above
			matchType:    "1", // Once
			target:       1,   // 1 second
		},
		{
			targetUnit: "ms",
			yAxisUnit:  "ns",
			values: [][]interface{}{
				{float64(572588400), "attr", time.Now()},                              // 572.58 ms
				{float64(572386400), "attr", time.Now().Add(1 * time.Second)},         // 572.38 ms
				{float64(300947400), "attr", time.Now().Add(2 * time.Second)},         // 300.94 ms
				{float64(299316000), "attr", time.Now().Add(3 * time.Second)},         // 299.31 ms
				{float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 66.64 ms
			},
			expectAlerts: 4,
			compareOp:    "1", // Above
			matchType:    "1", // Once
			target:       200, // 200 ms
			summaryAny: []string{
				"observed metric value is 299 ms",
				"the observed metric value is 573 ms",
				"the observed metric value is 572 ms",
				"the observed metric value is 301 ms",
			},
		},
		{
			targetUnit: "decgbytes",
			yAxisUnit:  "bytes",
			values: [][]interface{}{
				{float64(2863284053), "attr", time.Now()},                             // 2.86 GB
				{float64(2863388842), "attr", time.Now().Add(1 * time.Second)},        // 2.86 GB
				{float64(300947400), "attr", time.Now().Add(2 * time.Second)},         // 0.3 GB
				{float64(299316000), "attr", time.Now().Add(3 * time.Second)},         // 0.3 GB
				{float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 66.64 MB
			},
			expectAlerts: 0,
			compareOp:    "1", // Above
			matchType:    "1", // Once
			target:       200, // 200 GB
		},
	}
)
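The expectAlerts values follow directly from unit conversion: 572588400 ns is 572.59 ms, so four of the five samples clear the 200 ms target, while none clears 1 s. A minimal sketch of that arithmetic — the conversion factors are hard-coded here for illustration, whereas the rule engine presumably routes this through its generic unit converter:

package main

import "fmt"

// Conversion factors for just the unit pairs the fixtures above use.
var factors = map[string]float64{
	"ns->s":            1e9,
	"ns->ms":           1e6,
	"bytes->decgbytes": 1e9,
}

// breaches counts samples that exceed the target after converting from the
// y-axis unit to the target unit (the "Above" compare op).
func breaches(values []float64, from, to string, target float64) int {
	n := 0
	for _, v := range values {
		if v/factors[from+"->"+to] > target {
			n++
		}
	}
	return n
}

func main() {
	ns := []float64{572588400, 572386400, 300947400, 299316000, 66640400.00000001}
	fmt.Println(breaches(ns, "ns", "s", 1))    // 0 — no sample exceeds 1 s
	fmt.Println(breaches(ns, "ns", "ms", 200)) // 4 — every sample but 66.64 ms exceeds 200 ms
}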


@@ -178,9 +178,12 @@ type Telemetry struct {
 	patTokenUser bool
 	mutex        sync.RWMutex

-	alertsInfoCallback func(ctx context.Context) (*model.AlertsInfo, error)
-	userCountCallback  func(ctx context.Context) (int, error)
-	userRoleCallback   func(ctx context.Context, groupId string) (string, error)
+	alertsInfoCallback     func(ctx context.Context) (*model.AlertsInfo, error)
+	userCountCallback      func(ctx context.Context) (int, error)
+	userRoleCallback       func(ctx context.Context, groupId string) (string, error)
+	getUsersCallback       func(ctx context.Context) ([]model.UserPayload, *model.ApiError)
+	dashboardsInfoCallback func(ctx context.Context) (*model.DashboardsInfo, error)
+	savedViewsInfoCallback func(ctx context.Context) (*model.SavedViewsInfo, error)
 }

 func (a *Telemetry) SetAlertsInfoCallback(callback func(ctx context.Context) (*model.AlertsInfo, error)) {
@@ -195,6 +198,18 @@ func (a *Telemetry) SetUserRoleCallback(callback func(ctx context.Context, group
 	a.userRoleCallback = callback
 }

+func (a *Telemetry) SetGetUsersCallback(callback func(ctx context.Context) ([]model.UserPayload, *model.ApiError)) {
+	a.getUsersCallback = callback
+}
+
+func (a *Telemetry) SetSavedViewsInfoCallback(callback func(ctx context.Context) (*model.SavedViewsInfo, error)) {
+	a.savedViewsInfoCallback = callback
+}
+
+func (a *Telemetry) SetDashboardsInfoCallback(callback func(ctx context.Context) (*model.DashboardsInfo, error)) {
+	a.dashboardsInfoCallback = callback
+}
+
 func createTelemetry() {
 	// Do not do anything in CI (not even resolving the outbound IP address)
 	if testing.Testing() {
@@ -296,7 +311,7 @@ func createTelemetry() {
 			data[key] = value
 		}

-		users, apiErr := telemetry.reader.GetUsers(ctx)
+		users, apiErr := telemetry.getUsersCallback(ctx)
 		if apiErr == nil {
 			for _, user := range users {
 				if user.Email == DEFAULT_CLOUD_EMAIL {
@@ -308,7 +323,7 @@ func createTelemetry() {
 		alertsInfo, err := telemetry.alertsInfoCallback(ctx)
 		if err == nil {
-			dashboardsInfo, err := telemetry.reader.GetDashboardsInfo(ctx)
+			dashboardsInfo, err := telemetry.dashboardsInfoCallback(ctx)
 			if err == nil {
 				channels, err := telemetry.reader.GetChannels()
 				if err == nil {
@@ -328,7 +343,7 @@ func createTelemetry() {
 					alertsInfo.MSTeamsChannels++
 				}
 			}
-			savedViewsInfo, err := telemetry.reader.GetSavedViewsInfo(ctx)
+			savedViewsInfo, err := telemetry.savedViewsInfoCallback(ctx)
 			if err == nil {
 				dashboardsAlertsData := map[string]interface{}{
 					"totalDashboards": dashboardsInfo.TotalDashboards,
@@ -339,6 +354,7 @@ func createTelemetry() {
 					"metricBasedPanels":        dashboardsInfo.MetricBasedPanels,
 					"tracesBasedPanels":        dashboardsInfo.TracesBasedPanels,
 					"dashboardsWithTSV2":       dashboardsInfo.QueriesWithTSV2,
+					"dashboardWithLogsChQuery": dashboardsInfo.DashboardsWithLogsChQuery,
 					"totalAlerts":              alertsInfo.TotalAlerts,
 					"alertsWithTSV2":           alertsInfo.AlertsWithTSV2,
 					"logsBasedAlerts":          alertsInfo.LogsBasedAlerts,

@@ -358,6 +374,7 @@ func createTelemetry() {
 					"metricsClickHouseQueries":     alertsInfo.MetricsClickHouseQueries,
 					"metricsPrometheusQueries":     alertsInfo.MetricsPrometheusQueries,
 					"spanMetricsPrometheusQueries": alertsInfo.SpanMetricsPrometheusQueries,
+					"alertsWithLogsChQuery":        alertsInfo.AlertsWithLogsChQuery,
 				}
 				// send event only if there are dashboards or alerts or channels
 				if (dashboardsInfo.TotalDashboards > 0 || alertsInfo.TotalAlerts > 0 || len(*channels) > 0 || savedViewsInfo.TotalSavedViews > 0) && apiErr == nil {
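The shape of this refactor is plain dependency inversion: rather than calling telemetry.reader.GetUsers and friends directly, Telemetry holds function-typed fields that the owner of each data source registers at startup. A pared-down, self-contained sketch of the pattern — type and field names below are illustrative, not the actual SigNoz wiring:

package main

import (
	"context"
	"fmt"
)

// userPayload stands in for model.UserPayload.
type userPayload struct{ Email string }

// telemetryLite models just the pattern: a function-typed field plus a
// setter, so the telemetry code never depends on the reader's concrete type.
type telemetryLite struct {
	getUsers func(ctx context.Context) ([]userPayload, error)
}

func (t *telemetryLite) SetGetUsersCallback(cb func(ctx context.Context) ([]userPayload, error)) {
	t.getUsers = cb
}

func main() {
	t := &telemetryLite{}

	// At startup, the component that owns the user store registers itself.
	t.SetGetUsersCallback(func(ctx context.Context) ([]userPayload, error) {
		return []userPayload{{Email: "admin@example.com"}}, nil
	})

	users, err := t.getUsers(context.Background())
	if err == nil {
		fmt.Println(len(users)) // 1
	}
}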


@@ -45,6 +45,7 @@ func NewMockClickhouseReader(
 		"",
 		featureFlags,
 		"",
+		true,
 	)

 	return reader, mockDB
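The new trailing true mirrors the extra boolean passed to clickhouseReader.NewReaderFromClickhouseConnection in the threshold-rule test above, so the mock reader's constructor evidently gained the same flag in this change set; the parameter's name is not visible in the excerpts shown here.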

Some files were not shown because too many files have changed in this diff.