Compare commits

...

39 Commits

Author SHA1 Message Date
Prashant Shahi
9ccc686c63 chore(signoz): pin versions: SigNoz 0.58.2
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-11-19 11:27:28 +05:30
Prashant Shahi
3ad6ff73df Merge branch 'develop' into release/v0.58.x 2024-11-19 11:27:01 +05:30
Srikanth Chekuri
c93cf1ce95 fix: incorrect formula for apdex (#6460) 2024-11-19 05:42:04 +00:00
Prashant Shahi
a9ced66258 Merge branch 'develop' into release/v0.58.x 2024-11-19 11:06:27 +05:30
Nityananda Gohain
98a350692b fix: update TestListTsRange to return all range (#6470) 2024-11-19 10:51:40 +05:30
Vikrant Gupta
d93f72f18d chore: use the license v2 key to fill licenses v3 on startup (#6468)
* feat: use the license v2 key to fill licenses v3 on startup

* chore: make the init only if the licenses v2 is present

* chore: address review comments
2024-11-18 17:55:00 +05:30
Shaheer Kochai
a59e7b9dfb feat: add 'create channel' option in channels list and refetch alert channels on opening the channels dropdown (#6416)
* feat: add channel creation option and auto-refresh channels list on dropdown open

* chore: move inline styles to style.ts

* fix: show the prompt to ask admin if the user doesn't have permissions

* fix: display create channel option only if the user has permission

* fix: prevent repeated new alert event logs + log new channel option inside dropdown
2024-11-18 06:30:06 +00:00
Nityananda Gohain
91bbeaf175 fix: remove unwanted trace APIs (#6464) 2024-11-18 10:27:08 +05:30
Yunus M
22e61e1605 [Snyk] Security upgrade alpine from 3.18.6 to 3.20.3 (#6463)
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-6913411
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-7249236
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-7249265
- https://snyk.io/vuln/SNYK-ALPINE318-BUSYBOX-7249419

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
2024-11-17 21:56:15 +05:30
Srikanth Chekuri
656d1c2b1c chore: add missing alert telemetry (#6459) 2024-11-16 19:16:05 +00:00
Srikanth Chekuri
493ae4fd07 chore: add user email to log_comment (#6461) 2024-11-17 00:36:10 +05:30
Srikanth Chekuri
cd1ec561b1 fix: compare op outside bounds for anomaly alert (#6458) 2024-11-16 20:17:34 +05:30
Nityananda Gohain
0acf39a532 feat: support for new enrichment logic in traces (#6438)
* feat: support for new enrichment logic in traces

* fix: default test added

* fix: update func name in links

* Update pkg/query-service/utils/logs_test.go

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2024-11-16 15:19:25 +05:30
Prashant Shahi
d859301d30 Merge branch 'develop' into release/v0.58.x 2024-11-16 04:50:33 +05:30
Nityananda Gohain
35f4eaa23b fix: update logs struct to fix live logs (#6453) 2024-11-15 22:42:16 +05:30
Nityananda Gohain
77c5f17dce feat: support for window based pagination in new trace v4 (#6440)
* feat: support for window based pagination in new trace v4

* fix: update pagination logic

* fix: update comment

* fix: subtract correct length

* fix: revert changes

* fix: add tests for querier

* fix: rename matcher

* fix: handle offset in memory for list queries in traces

* fix: correct var name

* fix: add max pagination limit for traces
2024-11-15 22:13:28 +05:30
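
As a hedged illustration of the window-based pagination these commits describe, here is a minimal TypeScript sketch; the window size, the cap, and all names are assumptions for illustration, not SigNoz's actual implementation:

interface Span {
  timestamp: number;
}

// Scan fixed time windows until limit + offset rows are collected, then
// apply the offset in memory ("handle offset in memory for list queries"),
// capping how deep pagination can go ("max pagination limit for traces").
function paginateSpans(
  start: number,
  end: number,
  limit: number,
  offset: number,
  query: (s: number, e: number) => Span[],
): Span[] {
  const WINDOW_MS = 3_600_000; // hypothetical 1-hour window
  const MAX_PAGINATION = 10_000; // hypothetical cap on limit + offset
  const need = Math.min(limit + offset, MAX_PAGINATION);
  const rows: Span[] = [];
  for (let s = start; s < end && rows.length < need; s += WINDOW_MS) {
    rows.push(...query(s, Math.min(s + WINDOW_MS, end)));
  }
  // Drop the first `offset` rows in memory and return at most `limit` rows.
  return rows.slice(offset, offset + limit);
}
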
Prashant Shahi
a11aadb712 Merge branch 'main' into release/v0.58.x 2024-11-15 22:06:09 +05:30
Prashant Shahi
bc9c7b5f1d chore(signoz): pin versions: SigNoz 0.58.1
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-11-15 22:02:47 +05:30
Prashant Shahi
1bba932d08 Merge branch 'develop' into release/v0.58.x 2024-11-15 22:01:55 +05:30
SagarRajput-7
c1478c4e54 feat: removed dashboard uuid in all cases, be it duplicate, empty or some valid value, while importing json (#6448)
* feat: removed dashboard uuid in all cases, be it duplicate, empty or some valid value, while importing json

* feat: added comment to better explain the logic
2024-11-15 19:35:29 +05:30
Yunus M
371224a64a fix: show org onboarding only to cloud customers (#6451) 2024-11-15 19:12:38 +05:30
Yunus M
504bc0d541 feat: ingestion limits - add toggle feature (#6430) 2024-11-15 08:32:31 +00:00
Nityananda Gohain
2faa0c6d4f feat: trace V4 QB (#6407)
* feat: trace V4 QB

* fix: update get column name and remove id

* fix: handle contains and update tests

* fix: remove unwanted step interval calculation

* fix: add test cases

* fix: add tests for static columns in QB

* fix: add more order by tests

* fix: update order by logic
2024-11-13 20:30:01 +05:30
Srikanth Chekuri
969ac5028e chore: add v2 metric writer to pipelines (#6345) 2024-11-13 10:41:28 +00:00
Prashant Shahi
3f7adeb040 Merge branch 'develop' into release/v0.58.x 2024-11-13 12:01:54 +05:30
Srikanth Chekuri
323da3494b chore: add experimental rate/increase calc (#6432) 2024-11-13 11:47:56 +05:30
Vikrant Gupta
01fda51959 chore: return proper http codes on unique constraint error (#6428) 2024-11-13 00:25:00 +05:30
Srikanth Chekuri
85ac21f253 fix: update request payload for span metrics queries (#6323) 2024-11-12 17:22:42 +00:00
Srikanth Chekuri
fd9e9f0fb3 chore: add k8s {deployment, daemonset, statefulset, job} resources (#6401) 2024-11-12 15:23:40 +00:00
Nityananda Gohain
d5523fc092 fix: ignore ts for panel type table (#6419) 2024-11-12 08:04:45 +00:00
Nityananda Gohain
2ec641b99e fix: add severity_text legend (#6415) 2024-11-12 05:54:22 +00:00
Ekansh Gupta
d1503f1418 feat: fixProducerAPI (#6422)
chore: bugfix
2024-11-12 05:30:36 +00:00
Prashant Shahi
eb6670980a Merge pull request #6405 from SigNoz/release/v0.58.x
Release/v0.58.x
2024-11-08 21:49:57 +05:30
Prashant Shahi
48f3b9cacb chore(signoz): pin versions: SigNoz 0.58.0, SigNoz OtelCollector 0.111.8
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-11-08 16:02:59 +05:30
Prashant Shahi
eaf8571fe9 Merge branch 'main' into release/v0.58.x 2024-11-08 15:49:41 +05:30
Prashant Shahi
b10c22223b Merge pull request #6300 from SigNoz/release/signoz-0.57.0
Release/signoz 0.57.0
2024-10-29 00:45:14 +05:30
Prashant Shahi
cdde369748 Merge branch 'develop' into release/signoz-0.57.0 2024-10-28 21:26:47 +05:30
Prashant Shahi
523cbcd6fc chore: go mod tidy
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-10-28 19:40:23 +05:30
Prashant Shahi
eeadc021e1 chore(signoz): pin versions: SigNoz 0.57.0
Signed-off-by: Prashant Shahi <prashant@signoz.io>
2024-10-28 19:40:10 +05:30
63 changed files with 5466 additions and 1283 deletions

View File

@@ -146,7 +146,7 @@ services:
condition: on-failure
query-service:
image: signoz/query-service:0.56.0
image: signoz/query-service:0.58.2
command:
[
"-config=/root/config/prometheus.yml",
@@ -186,7 +186,7 @@ services:
<<: *db-depend
frontend:
image: signoz/frontend:0.56.0
image: signoz/frontend:0.58.2
deploy:
restart_policy:
condition: on-failure
@@ -199,7 +199,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.111.5
image: signoz/signoz-otel-collector:0.111.8
command:
[
"--config=/etc/otel-collector-config.yaml",
@@ -237,7 +237,7 @@ services:
- query-service
otel-collector-migrator:
image: signoz/signoz-schema-migrator:0.111.5
image: signoz/signoz-schema-migrator:0.111.8
deploy:
restart_policy:
condition: on-failure

View File

@@ -66,28 +66,6 @@ processors:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
timeout: 2s
signozspanmetrics/cumulative:
metrics_exporter: clickhousemetricswrite
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -138,6 +116,8 @@ exporters:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
# logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
@@ -161,20 +141,20 @@ service:
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp, tcplog/docker]
processors: [batch]

View File

@@ -69,7 +69,7 @@ services:
- --storage.path=/data
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.8}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
@@ -84,7 +84,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector:
container_name: signoz-otel-collector
image: signoz/signoz-otel-collector:0.111.5
image: signoz/signoz-otel-collector:0.111.8
command:
[
"--config=/etc/otel-collector-config.yaml",

View File

@@ -162,7 +162,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.56.0}
image: signoz/query-service:${DOCKER_TAG:-0.58.2}
container_name: signoz-query-service
command:
[
@@ -201,7 +201,7 @@ services:
<<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.56.0}
image: signoz/frontend:${DOCKER_TAG:-0.58.2}
container_name: signoz-frontend
restart: on-failure
depends_on:
@@ -213,7 +213,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator-sync:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.8}
container_name: otel-migrator-sync
command:
- "sync"
@@ -228,7 +228,7 @@ services:
# condition: service_healthy
otel-collector-migrator-async:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.8}
container_name: otel-migrator-async
command:
- "async"
@@ -245,7 +245,7 @@ services:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.5}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.8}
container_name: signoz-otel-collector
command:
[

View File

@@ -167,7 +167,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.56.0}
image: signoz/query-service:${DOCKER_TAG:-0.58.2}
container_name: signoz-query-service
command:
[
@@ -208,7 +208,7 @@ services:
<<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.56.0}
image: signoz/frontend:${DOCKER_TAG:-0.58.2}
container_name: signoz-frontend
restart: on-failure
depends_on:
@@ -220,7 +220,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.8}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
@@ -234,7 +234,7 @@ services:
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.5}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.8}
container_name: signoz-otel-collector
command:
[

View File

@@ -57,35 +57,11 @@ receivers:
labels:
job_name: otel-collector
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
signozspanmetrics/cumulative:
metrics_exporter: clickhousemetricswrite
metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@@ -149,6 +125,8 @@ exporters:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
timeout: 10s
@@ -168,20 +146,20 @@ service:
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp, tcplog/docker]
processors: [batch]

View File

@@ -1,5 +1,5 @@
# use a minimal alpine image
FROM alpine:3.18.6
FROM alpine:3.20.3
# Add Maintainer Info
LABEL maintainer="signoz"

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"go.signoz.io/signoz/ee/query-service/license/sqlite"
"go.signoz.io/signoz/ee/query-service/model"
@@ -274,14 +275,14 @@ func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
}
// InsertLicenseV3 inserts a new license v3 in db
func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) error {
func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {
query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
// license is the entity of zeus so putting the entire license here without defining schema
licenseData, err := json.Marshal(l.Data)
if err != nil {
return fmt.Errorf("insert license failed: license marshal error")
return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
}
_, err = r.db.ExecContext(ctx,
@@ -292,8 +293,14 @@ func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) error {
)
if err != nil {
if sqliteErr, ok := err.(sqlite3.Error); ok {
if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
}
}
zap.L().Error("error in inserting license data: ", zap.Error(err))
return fmt.Errorf("failed to insert license in db: %v", err)
return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
}
return nil

View File

@@ -67,6 +67,30 @@ func StartManager(dbType string, db *sqlx.DB, useLicensesV3 bool, features ...ba
repo: &repo,
}
if useLicensesV3 {
// get active license from the db
active, err := m.repo.GetActiveLicense(context.Background())
if err != nil {
return m, err
}
// if we have an active license then need to fetch the complete details
if active != nil {
// fetch the new license structure from control plane
licenseV3, apiError := validate.ValidateLicenseV3(active.Key)
if apiError != nil {
return m, apiError
}
// insert the licenseV3 in sqlite db
apiError = m.repo.InsertLicenseV3(context.Background(), licenseV3)
// if the license already exists move ahead.
if apiError != nil && apiError.Typ != model.ErrorConflict {
return m, apiError
}
}
}
if err := m.start(useLicensesV3, features...); err != nil {
return m, err
}
@@ -463,7 +487,7 @@ func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseRe
err := lm.repo.InsertLicenseV3(ctx, license)
if err != nil {
zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err)
return nil, err
}
// license is valid, activate it

View File

@@ -61,6 +61,11 @@ func NewAnomalyRule(
zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))
if p.RuleCondition.CompareOp == baserules.ValueIsBelow {
target := -1 * *p.RuleCondition.Target
p.RuleCondition.Target = &target
}
baseRule, err := baserules.NewBaseRule(id, p, reader, opts...)
if err != nil {
return nil, err

View File

@@ -22,6 +22,7 @@ import AppActions from 'types/actions';
import { UPDATE_USER_IS_FETCH } from 'types/actions/app';
import { Organization } from 'types/api/user/getOrganization';
import AppReducer from 'types/reducer/app';
import { isCloudUser } from 'utils/app';
import { routePermission } from 'utils/permission';
import routes, {
@@ -76,6 +77,8 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const { t } = useTranslation(['common']);
const isCloudUserVal = isCloudUser();
const localStorageUserAuthToken = getInitialUserTokenRefreshToken();
const dispatch = useDispatch<Dispatch<AppActions>>();
@@ -143,6 +146,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const handleRedirectForOrgOnboarding = (key: string): void => {
if (
isLoggedInState &&
isCloudUserVal &&
!isFetchingOrgPreferences &&
!isLoadingOrgUsers &&
!isEmpty(orgUsers?.payload) &&
@@ -158,6 +162,10 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
history.push(ROUTES.ONBOARDING);
}
}
if (!isCloudUserVal && key === 'ONBOARDING') {
history.push(ROUTES.APPLICATION);
}
};
const handleUserLoginIfTokenPresent = async (
@@ -250,7 +258,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const handleRouting = (): void => {
const showOrgOnboarding = shouldShowOnboarding();
if (showOrgOnboarding && !isOnboardingComplete) {
if (showOrgOnboarding && !isOnboardingComplete && isCloudUserVal) {
history.push(ROUTES.ONBOARDING);
} else {
history.push(ROUTES.APPLICATION);

View File

@@ -8,7 +8,7 @@ import { ALERTS_DATA_SOURCE_MAP } from 'constants/alerts';
import ROUTES from 'constants/routes';
import useComponentPermission from 'hooks/useComponentPermission';
import useFetch from 'hooks/useFetch';
import { useCallback, useEffect, useState } from 'react';
import { useCallback, useEffect, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
@@ -83,16 +83,22 @@ function BasicInfo({
window.open(ROUTES.CHANNELS_NEW, '_blank');
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const hasLoggedEvent = useRef(false);
useEffect(() => {
if (!channels.loading && isNewRule) {
if (!channels.loading && isNewRule && !hasLoggedEvent.current) {
logEvent('Alert: New alert creation page visited', {
dataSource: ALERTS_DATA_SOURCE_MAP[alertDef?.alertType as AlertTypes],
numberOfChannels: channels?.payload?.length,
});
hasLoggedEvent.current = true;
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [channels.payload, channels.loading]);
}, [channels.loading]);
const refetchChannels = async (): Promise<void> => {
await channels.refetch();
};
return (
<>
@@ -197,7 +203,7 @@ function BasicInfo({
{!shouldBroadCastToAllChannels && (
<Tooltip
title={
noChannels
noChannels && !addNewChannelPermission
? 'No channels. Ask an admin to create a notification channel'
: undefined
}
@@ -212,10 +218,10 @@ function BasicInfo({
]}
>
<ChannelSelect
disabled={
shouldBroadCastToAllChannels || noChannels || !!channels.loading
}
onDropdownOpen={refetchChannels}
disabled={shouldBroadCastToAllChannels}
currentValue={alertDef.preferredChannels}
handleCreateNewChannels={handleCreateNewChannels}
channels={channels}
onSelectChannels={(preferredChannels): void => {
setAlertDef({

View File

@@ -1,24 +1,33 @@
import { Select } from 'antd';
import { PlusOutlined } from '@ant-design/icons';
import { Select, Spin } from 'antd';
import useComponentPermission from 'hooks/useComponentPermission';
import { State } from 'hooks/useFetch';
import { useNotifications } from 'hooks/useNotifications';
import { ReactNode } from 'react';
import { useTranslation } from 'react-i18next';
import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import { PayloadProps } from 'types/api/channels/getAll';
import AppReducer from 'types/reducer/app';
import { StyledSelect } from './styles';
import { StyledCreateChannelOption, StyledSelect } from './styles';
export interface ChannelSelectProps {
disabled?: boolean;
currentValue?: string[];
onSelectChannels: (s: string[]) => void;
onDropdownOpen: () => void;
channels: State<PayloadProps | undefined>;
handleCreateNewChannels: () => void;
}
function ChannelSelect({
disabled,
currentValue,
onSelectChannels,
onDropdownOpen,
channels,
handleCreateNewChannels,
}: ChannelSelectProps): JSX.Element | null {
// init namespace for translations
const { t } = useTranslation('alerts');
@@ -26,6 +35,10 @@ function ChannelSelect({
const { notifications } = useNotifications();
const handleChange = (value: string[]): void => {
if (value.includes('add-new-channel')) {
handleCreateNewChannels();
return;
}
onSelectChannels(value);
};
@@ -35,9 +48,27 @@ function ChannelSelect({
description: channels.errorMessage,
});
}
const { role } = useSelector<AppState, AppReducer>((state) => state.app);
const [addNewChannelPermission] = useComponentPermission(
['add_new_channel'],
role,
);
const renderOptions = (): ReactNode[] => {
const children: ReactNode[] = [];
if (!channels.loading && addNewChannelPermission) {
children.push(
<Select.Option key="add-new-channel" value="add-new-channel">
<StyledCreateChannelOption>
<PlusOutlined />
Create a new channel
</StyledCreateChannelOption>
</Select.Option>,
);
}
if (
channels.loading ||
channels.payload === undefined ||
@@ -56,6 +87,7 @@ function ChannelSelect({
return children;
};
return (
<StyledSelect
disabled={disabled}
@@ -65,6 +97,12 @@ function ChannelSelect({
placeholder={t('placeholder_channel_select')}
data-testid="alert-channel-select"
value={currentValue}
notFoundContent={channels.loading && <Spin size="small" />}
onDropdownVisibleChange={(open): void => {
if (open) {
onDropdownOpen();
}
}}
onChange={(value): void => {
handleChange(value as string[]);
}}

View File

@@ -4,3 +4,10 @@ import styled from 'styled-components';
export const StyledSelect = styled(Select)`
border-radius: 4px;
`;
export const StyledCreateChannelOption = styled.div`
color: var(--bg-robin-500);
display: flex;
align-items: center;
gap: 8px;
`;

View File

@@ -102,9 +102,9 @@ function RuleOptions({
<Select.Option value="4">{t('option_notequal')}</Select.Option>
</>
)}
{/* the values 5 and 6 are reserved for above or equal and below or equal */}
{ruleType === 'anomaly_rule' && (
<Select.Option value="5">{t('option_above_below')}</Select.Option>
<Select.Option value="7">{t('option_above_below')}</Select.Option>
)}
</InlineSelect>
);

View File

@@ -18,8 +18,13 @@
font-style: normal;
font-weight: var(--font-weight-normal);
line-height: 28px;
/* 155.556% */
letter-spacing: -0.09px;
width: 72%; // arbitrary number to match input width
display: flex;
align-items: center;
gap: 8px;
justify-content: space-between;
}
.subtitle {
@@ -356,6 +361,8 @@
flex: 1;
.heading {
margin-bottom: 8px;
.title {
font-size: 12px;
}
@@ -370,6 +377,18 @@
.ant-input-number {
width: 80%;
}
.no-limit {
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 24px;
font-weight: 700;
font-size: 12px;
color: var(--bg-forest-400);
}
}
.signal-limit-view-mode {

View File

@@ -12,6 +12,7 @@ import {
Modal,
Row,
Select,
Switch,
Table,
TablePaginationConfig,
TableProps as AntDTableProps,
@@ -34,7 +35,7 @@ import dayjs, { Dayjs } from 'dayjs';
import { useGetAllIngestionsKeys } from 'hooks/IngestionKeys/useGetAllIngestionKeys';
import useDebouncedFn from 'hooks/useDebouncedFunction';
import { useNotifications } from 'hooks/useNotifications';
import { isNil } from 'lodash-es';
import { isNil, isUndefined } from 'lodash-es';
import {
ArrowUpRight,
CalendarClock,
@@ -395,84 +396,6 @@ function MultiIngestionSettings(): JSX.Element {
const getFormattedTime = (date: string): string =>
dayjs(date).format('MMM DD,YYYY, hh:mm a');
const handleAddLimit = (
APIKey: IngestionKeyProps,
signalName: string,
): void => {
setActiveSignal({
id: signalName,
signal: signalName,
config: {},
});
const { dailyLimit, secondsLimit } = addEditLimitForm.getFieldsValue();
const payload = {
keyID: APIKey.id,
signal: signalName,
config: {
day: {
size: gbToBytes(dailyLimit),
},
second: {
size: gbToBytes(secondsLimit),
},
},
};
createLimitForIngestionKey(payload);
};
const handleUpdateLimit = (
APIKey: IngestionKeyProps,
signal: LimitProps,
): void => {
setActiveSignal(signal);
const { dailyLimit, secondsLimit } = addEditLimitForm.getFieldsValue();
const payload = {
limitID: signal.id,
signal: signal.signal,
config: {
day: {
size: gbToBytes(dailyLimit),
},
second: {
size: gbToBytes(secondsLimit),
},
},
};
updateLimitForIngestionKey(payload);
};
const bytesToGb = (size: number | undefined): number => {
if (!size) {
return 0;
}
return size / BYTES;
};
const enableEditLimitMode = (
APIKey: IngestionKeyProps,
signal: LimitProps,
): void => {
setActiveAPIKey(APIKey);
setActiveSignal(signal);
addEditLimitForm.setFieldsValue({
dailyLimit: bytesToGb(signal?.config?.day?.size || 0),
secondsLimit: bytesToGb(signal?.config?.second?.size || 0),
});
setIsEditAddLimitOpen(true);
};
const onDeleteLimitHandler = (): void => {
if (activeSignal && activeSignal?.id) {
deleteLimitForKey(activeSignal.id);
}
};
const showDeleteLimitModal = (
APIKey: IngestionKeyProps,
limit: LimitProps,
@@ -496,6 +419,131 @@ function MultiIngestionSettings(): JSX.Element {
addEditLimitForm.resetFields();
};
const handleAddLimit = (
APIKey: IngestionKeyProps,
signalName: string,
): void => {
const { dailyLimit, secondsLimit } = addEditLimitForm.getFieldsValue();
const payload = {
keyID: APIKey.id,
signal: signalName,
config: {},
};
if (!isUndefined(dailyLimit)) {
payload.config = {
day: {
size: gbToBytes(dailyLimit),
},
};
}
if (!isUndefined(secondsLimit)) {
payload.config = {
...payload.config,
second: {
size: gbToBytes(secondsLimit),
},
};
}
if (isUndefined(dailyLimit) && isUndefined(secondsLimit)) {
// No need to save as no limit is provided, close the edit view and reset active signal and api key
setActiveSignal(null);
setActiveAPIKey(null);
setIsEditAddLimitOpen(false);
setUpdatedTags([]);
hideAddViewModal();
setHasCreateLimitForIngestionKeyError(false);
return;
}
createLimitForIngestionKey(payload);
};
const handleUpdateLimit = (
APIKey: IngestionKeyProps,
signal: LimitProps,
): void => {
const { dailyLimit, secondsLimit } = addEditLimitForm.getFieldsValue();
const payload = {
limitID: signal.id,
signal: signal.signal,
config: {},
};
if (isUndefined(dailyLimit) && isUndefined(secondsLimit)) {
showDeleteLimitModal(APIKey, signal);
return;
}
if (!isUndefined(dailyLimit)) {
payload.config = {
day: {
size: gbToBytes(dailyLimit),
},
};
}
if (!isUndefined(secondsLimit)) {
payload.config = {
...payload.config,
second: {
size: gbToBytes(secondsLimit),
},
};
}
updateLimitForIngestionKey(payload);
};
const bytesToGb = (size: number | undefined): number => {
if (!size) {
return 0;
}
return size / BYTES;
};
const enableEditLimitMode = (
APIKey: IngestionKeyProps,
signal: LimitProps,
): void => {
setActiveAPIKey(APIKey);
setActiveSignal({
...signal,
config: {
...signal.config,
day: {
...signal.config?.day,
enabled: !isNil(signal?.config?.day?.size),
},
second: {
...signal.config?.second,
enabled: !isNil(signal?.config?.second?.size),
},
},
});
addEditLimitForm.setFieldsValue({
dailyLimit: bytesToGb(signal?.config?.day?.size || 0),
secondsLimit: bytesToGb(signal?.config?.second?.size || 0),
enableDailyLimit: !isNil(signal?.config?.day?.size),
enableSecondLimit: !isNil(signal?.config?.second?.size),
});
setIsEditAddLimitOpen(true);
};
const onDeleteLimitHandler = (): void => {
if (activeSignal && activeSignal?.id) {
deleteLimitForKey(activeSignal.id);
}
};
const columns: AntDTableProps<IngestionKeyProps>['columns'] = [
{
title: 'Ingestion Key',
@@ -684,50 +732,108 @@ function MultiIngestionSettings(): JSX.Element {
<div className="signal-limit-edit-mode">
<div className="daily-limit">
<div className="heading">
<div className="title"> Daily limit </div>
<div className="title">
Daily limit
<div className="limit-enable-disable-toggle">
<Form.Item name="enableDailyLimit">
<Switch
size="small"
checked={activeSignal?.config?.day?.enabled}
onChange={(value): void => {
setActiveSignal({
...activeSignal,
config: {
...activeSignal.config,
day: {
...activeSignal.config?.day,
enabled: value,
},
},
});
}}
/>
</Form.Item>
</div>
</div>
<div className="subtitle">
Add a limit for data ingested daily{' '}
Add a limit for data ingested daily
</div>
</div>
<div className="size">
<Form.Item name="dailyLimit">
<InputNumber
addonAfter={
<Select defaultValue="GiB" disabled>
<Option value="TiB"> TiB</Option>
<Option value="GiB"> GiB</Option>
<Option value="MiB"> MiB </Option>
<Option value="KiB"> KiB </Option>
</Select>
}
/>
</Form.Item>
{activeSignal?.config?.day?.enabled ? (
<Form.Item name="dailyLimit" key="dailyLimit">
<InputNumber
disabled={!activeSignal?.config?.day?.enabled}
key="dailyLimit"
addonAfter={
<Select defaultValue="GiB" disabled>
<Option value="TiB"> TiB</Option>
<Option value="GiB"> GiB</Option>
<Option value="MiB"> MiB </Option>
<Option value="KiB"> KiB </Option>
</Select>
}
/>
</Form.Item>
) : (
<div className="no-limit">
<Infinity size={16} /> NO LIMIT
</div>
)}
</div>
</div>
<div className="second-limit">
<div className="heading">
<div className="title"> Per Second limit </div>
<div className="title">
Per Second limit{' '}
<div className="limit-enable-disable-toggle">
<Form.Item name="enableSecondLimit">
<Switch
size="small"
checked={activeSignal?.config?.second?.enabled}
onChange={(value): void => {
setActiveSignal({
...activeSignal,
config: {
...activeSignal.config,
second: {
...activeSignal.config?.second,
enabled: value,
},
},
});
}}
/>
</Form.Item>
</div>
</div>
<div className="subtitle">
{' '}
Add a limit for data ingested every second{' '}
Add a limit for data ingested every second
</div>
</div>
<div className="size">
<Form.Item name="secondsLimit">
<InputNumber
addonAfter={
<Select defaultValue="GiB" disabled>
<Option value="TiB"> TiB</Option>
<Option value="GiB"> GiB</Option>
<Option value="MiB"> MiB </Option>
<Option value="KiB"> KiB </Option>
</Select>
}
/>
</Form.Item>
{activeSignal?.config?.second?.enabled ? (
<Form.Item name="secondsLimit" key="secondsLimit">
<InputNumber
key="secondsLimit"
disabled={!activeSignal?.config?.second?.enabled}
addonAfter={
<Select defaultValue="GiB" disabled>
<Option value="TiB"> TiB</Option>
<Option value="GiB"> GiB</Option>
<Option value="MiB"> MiB </Option>
<Option value="KiB"> KiB </Option>
</Select>
}
/>
</Form.Item>
) : (
<div className="no-limit">
<Infinity size={16} /> NO LIMIT
</div>
)}
</div>
</div>
</div>

View File

@@ -82,9 +82,8 @@ function ImportJSON({
const dashboardData = JSON.parse(editorValue) as DashboardData;
// Add validation for uuid
if (dashboardData.uuid !== undefined && dashboardData.uuid.trim() === '') {
// silently remove uuid if it is empty
// Remove uuid from the dashboard data in all cases - empty, duplicate, or any valid non-duplicate uuid
if (dashboardData.uuid !== undefined) {
delete dashboardData.uuid;
}

View File

@@ -202,6 +202,7 @@ function LogsExplorerViews({
id: 'severity_text--string----true',
},
],
legend: '{{severity_text}}',
};
const modifiedQuery: Query = {

View File

@@ -58,12 +58,17 @@ export const databaseCallsRPS = ({
const legends = [legend];
const dataSource = DataSource.METRICS;
const timeAggregateOperators = [MetricAggregateOperator.RATE];
const spaceAggregateOperators = [MetricAggregateOperator.SUM];
return getQueryBuilderQueries({
autocompleteData,
groupBy,
legends,
filterItems,
dataSource,
timeAggregateOperators,
spaceAggregateOperators,
});
};

View File

@@ -213,12 +213,17 @@ export const externalCallRpsByAddress = ({
const legends = [legend];
const dataSource = DataSource.METRICS;
const timeAggregateOperators = [MetricAggregateOperator.RATE];
const spaceAggregateOperators = [MetricAggregateOperator.SUM];
return getQueryBuilderQueries({
autocompleteData,
groupBy,
legends,
filterItems,
dataSource,
timeAggregateOperators,
spaceAggregateOperators,
});
};

View File

@@ -25,6 +25,8 @@ export const getQueryBuilderQueries = ({
aggregateOperator,
dataSource,
queryNameAndExpression,
timeAggregateOperators,
spaceAggregateOperators,
}: BuilderQueriesProps): QueryBuilderData => ({
queryFormulas: [],
queryData: autocompleteData.map((item, index) => {
@@ -50,6 +52,8 @@ export const getQueryBuilderQueries = ({
op: 'AND',
},
reduceTo: 'avg',
spaceAggregation: spaceAggregateOperators[index],
timeAggregation: timeAggregateOperators[index],
dataSource,
};

View File

@@ -83,6 +83,17 @@ export const latency = ({
const dataSource = isSpanMetricEnable ? DataSource.METRICS : DataSource.TRACES;
const queryNameAndExpression = QUERYNAME_AND_EXPRESSION;
const timeAggregateOperators = [
MetricAggregateOperator.EMPTY,
MetricAggregateOperator.EMPTY,
MetricAggregateOperator.EMPTY,
];
const spaceAggregateOperators = [
MetricAggregateOperator.P50,
MetricAggregateOperator.P90,
MetricAggregateOperator.P99,
];
return getQueryBuilderQueries({
autocompleteData,
legends,
@@ -90,6 +101,8 @@ export const latency = ({
aggregateOperator,
dataSource,
queryNameAndExpression,
timeAggregateOperators,
spaceAggregateOperators,
});
};
@@ -510,11 +523,16 @@ export const operationPerSec = ({
const legends = OPERATION_LEGENDS;
const dataSource = DataSource.METRICS;
const timeAggregateOperators = [MetricAggregateOperator.RATE];
const spaceAggregateOperators = [MetricAggregateOperator.SUM];
return getQueryBuilderQueries({
autocompleteData,
legends,
filterItems,
dataSource,
timeAggregateOperators,
spaceAggregateOperators,
});
};

View File

@@ -29,6 +29,8 @@ export interface BuilderQueriesProps {
aggregateOperator?: string[];
dataSource: DataSource;
queryNameAndExpression?: string[];
timeAggregateOperators: MetricAggregateOperator[];
spaceAggregateOperators: MetricAggregateOperator[];
}
export interface BuilderQuerieswithFormulaProps {

View File

@@ -2,18 +2,27 @@
import { DownloadOptions } from 'container/Download/Download.types';
import { MenuItemKeys } from 'container/GridCardLayout/WidgetHeader/contants';
import {
MetricAggregateOperator,
TracesAggregatorOperator,
} from 'types/common/queryBuilder';
export const legend = {
address: '{{address}}',
};
export const QUERYNAME_AND_EXPRESSION = ['A', 'B', 'C'];
export const LATENCY_AGGREGATEOPERATOR = ['p50', 'p90', 'p99'];
export const LATENCY_AGGREGATEOPERATOR_SPAN_METRICS = [
'hist_quantile_50',
'hist_quantile_90',
'hist_quantile_99',
export const LATENCY_AGGREGATEOPERATOR = [
TracesAggregatorOperator.P50,
TracesAggregatorOperator.P90,
TracesAggregatorOperator.P99,
];
export const LATENCY_AGGREGATEOPERATOR_SPAN_METRICS = [
MetricAggregateOperator.P50,
MetricAggregateOperator.P90,
MetricAggregateOperator.P99,
];
export const OPERATION_LEGENDS = ['Operations'];
export const MENU_ITEMS = [MenuItemKeys.View, MenuItemKeys.CreateAlerts];
@@ -21,8 +30,22 @@ export const MENU_ITEMS = [MenuItemKeys.View, MenuItemKeys.CreateAlerts];
export enum FORMULA {
ERROR_PERCENTAGE = 'A*100/B',
DATABASE_CALLS_AVG_DURATION = 'A/B',
// The apdex formula is (satisfied_count + 0.5 * tolerating_count + 0 * frustrated_count) / total_count
// The satisfied_count is B, tolerating_count is C, total_count is A
// But why do we have (B+C)/2 instead of B + C/2?
// The way we issue the query is latency <= threshold, which means we over count i.e
// query B => durationNano <= 500ms
// query C => durationNano <= 2000ms
// Since <= 2000ms includes <= 500ms, we over count, to correct we subtract B/2
// so the full expression would be (B + C/2) - B/2 = (B+C)/2
// However, if you add a filter on durationNano > 500ms (filterItemC in overviewQueries), the query would be
// B + C/2
APDEX_TRACES = '((B + C)/2)/A',
// The delta span metrics store delta compared to previous reporting interval
// but not the counts for the current interval. The bucket counts are cumulative
APDEX_DELTA_SPAN_METRICS = '((B + C)/2)/A',
// Cumulative span metrics store the counts for all buckets
// so we need to subtract B/2 to correct the over counting
APDEX_CUMULATIVE_SPAN_METRICS = '((B + C)/2)/A',
}
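
As a quick numeric check of the over-counting correction explained in the comments above, with hypothetical counts (a = total spans, b = spans with durationNano <= 500ms, c = spans with durationNano <= 2000ms, so c includes b):

const a = 100; // total spans
const b = 60; // satisfied: durationNano <= 500ms
const c = 80; // durationNano <= 2000ms; still includes the 60 satisfied spans
const direct = (b + 0.5 * (c - b)) / a; // textbook apdex, (satisfied + 0.5 * tolerating) / total = 0.7
const folded = (b + c) / 2 / a; // the ((B + C)/2)/A form above, also 0.7
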

View File

@@ -1,4 +1,4 @@
import { useEffect, useRef, useState } from 'react';
import { useCallback, useEffect, useState } from 'react';
import { ErrorResponse, SuccessResponse } from 'types/api';
function useFetch<PayloadProps, FunctionParams>(
@@ -10,7 +10,7 @@ function useFetch<PayloadProps, FunctionParams>(
(arg0: any): Promise<SuccessResponse<PayloadProps> | ErrorResponse>;
},
param?: FunctionParams,
): State<PayloadProps | undefined> {
): State<PayloadProps | undefined> & { refetch: () => Promise<void> } {
const [state, setStates] = useState<State<PayloadProps | undefined>>({
loading: true,
success: null,
@@ -19,37 +19,28 @@ function useFetch<PayloadProps, FunctionParams>(
payload: undefined,
});
const loadingRef = useRef(0);
useEffect(() => {
const fetchData = useCallback(async (): Promise<void> => {
setStates((prev) => ({ ...prev, loading: true }));
try {
(async (): Promise<void> => {
if (state.loading) {
const response = await functions(param);
const response = await functions(param);
if (loadingRef.current === 0) {
loadingRef.current = 1;
if (response.statusCode === 200) {
setStates({
loading: false,
error: false,
success: true,
payload: response.payload,
errorMessage: '',
});
} else {
setStates({
loading: false,
error: true,
success: false,
payload: undefined,
errorMessage: response.error as string,
});
}
}
}
})();
if (response.statusCode === 200) {
setStates({
loading: false,
error: false,
success: true,
payload: response.payload,
errorMessage: '',
});
} else {
setStates({
loading: false,
error: true,
success: false,
payload: undefined,
errorMessage: response.error as string,
});
}
} catch (error) {
setStates({
payload: undefined,
@@ -59,13 +50,16 @@ function useFetch<PayloadProps, FunctionParams>(
errorMessage: error as string,
});
}
return (): void => {
loadingRef.current = 1;
};
}, [functions, param, state.loading]);
}, [functions, param]);
// Initial fetch
useEffect(() => {
fetchData();
}, [fetchData]);
return {
...state,
refetch: fetchData,
};
}
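
A hedged usage sketch of the reworked hook: callers can now trigger a refetch imperatively, as BasicInfo's refetchChannels does when the channel dropdown opens. The import paths and the getAll fetcher are assumptions for illustration.

import useFetch from 'hooks/useFetch';
import getAll from 'api/channels/getAll'; // illustrative path

function useChannelsWithRefresh() {
  const channels = useFetch(getAll);
  // refetch() re-runs the memoized fetchData, flipping loading on while in flight
  const onDropdownOpen = async (): Promise<void> => {
    await channels.refetch();
  };
  return { channels, onDropdownOpen };
}
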

View File

@@ -8,17 +8,21 @@ export interface LimitProps {
config?: {
day?: {
size?: number;
enabled?: boolean;
};
second?: {
size?: number;
enabled?: boolean;
};
};
metric?: {
day?: {
size?: number;
enabled?: boolean;
};
second?: {
size?: number;
enabled?: boolean;
};
};
}
@@ -27,11 +31,13 @@ export interface AddLimitProps {
keyID: string;
signal: string;
config: {
day: {
size: number;
day?: {
size?: number;
enabled?: boolean;
};
second: {
size: number;
second?: {
size?: number;
enabled?: boolean;
};
};
}
@@ -40,11 +46,13 @@ export interface UpdateLimitProps {
limitID: string;
signal: string;
config: {
day: {
size: number;
day?: {
size?: number;
enabled?: boolean;
};
second: {
size: number;
second?: {
size?: number;
enabled?: boolean;
};
};
}
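
A hedged example payload against the loosened types above, showing that either limit can now be omitted independently (values are illustrative):

const addLogsDailyLimit: AddLimitProps = {
  keyID: 'ingestion-key-id', // hypothetical key
  signal: 'logs',
  config: {
    day: { size: 50 * 1024 ** 3, enabled: true }, // 50 GiB per day
    // `second` omitted entirely: no per-second limit
  },
};
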

2
go.mod
View File

@@ -8,7 +8,7 @@ require (
github.com/ClickHouse/clickhouse-go/v2 v2.25.0
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
github.com/SigNoz/signoz-otel-collector v0.111.5
github.com/SigNoz/signoz-otel-collector v0.111.8
github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974
github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974
github.com/antonmedv/expr v1.15.3

4
go.sum
View File

@@ -70,8 +70,8 @@ github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkb
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc=
github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PUmIE=
github.com/SigNoz/prometheus v1.12.0/go.mod h1:EqNM27OwmPfqMUk+E+XG1L9rfDFcyXnzzDrg0EPOfxA=
github.com/SigNoz/signoz-otel-collector v0.111.5 h1:kLpJSv9U46doA+89nfUvTLcNb6WbIxiMAtNlTNL88ZE=
github.com/SigNoz/signoz-otel-collector v0.111.5/go.mod h1:/nyVFDiEz/QBfyqekB3zRwstZ/KSIB85qgV9NnzAtig=
github.com/SigNoz/signoz-otel-collector v0.111.8 h1:t3V3Ahue2ucryRdHvqz33zRCPGQ86xkAsx9J23ZNPk0=
github.com/SigNoz/signoz-otel-collector v0.111.8/go.mod h1:/nyVFDiEz/QBfyqekB3zRwstZ/KSIB85qgV9NnzAtig=
github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc=
github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo=
github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY=

View File

@@ -766,307 +766,6 @@ func buildFilterArrayQuery(_ context.Context, excludeMap map[string]struct{}, pa
return args
}
func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *model.SpanFilterParams) (*model.SpanFiltersResponse, *model.ApiError) {
var query string
excludeMap := make(map[string]struct{})
for _, e := range queryParams.Exclude {
if e == constants.OperationRequest {
excludeMap[constants.OperationDB] = struct{}{}
continue
}
excludeMap[e] = struct{}{}
}
args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
if len(queryParams.TraceID) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
}
if len(queryParams.ServiceName) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
}
if len(queryParams.HttpRoute) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
}
if len(queryParams.HttpHost) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
}
if len(queryParams.HttpMethod) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
}
if len(queryParams.HttpUrl) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
}
if len(queryParams.Operation) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
}
if len(queryParams.RPCMethod) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
}
if len(queryParams.ResponseStatusCode) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
}
if len(queryParams.MinDuration) != 0 {
query = query + " AND durationNano >= @durationNanoMin"
args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
}
if len(queryParams.MaxDuration) != 0 {
query = query + " AND durationNano <= @durationNanoMax"
args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
}
if len(queryParams.SpanKind) != 0 {
query = query + " AND kind = @kind"
args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
}
query = getStatusFilters(query, queryParams.Status, excludeMap)
traceFilterReponse := model.SpanFiltersResponse{
Status: map[string]uint64{},
Duration: map[string]uint64{},
ServiceName: map[string]uint64{},
Operation: map[string]uint64{},
ResponseStatusCode: map[string]uint64{},
RPCMethod: map[string]uint64{},
HttpMethod: map[string]uint64{},
HttpUrl: map[string]uint64{},
HttpRoute: map[string]uint64{},
HttpHost: map[string]uint64{},
}
for _, e := range queryParams.GetFilters {
switch e {
case constants.TraceID:
continue
case constants.ServiceName:
finalQuery := fmt.Sprintf("SELECT serviceName, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
finalQuery += query
finalQuery += " GROUP BY serviceName"
var dBResponse []model.DBResponseServiceName
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
if service.ServiceName != "" {
traceFilterReponse.ServiceName[service.ServiceName] = service.Count
}
}
case constants.HttpRoute:
finalQuery := fmt.Sprintf("SELECT httpRoute, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
finalQuery += query
finalQuery += " GROUP BY httpRoute"
var dBResponse []model.DBResponseHttpRoute
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
if service.HttpRoute != "" {
traceFilterReponse.HttpRoute[service.HttpRoute] = service.Count
}
}
case constants.HttpUrl:
finalQuery := fmt.Sprintf("SELECT httpUrl, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
finalQuery += query
finalQuery += " GROUP BY httpUrl"
var dBResponse []model.DBResponseHttpUrl
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
if service.HttpUrl != "" {
traceFilterReponse.HttpUrl[service.HttpUrl] = service.Count
}
}
case constants.HttpMethod:
finalQuery := fmt.Sprintf("SELECT httpMethod, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
finalQuery += query
finalQuery += " GROUP BY httpMethod"
var dBResponse []model.DBResponseHttpMethod
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
if service.HttpMethod != "" {
traceFilterReponse.HttpMethod[service.HttpMethod] = service.Count
}
}
case constants.HttpHost:
finalQuery := fmt.Sprintf("SELECT httpHost, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
finalQuery += query
finalQuery += " GROUP BY httpHost"
var dBResponse []model.DBResponseHttpHost
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
if service.HttpHost != "" {
traceFilterReponse.HttpHost[service.HttpHost] = service.Count
}
}
case constants.OperationRequest:
finalQuery := fmt.Sprintf("SELECT name, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
finalQuery += query
finalQuery += " GROUP BY name"
var dBResponse []model.DBResponseOperation
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
if service.Operation != "" {
traceFilterReponse.Operation[service.Operation] = service.Count
}
}
case constants.Status:
finalQuery := fmt.Sprintf("SELECT COUNT(*) as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU AND hasError = true", r.TraceDB, r.indexTable)
finalQuery += query
var dBResponse []model.DBResponseTotal
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
finalQuery2 := fmt.Sprintf("SELECT COUNT(*) as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU AND hasError = false", r.TraceDB, r.indexTable)
finalQuery2 += query
var dBResponse2 []model.DBResponseTotal
err = r.db.Select(ctx, &dBResponse2, finalQuery2, args...)
zap.L().Info(finalQuery2)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
if len(dBResponse) > 0 && len(dBResponse2) > 0 {
traceFilterReponse.Status = map[string]uint64{"ok": dBResponse2[0].NumTotal, "error": dBResponse[0].NumTotal}
} else if len(dBResponse) > 0 {
traceFilterReponse.Status = map[string]uint64{"ok": 0, "error": dBResponse[0].NumTotal}
} else if len(dBResponse2) > 0 {
traceFilterReponse.Status = map[string]uint64{"ok": dBResponse2[0].NumTotal, "error": 0}
} else {
traceFilterReponse.Status = map[string]uint64{"ok": 0, "error": 0}
}
case constants.Duration:
err := r.featureFlags.CheckFeature(constants.DurationSort)
durationSortEnabled := err == nil
finalQuery := ""
if !durationSortEnabled {
// if duration sort is not enabled, we need to get the min and max duration from the index table
finalQuery = fmt.Sprintf("SELECT min(durationNano) as min, max(durationNano) as max FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
finalQuery += query
var dBResponse []model.DBResponseMinMax
err = r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
if len(dBResponse) > 0 {
traceFilterReponse.Duration = map[string]uint64{"minDuration": dBResponse[0].Min, "maxDuration": dBResponse[0].Max}
}
} else {
// when duration sort is enabled, we need to get the min and max duration from the duration table
finalQuery = fmt.Sprintf("SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.durationTable)
finalQuery += query
finalQuery += " ORDER BY durationNano LIMIT 1"
var dBResponse []model.DBResponseTotal
err = r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
finalQuery = fmt.Sprintf("SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.durationTable)
finalQuery += query
finalQuery += " ORDER BY durationNano DESC LIMIT 1"
var dBResponse2 []model.DBResponseTotal
err = r.db.Select(ctx, &dBResponse2, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
if len(dBResponse) > 0 {
traceFilterReponse.Duration["minDuration"] = dBResponse[0].NumTotal
}
if len(dBResponse2) > 0 {
traceFilterReponse.Duration["maxDuration"] = dBResponse2[0].NumTotal
}
}
case constants.RPCMethod:
finalQuery := fmt.Sprintf("SELECT rpcMethod, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
finalQuery += query
finalQuery += " GROUP BY rpcMethod"
var dBResponse []model.DBResponseRPCMethod
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
if service.RPCMethod != "" {
traceFilterReponse.RPCMethod[service.RPCMethod] = service.Count
}
}
case constants.ResponseStatusCode:
finalQuery := fmt.Sprintf("SELECT responseStatusCode, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
finalQuery += query
finalQuery += " GROUP BY responseStatusCode"
var dBResponse []model.DBResponseStatusCodeMethod
err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
for _, service := range dBResponse {
if service.ResponseStatusCode != "" {
traceFilterReponse.ResponseStatusCode[service.ResponseStatusCode] = service.Count
}
}
default:
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("filter type: %s not supported", e)}
}
}
return &traceFilterReponse, nil
}
func getStatusFilters(query string, statusParams []string, excludeMap map[string]struct{}) string {
// status can only be two and if both are selected then they are equivalent to none selected
@@ -1088,140 +787,6 @@ func getStatusFilters(query string, statusParams []string, excludeMap map[string
return query
}
func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *model.GetFilteredSpansParams) (*model.GetFilterSpansResponse, *model.ApiError) {
queryTable := fmt.Sprintf("%s.%s", r.TraceDB, r.indexTable)
excludeMap := make(map[string]struct{})
for _, e := range queryParams.Exclude {
if e == constants.OperationRequest {
excludeMap[constants.OperationDB] = struct{}{}
continue
}
excludeMap[e] = struct{}{}
}
var query string
args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
if len(queryParams.TraceID) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
}
if len(queryParams.ServiceName) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
}
if len(queryParams.HttpRoute) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
}
if len(queryParams.HttpHost) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
}
if len(queryParams.HttpMethod) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
}
if len(queryParams.HttpUrl) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
}
if len(queryParams.Operation) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
}
if len(queryParams.RPCMethod) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
}
if len(queryParams.ResponseStatusCode) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
}
if len(queryParams.MinDuration) != 0 {
query = query + " AND durationNano >= @durationNanoMin"
args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
}
if len(queryParams.MaxDuration) != 0 {
query = query + " AND durationNano <= @durationNanoMax"
args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
}
query = getStatusFilters(query, queryParams.Status, excludeMap)
if len(queryParams.SpanKind) != 0 {
query = query + " AND kind = @kind"
args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
}
// create TagQuery from TagQueryParams
tags := createTagQueryFromTagQueryParams(queryParams.Tags)
subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
if errStatus != nil {
return nil, errStatus
}
query += subQuery
args = append(args, argsSubQuery...)
if len(queryParams.OrderParam) != 0 {
if queryParams.OrderParam == constants.Duration {
queryTable = fmt.Sprintf("%s.%s", r.TraceDB, r.durationTable)
if queryParams.Order == constants.Descending {
query = query + " ORDER BY durationNano DESC"
}
if queryParams.Order == constants.Ascending {
query = query + " ORDER BY durationNano ASC"
}
} else if queryParams.OrderParam == constants.Timestamp {
projectionOptQuery := "SET allow_experimental_projection_optimization = 1"
err := r.db.Exec(ctx, projectionOptQuery)
zap.L().Info(projectionOptQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
if queryParams.Order == constants.Descending {
query = query + " ORDER BY timestamp DESC"
}
if queryParams.Order == constants.Ascending {
query = query + " ORDER BY timestamp ASC"
}
}
}
if queryParams.Limit > 0 {
query = query + " LIMIT @limit"
args = append(args, clickhouse.Named("limit", queryParams.Limit))
}
if queryParams.Offset > 0 {
query = query + " OFFSET @offset"
args = append(args, clickhouse.Named("offset", queryParams.Offset))
}
var getFilterSpansResponseItems []model.GetFilterSpansResponseItem
baseQuery := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, durationNano, httpMethod, rpcMethod, responseStatusCode FROM %s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryTable)
baseQuery += query
err := r.db.Select(ctx, &getFilterSpansResponseItems, baseQuery, args...)
zap.L().Info(baseQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
// Fill status and method only after the query has succeeded
for i, e := range getFilterSpansResponseItems {
if e.RPCMethod != "" {
getFilterSpansResponseItems[i].Method = e.RPCMethod
} else {
getFilterSpansResponseItems[i].Method = e.HttpMethod
}
}
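// NOTE: TotalSpans below is a hardcoded placeholder; this endpoint does not
// compute the actual count of matching spans.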
getFilterSpansResponse := model.GetFilterSpansResponse{
Spans: getFilterSpansResponseItems,
TotalSpans: 1000,
}
return &getFilterSpansResponse, nil
}
func createTagQueryFromTagQueryParams(queryParams []model.TagQueryParam) []model.TagQuery {
tags := []model.TagQuery{}
for _, tag := range queryParams {
@@ -1379,87 +944,6 @@ func addExistsOperator(item model.TagQuery, tagMapType string, not bool) (string
return fmt.Sprintf(" AND %s (%s)", notStr, strings.Join(tagOperatorPair, " OR ")), args
}
func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model.TagFilterParams) (*model.TagFilters, *model.ApiError) {
excludeMap := make(map[string]struct{})
for _, e := range queryParams.Exclude {
if e == constants.OperationRequest {
excludeMap[constants.OperationDB] = struct{}{}
continue
}
excludeMap[e] = struct{}{}
}
var query string
args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
if len(queryParams.TraceID) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
}
if len(queryParams.ServiceName) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
}
if len(queryParams.HttpRoute) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
}
if len(queryParams.HttpHost) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
}
if len(queryParams.HttpMethod) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
}
if len(queryParams.HttpUrl) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
}
if len(queryParams.Operation) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
}
if len(queryParams.RPCMethod) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
}
if len(queryParams.ResponseStatusCode) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
}
if len(queryParams.MinDuration) != 0 {
query = query + " AND durationNano >= @durationNanoMin"
args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
}
if len(queryParams.MaxDuration) != 0 {
query = query + " AND durationNano <= @durationNanoMax"
args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
}
if len(queryParams.SpanKind) != 0 {
query = query + " AND kind = @kind"
args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
}
query = getStatusFilters(query, queryParams.Status, excludeMap)
tagFilters := []model.TagFilters{}
// Alternative finalQuery := fmt.Sprintf(`SELECT DISTINCT arrayJoin(tagMap.keys) as tagKeys FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU`, r.TraceDB, r.indexTable)
finalQuery := fmt.Sprintf(`SELECT groupUniqArrayArray(mapKeys(stringTagMap)) as stringTagKeys, groupUniqArrayArray(mapKeys(numberTagMap)) as numberTagKeys, groupUniqArrayArray(mapKeys(boolTagMap)) as boolTagKeys FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU`, r.TraceDB, r.indexTable)
finalQuery += query
err := r.db.Select(ctx, &tagFilters, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
}
tagFiltersResult := model.TagFilters{
StringTagKeys: make([]string, 0),
NumberTagKeys: make([]string, 0),
BoolTagKeys: make([]string, 0),
}
if len(tagFilters) != 0 {
tagFiltersResult.StringTagKeys = excludeTags(ctx, tagFilters[0].StringTagKeys)
tagFiltersResult.NumberTagKeys = excludeTags(ctx, tagFilters[0].NumberTagKeys)
tagFiltersResult.BoolTagKeys = excludeTags(ctx, tagFilters[0].BoolTagKeys)
}
return &tagFiltersResult, nil
}
func excludeTags(_ context.Context, tags []string) []string {
excludedTagsMap := map[string]bool{
"http.code": true,
@@ -1483,102 +967,6 @@ func excludeTags(_ context.Context, tags []string) []string {
return newTags
}
func (r *ClickHouseReader) GetTagValues(ctx context.Context, queryParams *model.TagFilterParams) (*model.TagValues, *model.ApiError) {
if queryParams.TagKey.Type == model.TagTypeNumber {
return &model.TagValues{
NumberTagValues: make([]float64, 0),
StringTagValues: make([]string, 0),
BoolTagValues: make([]bool, 0),
}, nil
} else if queryParams.TagKey.Type == model.TagTypeBool {
return &model.TagValues{
NumberTagValues: make([]float64, 0),
StringTagValues: make([]string, 0),
BoolTagValues: []bool{true, false},
}, nil
}
excludeMap := make(map[string]struct{})
for _, e := range queryParams.Exclude {
if e == constants.OperationRequest {
excludeMap[constants.OperationDB] = struct{}{}
continue
}
excludeMap[e] = struct{}{}
}
var query string
args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
if len(queryParams.TraceID) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
}
if len(queryParams.ServiceName) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
}
if len(queryParams.HttpRoute) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
}
if len(queryParams.HttpHost) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
}
if len(queryParams.HttpMethod) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
}
if len(queryParams.HttpUrl) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
}
if len(queryParams.Operation) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
}
if len(queryParams.MinDuration) != 0 {
query = query + " AND durationNano >= @durationNanoMin"
args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
}
if len(queryParams.MaxDuration) != 0 {
query = query + " AND durationNano <= @durationNanoMax"
args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
}
if len(queryParams.SpanKind) != 0 {
query = query + " AND kind = @kind"
args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
}
query = getStatusFilters(query, queryParams.Status, excludeMap)
tagValues := []model.TagValues{}
finalQuery := fmt.Sprintf(`SELECT groupArray(DISTINCT stringTagMap[@key]) as stringTagValues FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU`, r.TraceDB, r.indexTable)
finalQuery += query
finalQuery += " LIMIT @limit"
args = append(args, clickhouse.Named("key", queryParams.TagKey.Key))
args = append(args, clickhouse.Named("limit", queryParams.Limit))
err := r.db.Select(ctx, &tagValues, finalQuery, args...)
zap.L().Info(finalQuery)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
}
cleanedTagValues := model.TagValues{
StringTagValues: []string{},
NumberTagValues: []float64{},
BoolTagValues: []bool{},
}
if len(tagValues) == 0 {
return &cleanedTagValues, nil
}
for _, e := range tagValues[0].StringTagValues {
if e != "" {
cleanedTagValues.StringTagValues = append(cleanedTagValues.StringTagValues, e)
}
}
return &cleanedTagValues, nil
}
func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *model.GetTopOperationsParams) (*[]model.TopOperationsItem, *model.ApiError) {
namedArgs := []interface{}{
@@ -1823,185 +1211,6 @@ func (r *ClickHouseReader) GetDependencyGraph(ctx context.Context, queryParams *
return &response, nil
}
func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, queryParams *model.GetFilteredSpanAggregatesParams) (*model.GetFilteredSpansAggregatesResponse, *model.ApiError) {
excludeMap := make(map[string]struct{})
for _, e := range queryParams.Exclude {
if e == constants.OperationRequest {
excludeMap[constants.OperationDB] = struct{}{}
continue
}
excludeMap[e] = struct{}{}
}
SpanAggregatesDBResponseItems := []model.SpanAggregatesDBResponseItem{}
aggregation_query := ""
if queryParams.Dimension == "duration" {
switch queryParams.AggregationOption {
case "p50":
aggregation_query = " quantile(0.50)(durationNano) as float64Value "
case "p95":
aggregation_query = " quantile(0.95)(durationNano) as float64Value "
case "p90":
aggregation_query = " quantile(0.90)(durationNano) as float64Value "
case "p99":
aggregation_query = " quantile(0.99)(durationNano) as float64Value "
case "max":
aggregation_query = " max(durationNano) as value "
case "min":
aggregation_query = " min(durationNano) as value "
case "avg":
aggregation_query = " avg(durationNano) as float64Value "
case "sum":
aggregation_query = " sum(durationNano) as value "
default:
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("aggregate type: %s not supported", queryParams.AggregationOption)}
}
} else if queryParams.Dimension == "calls" {
aggregation_query = " count(*) as value "
}
args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
var query string
var customStr []string
_, columnExists := constants.GroupByColMap[queryParams.GroupBy]
// Using %s for groupBy params as it can be a custom column and custom columns are not supported by clickhouse-go yet:
// issue link: https://github.com/ClickHouse/clickhouse-go/issues/870
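// Illustration (assumed clickhouse-go behavior): named parameters bind values,
// not identifiers, so a column name chosen at runtime must be interpolated:
//
//	SELECT stringTagMap[@key] ...   // OK: @key is a value
//	SELECT %s as groupBy ...        // identifier: needs fmt.Sprintf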
if queryParams.GroupBy != "" && columnExists {
query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, queryParams.GroupBy, aggregation_query, r.TraceDB, r.indexTable)
args = append(args, clickhouse.Named("groupByVar", queryParams.GroupBy))
} else if queryParams.GroupBy != "" {
customStr = strings.Split(queryParams.GroupBy, ".(")
if len(customStr) < 2 {
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("GroupBy: %s not supported", queryParams.GroupBy)}
}
if customStr[1] == string(model.TagTypeString)+")" {
query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, stringTagMap['%s'] as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, customStr[0], aggregation_query, r.TraceDB, r.indexTable)
} else if customStr[1] == string(model.TagTypeNumber)+")" {
query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, toString(numberTagMap['%s']) as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, customStr[0], aggregation_query, r.TraceDB, r.indexTable)
} else if customStr[1] == string(model.TagTypeBool)+")" {
query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, toString(boolTagMap['%s']) as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, customStr[0], aggregation_query, r.TraceDB, r.indexTable)
} else {
// return error for unsupported group by
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("GroupBy: %s not supported", queryParams.GroupBy)}
}
} else {
query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.TraceDB, r.indexTable)
}
if len(queryParams.TraceID) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
}
if len(queryParams.ServiceName) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
}
if len(queryParams.HttpRoute) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
}
if len(queryParams.HttpHost) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
}
if len(queryParams.HttpMethod) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
}
if len(queryParams.HttpUrl) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
}
if len(queryParams.Operation) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
}
if len(queryParams.RPCMethod) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
}
if len(queryParams.ResponseStatusCode) > 0 {
args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
}
if len(queryParams.MinDuration) != 0 {
query = query + " AND durationNano >= @durationNanoMin"
args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
}
if len(queryParams.MaxDuration) != 0 {
query = query + " AND durationNano <= @durationNanoMax"
args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
}
query = getStatusFilters(query, queryParams.Status, excludeMap)
if len(queryParams.SpanKind) != 0 {
query = query + " AND kind = @kind"
args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
}
// create TagQuery from TagQueryParams
tags := createTagQueryFromTagQueryParams(queryParams.Tags)
subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
if errStatus != nil {
return nil, errStatus
}
query += subQuery
args = append(args, argsSubQuery...)
if queryParams.GroupBy != "" && columnExists {
query = query + fmt.Sprintf(" GROUP BY time, %s as groupBy ORDER BY time", queryParams.GroupBy)
} else if queryParams.GroupBy != "" {
if customStr[1] == string(model.TagTypeString)+")" {
query = query + fmt.Sprintf(" GROUP BY time, stringTagMap['%s'] as groupBy ORDER BY time", customStr[0])
} else if customStr[1] == string(model.TagTypeNumber)+")" {
query = query + fmt.Sprintf(" GROUP BY time, toString(numberTagMap['%s']) as groupBy ORDER BY time", customStr[0])
} else if customStr[1] == string(model.TagTypeBool)+")" {
query = query + fmt.Sprintf(" GROUP BY time, toString(boolTagMap['%s']) as groupBy ORDER BY time", customStr[0])
}
} else {
query = query + " GROUP BY time ORDER BY time"
}
err := r.db.Select(ctx, &SpanAggregatesDBResponseItems, query, args...)
zap.L().Info(query)
if err != nil {
zap.L().Error("Error in processing sql query", zap.Error(err))
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
}
GetFilteredSpansAggregatesResponse := model.GetFilteredSpansAggregatesResponse{
Items: map[int64]model.SpanAggregatesResponseItem{},
}
for i := range SpanAggregatesDBResponseItems {
if SpanAggregatesDBResponseItems[i].Value == 0 {
SpanAggregatesDBResponseItems[i].Value = uint64(SpanAggregatesDBResponseItems[i].Float64Value)
}
SpanAggregatesDBResponseItems[i].Timestamp = int64(SpanAggregatesDBResponseItems[i].Time.UnixNano())
SpanAggregatesDBResponseItems[i].FloatValue = float32(SpanAggregatesDBResponseItems[i].Value)
if queryParams.AggregationOption == "rate_per_sec" {
SpanAggregatesDBResponseItems[i].FloatValue = float32(SpanAggregatesDBResponseItems[i].Value) / float32(queryParams.StepSeconds)
}
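// Worked example: with StepSeconds = 60 and a bucket value of 120 spans,
// "rate_per_sec" reports 120 / 60 = 2.0 spans per second for that bucket.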
if responseElement, ok := GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp]; !ok {
if queryParams.GroupBy != "" && SpanAggregatesDBResponseItems[i].GroupBy != "" {
GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = model.SpanAggregatesResponseItem{
Timestamp: SpanAggregatesDBResponseItems[i].Timestamp,
GroupBy: map[string]float32{SpanAggregatesDBResponseItems[i].GroupBy: SpanAggregatesDBResponseItems[i].FloatValue},
}
} else if queryParams.GroupBy == "" {
GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = model.SpanAggregatesResponseItem{
Timestamp: SpanAggregatesDBResponseItems[i].Timestamp,
Value: SpanAggregatesDBResponseItems[i].FloatValue,
}
}
} else {
if queryParams.GroupBy != "" && SpanAggregatesDBResponseItems[i].GroupBy != "" {
responseElement.GroupBy[SpanAggregatesDBResponseItems[i].GroupBy] = SpanAggregatesDBResponseItems[i].FloatValue
}
GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = responseElement
}
}
return &GetFilteredSpansAggregatesResponse, nil
}
func getLocalTableName(tableName string) string {
tableNameSplit := strings.Split(tableName, ".")


@@ -119,6 +119,11 @@ type APIHandler struct {
nodesRepo *inframetrics.NodesRepo
namespacesRepo *inframetrics.NamespacesRepo
clustersRepo *inframetrics.ClustersRepo
// workloads
deploymentsRepo *inframetrics.DeploymentsRepo
daemonsetsRepo *inframetrics.DaemonSetsRepo
statefulsetsRepo *inframetrics.StatefulSetsRepo
jobsRepo *inframetrics.JobsRepo
}
type APIHandlerOpts struct {
@@ -197,6 +202,10 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
nodesRepo := inframetrics.NewNodesRepo(opts.Reader, querierv2)
namespacesRepo := inframetrics.NewNamespacesRepo(opts.Reader, querierv2)
clustersRepo := inframetrics.NewClustersRepo(opts.Reader, querierv2)
deploymentsRepo := inframetrics.NewDeploymentsRepo(opts.Reader, querierv2)
daemonsetsRepo := inframetrics.NewDaemonSetsRepo(opts.Reader, querierv2)
statefulsetsRepo := inframetrics.NewStatefulSetsRepo(opts.Reader, querierv2)
jobsRepo := inframetrics.NewJobsRepo(opts.Reader, querierv2)
aH := &APIHandler{
reader: opts.Reader,
@@ -222,6 +231,10 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
nodesRepo: nodesRepo,
namespacesRepo: namespacesRepo,
clustersRepo: clustersRepo,
deploymentsRepo: deploymentsRepo,
daemonsetsRepo: daemonsetsRepo,
statefulsetsRepo: statefulsetsRepo,
jobsRepo: jobsRepo,
}
logsQueryBuilder := logsv3.PrepareLogsQuery
@@ -319,6 +332,8 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
code = http.StatusUnauthorized
case model.ErrorForbidden:
code = http.StatusForbidden
case model.ErrorConflict:
code = http.StatusConflict
default:
code = http.StatusInternalServerError
}
@@ -400,6 +415,26 @@ func (aH *APIHandler) RegisterInfraMetricsRoutes(router *mux.Router, am *AuthMid
clustersSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getClusterAttributeKeys)).Methods(http.MethodGet)
clustersSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getClusterAttributeValues)).Methods(http.MethodGet)
clustersSubRouter.HandleFunc("/list", am.ViewAccess(aH.getClusterList)).Methods(http.MethodPost)
deploymentsSubRouter := router.PathPrefix("/api/v1/deployments").Subrouter()
deploymentsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getDeploymentAttributeKeys)).Methods(http.MethodGet)
deploymentsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getDeploymentAttributeValues)).Methods(http.MethodGet)
deploymentsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getDeploymentList)).Methods(http.MethodPost)
daemonsetsSubRouter := router.PathPrefix("/api/v1/daemonsets").Subrouter()
daemonsetsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getDaemonSetAttributeKeys)).Methods(http.MethodGet)
daemonsetsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getDaemonSetAttributeValues)).Methods(http.MethodGet)
daemonsetsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getDaemonSetList)).Methods(http.MethodPost)
statefulsetsSubRouter := router.PathPrefix("/api/v1/statefulsets").Subrouter()
statefulsetsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getStatefulSetAttributeKeys)).Methods(http.MethodGet)
statefulsetsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getStatefulSetAttributeValues)).Methods(http.MethodGet)
statefulsetsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getStatefulSetList)).Methods(http.MethodPost)
jobsSubRouter := router.PathPrefix("/api/v1/jobs").Subrouter()
jobsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getJobAttributeKeys)).Methods(http.MethodGet)
jobsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getJobAttributeValues)).Methods(http.MethodGet)
jobsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getJobList)).Methods(http.MethodPost)
}
func (aH *APIHandler) RegisterWebSocketPaths(router *mux.Router, am *AuthMiddleware) {
@@ -491,12 +526,6 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
router.HandleFunc("/api/v1/configs", am.OpenAccess(aH.getConfigs)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/health", am.OpenAccess(aH.getHealth)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/getSpanFilters", am.ViewAccess(aH.getSpanFilters)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/getTagFilters", am.ViewAccess(aH.getTagFilters)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/getFilteredSpans", am.ViewAccess(aH.getFilteredSpans)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/getFilteredSpans/aggregates", am.ViewAccess(aH.getFilteredSpanAggregates)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/getTagValues", am.ViewAccess(aH.getTagValues)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/listErrors", am.ViewAccess(aH.listErrors)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/countErrors", am.ViewAccess(aH.countErrors)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/errorFromErrorID", am.ViewAccess(aH.getErrorFromErrorID)).Methods(http.MethodGet)
@@ -1812,86 +1841,6 @@ func (aH *APIHandler) getErrorFromGroupID(w http.ResponseWriter, r *http.Request
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) getSpanFilters(w http.ResponseWriter, r *http.Request) {
query, err := parseSpanFilterRequestBody(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
result, apiErr := aH.reader.GetSpanFilters(r.Context(), query)
if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) getFilteredSpans(w http.ResponseWriter, r *http.Request) {
query, err := parseFilteredSpansRequest(r, aH)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
result, apiErr := aH.reader.GetFilteredSpans(r.Context(), query)
if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) getFilteredSpanAggregates(w http.ResponseWriter, r *http.Request) {
query, err := parseFilteredSpanAggregatesRequest(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
result, apiErr := aH.reader.GetFilteredSpansAggregates(r.Context(), query)
if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) getTagFilters(w http.ResponseWriter, r *http.Request) {
query, err := parseTagFilterRequest(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
result, apiErr := aH.reader.GetTagFilters(r.Context(), query)
if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) getTagValues(w http.ResponseWriter, r *http.Request) {
query, err := parseTagValueRequest(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
result, apiErr := aH.reader.GetTagValues(r.Context(), query)
if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) setTTL(w http.ResponseWriter, r *http.Request) {
ttlParams, err := parseTTLParams(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
@@ -3222,16 +3171,16 @@ func (aH *APIHandler) getProducerThroughputOverview(
}
for _, res := range result {
for _, list := range res.List {
serviceName, serviceNameOk := list.Data["service_name"].(*string)
topicName, topicNameOk := list.Data["topic"].(*string)
// guard before dereferencing: a failed type assertion yields a nil pointer
if !serviceNameOk || !topicNameOk {
continue
}
params := []string{*serviceName, *topicName}
hashKey := uniqueIdentifier(params, "#")
if _, ok := attributeCache.Hash[hashKey]; !ok {
attributeCache.Hash[hashKey] = struct{}{}
attributeCache.TopicName = append(attributeCache.TopicName, *topicName)
attributeCache.ServiceName = append(attributeCache.ServiceName, *serviceName)
}
}
}
@@ -3256,25 +3205,23 @@ func (aH *APIHandler) getProducerThroughputOverview(
}
latencyColumn := &v3.Result{QueryName: "latency"}
var latencySeries []*v3.Row
for _, res := range resultFetchLatency {
for _, list := range res.List {
topic, topicOk := list.Data["topic"].(*string)
serviceName, serviceNameOk := list.Data["service_name"].(*string)
// guard before dereferencing: a failed type assertion yields a nil pointer
if !topicOk || !serviceNameOk {
continue
}
params := []string{*serviceName, *topic}
hashKey := uniqueIdentifier(params, "#")
if _, ok := attributeCache.Hash[hashKey]; ok {
latencySeries = append(latencySeries, list)
}
}
}
latencyColumn.List = latencySeries
result = append(result, latencyColumn)
resultFetchLatency = postprocess.TransformToTableForBuilderQueries(result, queryRangeParams)
resp := v3.QueryRangeResponse{
Result: resultFetchLatency,
}


@@ -334,3 +334,213 @@ func (aH *APIHandler) getClusterList(w http.ResponseWriter, r *http.Request) {
aH.Respond(w, clusterList)
}
func (aH *APIHandler) getDeploymentAttributeKeys(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeKeyRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
keys, err := aH.deploymentsRepo.GetDeploymentAttributeKeys(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, keys)
}
func (aH *APIHandler) getDeploymentAttributeValues(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeValueRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
values, err := aH.deploymentsRepo.GetDeploymentAttributeValues(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, values)
}
func (aH *APIHandler) getDeploymentList(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := model.DeploymentListRequest{}
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
deploymentList, err := aH.deploymentsRepo.GetDeploymentList(ctx, req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, deploymentList)
}
func (aH *APIHandler) getDaemonSetAttributeKeys(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeKeyRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
keys, err := aH.daemonsetsRepo.GetDaemonSetAttributeKeys(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, keys)
}
func (aH *APIHandler) getDaemonSetAttributeValues(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeValueRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
values, err := aH.daemonsetsRepo.GetDaemonSetAttributeValues(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, values)
}
func (aH *APIHandler) getDaemonSetList(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := model.DaemonSetListRequest{}
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
daemonSetList, err := aH.daemonsetsRepo.GetDaemonSetList(ctx, req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, daemonSetList)
}
func (aH *APIHandler) getStatefulSetAttributeKeys(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeKeyRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
keys, err := aH.statefulsetsRepo.GetStatefulSetAttributeKeys(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, keys)
}
func (aH *APIHandler) getStatefulSetAttributeValues(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeValueRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
values, err := aH.statefulsetsRepo.GetStatefulSetAttributeValues(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, values)
}
func (aH *APIHandler) getStatefulSetList(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := model.StatefulSetListRequest{}
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
statefulSetList, err := aH.statefulsetsRepo.GetStatefulSetList(ctx, req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, statefulSetList)
}
func (aH *APIHandler) getJobAttributeKeys(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeKeyRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
keys, err := aH.jobsRepo.GetJobAttributeKeys(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, keys)
}
func (aH *APIHandler) getJobAttributeValues(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeValueRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
values, err := aH.jobsRepo.GetJobAttributeValues(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, values)
}
func (aH *APIHandler) getJobList(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := model.JobListRequest{}
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
jobList, err := aH.jobsRepo.GetJobList(ctx, req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, jobList)
}
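// Example request body (illustrative) for POST /api/v1/jobs/list, assuming
// model.JobListRequest uses conventional lowerCamel JSON tags:
//
//	{"start": 1732000000000, "end": 1732003600000,
//	 "orderBy": {"columnName": "cpu", "order": "desc"}, "limit": 10}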


@@ -73,6 +73,22 @@ func getParamsForTopClusters(req model.ClusterListRequest) (int64, string, strin
return getParamsForTopItems(req.Start, req.End)
}
func getParamsForTopDeployments(req model.DeploymentListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}
func getParamsForTopDaemonSets(req model.DaemonSetListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}
func getParamsForTopStatefulSets(req model.StatefulSetListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}
func getParamsForTopJobs(req model.JobListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}
// TODO(srikanthccv): remove this
// What is happening here?
// The `PrepareTimeseriesFilterQuery` uses the local time series table for sub-query because each fingerprint


@@ -0,0 +1,444 @@
package inframetrics
import (
"context"
"math"
"sort"
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"golang.org/x/exp/slices"
)
var (
metricToUseForDaemonSets = "k8s_pod_cpu_utilization"
k8sDaemonSetNameAttrKey = "k8s_daemonset_name"
metricNamesForDaemonSets = map[string]string{
"desired_nodes": "k8s_daemonset_desired_scheduled_nodes",
"available_nodes": "k8s_daemonset_current_scheduled_nodes",
}
daemonSetAttrsToEnrich = []string{
"k8s_daemonset_name",
"k8s_namespace_name",
"k8s_cluster_name",
}
queryNamesForDaemonSets = map[string][]string{
"cpu": {"A"},
"cpu_request": {"B", "A"},
"cpu_limit": {"C", "A"},
"memory": {"D"},
"memory_request": {"E", "D"},
"memory_limit": {"F", "D"},
"restarts": {"G", "A"},
"desired_nodes": {"H"},
"available_nodes": {"I"},
}
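// Each list column maps to the builder queries needed to compute it; e.g.
// "cpu_request" appears to pair B (requested CPU) with A (CPU usage) so a
// request-utilization column can be derived downstream.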
builderQueriesForDaemonSets = map[string]*v3.BuilderQuery{
// desired nodes
"H": {
QueryName: "H",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForDaemonSets["desired_nodes"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "H",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// available nodes
"I": {
QueryName: "I",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForDaemonSets["available_nodes"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "I",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
}
daemonSetQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I"}
)
type DaemonSetsRepo struct {
reader interfaces.Reader
querierV2 interfaces.Querier
}
func NewDaemonSetsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *DaemonSetsRepo {
return &DaemonSetsRepo{reader: reader, querierV2: querierV2}
}
func (d *DaemonSetsRepo) GetDaemonSetAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
// TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForDaemonSets
if req.Limit == 0 {
req.Limit = 50
}
attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req)
if err != nil {
return nil, err
}
// TODO(srikanthccv): only return resource attributes when we have a way to
// distinguish between resource attributes and other attributes.
filteredKeys := []v3.AttributeKey{}
for _, key := range attributeKeysResponse.AttributeKeys {
if slices.Contains(pointAttrsToIgnore, key.Key) {
continue
}
filteredKeys = append(filteredKeys, key)
}
return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil
}
func (d *DaemonSetsRepo) GetDaemonSetAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForDaemonSets
if req.Limit == 0 {
req.Limit = 50
}
attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req)
if err != nil {
return nil, err
}
return attributeValuesResponse, nil
}
func (d *DaemonSetsRepo) getMetadataAttributes(ctx context.Context, req model.DaemonSetListRequest) (map[string]map[string]string, error) {
daemonSetAttrs := map[string]map[string]string{}
for _, key := range daemonSetAttrsToEnrich {
hasKey := false
for _, groupByKey := range req.GroupBy {
if groupByKey.Key == key {
hasKey = true
break
}
}
if !hasKey {
req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
}
}
mq := v3.BuilderQuery{
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricToUseForDaemonSets,
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
if err != nil {
return nil, err
}
query = localQueryToDistributedQuery(query)
attrsListResponse, err := d.reader.GetListResultV3(ctx, query)
if err != nil {
return nil, err
}
for _, row := range attrsListResponse {
stringData := map[string]string{}
for key, value := range row.Data {
if str, ok := value.(string); ok {
stringData[key] = str
} else if strPtr, ok := value.(*string); ok {
stringData[key] = *strPtr
}
}
daemonSetName := stringData[k8sDaemonSetNameAttrKey]
if _, ok := daemonSetAttrs[daemonSetName]; !ok {
daemonSetAttrs[daemonSetName] = map[string]string{}
}
for _, key := range req.GroupBy {
daemonSetAttrs[daemonSetName][key.Key] = stringData[key.Key]
}
}
return daemonSetAttrs, nil
}
func (d *DaemonSetsRepo) getTopDaemonSetGroups(ctx context.Context, req model.DaemonSetListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
step, timeSeriesTableName, samplesTableName := getParamsForTopDaemonSets(req)
queryNames := queryNamesForDaemonSets[req.OrderBy.ColumnName]
topDaemonSetGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
Start: req.Start,
End: req.End,
Step: step,
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{},
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeTable,
},
}
for _, queryName := range queryNames {
query := q.CompositeQuery.BuilderQueries[queryName].Clone()
query.StepInterval = step
query.MetricTableHints = &v3.MetricTableHints{
TimeSeriesTableName: timeSeriesTableName,
SamplesTableName: samplesTableName,
}
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
topDaemonSetGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, topDaemonSetGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topDaemonSetGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
return nil, nil, nil
}
if req.OrderBy.Order == v3.DirectionDesc {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
})
} else {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
})
}
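// Note: the Offset:limit slice below assumes req.Offset <= len(series); an
// offset past the end of the sorted series would panic.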
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))
paginatedTopDaemonSetGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]
topDaemonSetGroups := []map[string]string{}
for _, series := range paginatedTopDaemonSetGroupsSeries {
topDaemonSetGroups = append(topDaemonSetGroups, series.Labels)
}
allDaemonSetGroups := []map[string]string{}
for _, series := range formattedResponse[0].Series {
allDaemonSetGroups = append(allDaemonSetGroups, series.Labels)
}
return topDaemonSetGroups, allDaemonSetGroups, nil
}
func (d *DaemonSetsRepo) GetDaemonSetList(ctx context.Context, req model.DaemonSetListRequest) (model.DaemonSetListResponse, error) {
resp := model.DaemonSetListResponse{}
if req.Limit == 0 {
req.Limit = 10
}
if req.OrderBy == nil {
req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc}
}
if req.GroupBy == nil {
req.GroupBy = []v3.AttributeKey{{Key: k8sDaemonSetNameAttrKey}}
resp.Type = model.ResponseTypeList
} else {
resp.Type = model.ResponseTypeGroupedList
}
step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))
query := WorkloadTableListQuery.Clone()
query.Start = req.Start
query.End = req.End
query.Step = step
// add additional queries for daemon sets
for _, daemonSetQuery := range builderQueriesForDaemonSets {
query.CompositeQuery.BuilderQueries[daemonSetQuery.QueryName] = daemonSetQuery
}
for _, query := range query.CompositeQuery.BuilderQueries {
query.StepInterval = step
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
query.GroupBy = req.GroupBy
// make sure we only get records for daemon sets
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: k8sDaemonSetNameAttrKey},
Operator: v3.FilterOperatorExists,
})
}
daemonSetAttrs, err := d.getMetadataAttributes(ctx, req)
if err != nil {
return resp, err
}
topDaemonSetGroups, allDaemonSetGroups, err := d.getTopDaemonSetGroups(ctx, req, query)
if err != nil {
return resp, err
}
groupFilters := map[string][]string{}
for _, topDaemonSetGroup := range topDaemonSetGroups {
for k, v := range topDaemonSetGroup {
groupFilters[k] = append(groupFilters[k], v)
}
}
for groupKey, groupValues := range groupFilters {
hasGroupFilter := false
if req.Filters != nil && len(req.Filters.Items) > 0 {
for _, filter := range req.Filters.Items {
if filter.Key.Key == groupKey {
hasGroupFilter = true
break
}
}
}
if !hasGroupFilter {
for _, query := range query.CompositeQuery.BuilderQueries {
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: groupKey},
Value: groupValues,
Operator: v3.FilterOperatorIn,
})
}
}
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
if err != nil {
return resp, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
if err != nil {
return resp, err
}
records := []model.DaemonSetListRecord{}
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.DaemonSetListRecord{
DaemonSetName: "",
CPUUsage: -1,
CPURequest: -1,
CPULimit: -1,
MemoryUsage: -1,
MemoryRequest: -1,
MemoryLimit: -1,
DesiredNodes: -1,
AvailableNodes: -1,
}
if daemonSetName, ok := row.Data[k8sDaemonSetNameAttrKey].(string); ok {
record.DaemonSetName = daemonSetName
}
if cpu, ok := row.Data["A"].(float64); ok {
record.CPUUsage = cpu
}
if cpuRequest, ok := row.Data["B"].(float64); ok {
record.CPURequest = cpuRequest
}
if cpuLimit, ok := row.Data["C"].(float64); ok {
record.CPULimit = cpuLimit
}
if memory, ok := row.Data["D"].(float64); ok {
record.MemoryUsage = memory
}
if memoryRequest, ok := row.Data["E"].(float64); ok {
record.MemoryRequest = memoryRequest
}
if memoryLimit, ok := row.Data["F"].(float64); ok {
record.MemoryLimit = memoryLimit
}
if restarts, ok := row.Data["G"].(float64); ok {
record.Restarts = int(restarts)
}
if desiredNodes, ok := row.Data["H"].(float64); ok {
record.DesiredNodes = int(desiredNodes)
}
if availableNodes, ok := row.Data["I"].(float64); ok {
record.AvailableNodes = int(availableNodes)
}
record.Meta = map[string]string{}
if _, ok := daemonSetAttrs[record.DaemonSetName]; ok {
record.Meta = daemonSetAttrs[record.DaemonSetName]
}
for k, v := range row.Data {
if slices.Contains(daemonSetQueryNames, k) {
continue
}
if labelValue, ok := v.(string); ok {
record.Meta[k] = labelValue
}
}
records = append(records, record)
}
}
resp.Total = len(allDaemonSetGroups)
resp.Records = records
return resp, nil
}
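// Usage sketch (illustrative, not part of this file; assumes millisecond
// timestamps and a wired-up repo):
//
//	req := model.DaemonSetListRequest{
//		Start: start.UnixMilli(),
//		End:   end.UnixMilli(),
//		Limit: 10,
//	}
//	resp, err := daemonSetsRepo.GetDaemonSetList(ctx, req)
//	// resp.Records holds one DaemonSetListRecord per daemon set,
//	// ordered by CPU usage by default.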


@@ -0,0 +1,444 @@
package inframetrics
import (
"context"
"math"
"sort"
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"golang.org/x/exp/slices"
)
var (
metricToUseForDeployments = "k8s_pod_cpu_utilization"
k8sDeploymentNameAttrKey = "k8s_deployment_name"
metricNamesForDeployments = map[string]string{
"desired_pods": "k8s_deployment_desired",
"available_pods": "k8s_deployment_available",
}
deploymentAttrsToEnrich = []string{
"k8s_deployment_name",
"k8s_namespace_name",
"k8s_cluster_name",
}
queryNamesForDeployments = map[string][]string{
"cpu": {"A"},
"cpu_request": {"B", "A"},
"cpu_limit": {"C", "A"},
"memory": {"D"},
"memory_request": {"E", "D"},
"memory_limit": {"F", "D"},
"restarts": {"G", "A"},
"desired_pods": {"H"},
"available_pods": {"I"},
}
builderQueriesForDeployments = map[string]*v3.BuilderQuery{
// desired pods
"H": {
QueryName: "H",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForDeployments["desired_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "H",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// available pods
"I": {
QueryName: "I",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForDeployments["available_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "I",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
}
deploymentQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I"}
)
type DeploymentsRepo struct {
reader interfaces.Reader
querierV2 interfaces.Querier
}
func NewDeploymentsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *DeploymentsRepo {
return &DeploymentsRepo{reader: reader, querierV2: querierV2}
}
func (d *DeploymentsRepo) GetDeploymentAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
// TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForDeployments
if req.Limit == 0 {
req.Limit = 50
}
attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req)
if err != nil {
return nil, err
}
// TODO(srikanthccv): only return resource attributes when we have a way to
// distinguish between resource attributes and other attributes.
filteredKeys := []v3.AttributeKey{}
for _, key := range attributeKeysResponse.AttributeKeys {
if slices.Contains(pointAttrsToIgnore, key.Key) {
continue
}
filteredKeys = append(filteredKeys, key)
}
return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil
}
func (d *DeploymentsRepo) GetDeploymentAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForDeployments
if req.Limit == 0 {
req.Limit = 50
}
attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req)
if err != nil {
return nil, err
}
return attributeValuesResponse, nil
}
func (d *DeploymentsRepo) getMetadataAttributes(ctx context.Context, req model.DeploymentListRequest) (map[string]map[string]string, error) {
deploymentAttrs := map[string]map[string]string{}
for _, key := range deploymentAttrsToEnrich {
hasKey := false
for _, groupByKey := range req.GroupBy {
if groupByKey.Key == key {
hasKey = true
break
}
}
if !hasKey {
req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
}
}
mq := v3.BuilderQuery{
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricToUseForDeployments,
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
if err != nil {
return nil, err
}
query = localQueryToDistributedQuery(query)
attrsListResponse, err := d.reader.GetListResultV3(ctx, query)
if err != nil {
return nil, err
}
for _, row := range attrsListResponse {
stringData := map[string]string{}
for key, value := range row.Data {
if str, ok := value.(string); ok {
stringData[key] = str
} else if strPtr, ok := value.(*string); ok {
stringData[key] = *strPtr
}
}
deploymentName := stringData[k8sDeploymentNameAttrKey]
if _, ok := deploymentAttrs[deploymentName]; !ok {
deploymentAttrs[deploymentName] = map[string]string{}
}
for _, key := range req.GroupBy {
deploymentAttrs[deploymentName][key.Key] = stringData[key.Key]
}
}
return deploymentAttrs, nil
}
func (d *DeploymentsRepo) getTopDeploymentGroups(ctx context.Context, req model.DeploymentListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
step, timeSeriesTableName, samplesTableName := getParamsForTopDeployments(req)
queryNames := queryNamesForDeployments[req.OrderBy.ColumnName]
topDeploymentGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
Start: req.Start,
End: req.End,
Step: step,
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{},
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeTable,
},
}
for _, queryName := range queryNames {
query := q.CompositeQuery.BuilderQueries[queryName].Clone()
query.StepInterval = step
query.MetricTableHints = &v3.MetricTableHints{
TimeSeriesTableName: timeSeriesTableName,
SamplesTableName: samplesTableName,
}
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
topDeploymentGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, topDeploymentGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topDeploymentGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
return nil, nil, nil
}
if req.OrderBy.Order == v3.DirectionDesc {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
})
} else {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
})
}
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))
paginatedTopDeploymentGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]
topDeploymentGroups := []map[string]string{}
for _, series := range paginatedTopDeploymentGroupsSeries {
topDeploymentGroups = append(topDeploymentGroups, series.Labels)
}
allDeploymentGroups := []map[string]string{}
for _, series := range formattedResponse[0].Series {
allDeploymentGroups = append(allDeploymentGroups, series.Labels)
}
return topDeploymentGroups, allDeploymentGroups, nil
}
func (d *DeploymentsRepo) GetDeploymentList(ctx context.Context, req model.DeploymentListRequest) (model.DeploymentListResponse, error) {
resp := model.DeploymentListResponse{}
if req.Limit == 0 {
req.Limit = 10
}
if req.OrderBy == nil {
req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc}
}
if req.GroupBy == nil {
req.GroupBy = []v3.AttributeKey{{Key: k8sDeploymentNameAttrKey}}
resp.Type = model.ResponseTypeList
} else {
resp.Type = model.ResponseTypeGroupedList
}
step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))
query := WorkloadTableListQuery.Clone()
query.Start = req.Start
query.End = req.End
query.Step = step
// add additional queries for deployments
for _, deploymentQuery := range builderQueriesForDeployments {
query.CompositeQuery.BuilderQueries[deploymentQuery.QueryName] = deploymentQuery
}
for _, query := range query.CompositeQuery.BuilderQueries {
query.StepInterval = step
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
query.GroupBy = req.GroupBy
// make sure we only get records for deployments
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: k8sDeploymentNameAttrKey},
Operator: v3.FilterOperatorExists,
})
}
deploymentAttrs, err := d.getMetadataAttributes(ctx, req)
if err != nil {
return resp, err
}
topDeploymentGroups, allDeploymentGroups, err := d.getTopDeploymentGroups(ctx, req, query)
if err != nil {
return resp, err
}
groupFilters := map[string][]string{}
for _, topDeploymentGroup := range topDeploymentGroups {
for k, v := range topDeploymentGroup {
groupFilters[k] = append(groupFilters[k], v)
}
}
for groupKey, groupValues := range groupFilters {
hasGroupFilter := false
if req.Filters != nil && len(req.Filters.Items) > 0 {
for _, filter := range req.Filters.Items {
if filter.Key.Key == groupKey {
hasGroupFilter = true
break
}
}
}
if !hasGroupFilter {
for _, query := range query.CompositeQuery.BuilderQueries {
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: groupKey},
Value: groupValues,
Operator: v3.FilterOperatorIn,
})
}
}
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
if err != nil {
return resp, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
if err != nil {
return resp, err
}
records := []model.DeploymentListRecord{}
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.DeploymentListRecord{
DeploymentName: "",
CPUUsage: -1,
CPURequest: -1,
CPULimit: -1,
MemoryUsage: -1,
MemoryRequest: -1,
MemoryLimit: -1,
DesiredPods: -1,
AvailablePods: -1,
}
if deploymentName, ok := row.Data[k8sDeploymentNameAttrKey].(string); ok {
record.DeploymentName = deploymentName
}
if cpu, ok := row.Data["A"].(float64); ok {
record.CPUUsage = cpu
}
if cpuRequest, ok := row.Data["B"].(float64); ok {
record.CPURequest = cpuRequest
}
if cpuLimit, ok := row.Data["C"].(float64); ok {
record.CPULimit = cpuLimit
}
if memory, ok := row.Data["D"].(float64); ok {
record.MemoryUsage = memory
}
if memoryRequest, ok := row.Data["E"].(float64); ok {
record.MemoryRequest = memoryRequest
}
if memoryLimit, ok := row.Data["F"].(float64); ok {
record.MemoryLimit = memoryLimit
}
if restarts, ok := row.Data["G"].(float64); ok {
record.Restarts = int(restarts)
}
if desiredPods, ok := row.Data["H"].(float64); ok {
record.DesiredPods = int(desiredPods)
}
if availablePods, ok := row.Data["I"].(float64); ok {
record.AvailablePods = int(availablePods)
}
record.Meta = map[string]string{}
if _, ok := deploymentAttrs[record.DeploymentName]; ok {
record.Meta = deploymentAttrs[record.DeploymentName]
}
for k, v := range row.Data {
if slices.Contains(deploymentQueryNames, k) {
continue
}
if labelValue, ok := v.(string); ok {
record.Meta[k] = labelValue
}
}
records = append(records, record)
}
}
resp.Total = len(allDeploymentGroups)
resp.Records = records
return resp, nil
}


@@ -0,0 +1,498 @@
package inframetrics
import (
"context"
"math"
"sort"
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"golang.org/x/exp/slices"
)
var (
metricToUseForJobs = "k8s_pod_cpu_utilization"
k8sJobNameAttrKey = "k8s_job_name"
metricNamesForJobs = map[string]string{
"desired_successful_pods": "k8s_job_desired_successful_pods",
"active_pods": "k8s_job_active_pods",
"failed_pods": "k8s_job_failed_pods",
"successful_pods": "k8s_job_successful_pods",
}
jobAttrsToEnrich = []string{
"k8s_job_name",
"k8s_namespace_name",
"k8s_cluster_name",
}
queryNamesForJobs = map[string][]string{
"cpu": {"A"},
"cpu_request": {"B", "A"},
"cpu_limit": {"C", "A"},
"memory": {"D"},
"memory_request": {"E", "D"},
"memory_limit": {"F", "D"},
"restarts": {"G", "A"},
"desired_pods": {"H"},
"active_pods": {"I"},
"failed_pods": {"J"},
"successful_pods": {"K"},
}
builderQueriesForJobs = map[string]*v3.BuilderQuery{
// desired successful pods
"H": {
QueryName: "H",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForJobs["desired_successful_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "H",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// active pods
"I": {
QueryName: "I",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForJobs["active_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "I",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// failed pods
"J": {
QueryName: "J",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForJobs["failed_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "J",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// successful pods
"K": {
QueryName: "K",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForJobs["successful_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "K",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
}
jobQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"}
)
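// Editor's note, a sketch rather than code from this diff: queryNamesForJobs
// maps an order-by column to the builder queries needed to rank by it; ratio
// columns such as cpu_request list the ratio query ("B") together with the
// usage query ("A") it is derived from. A lookup helper, assuming unknown
// columns fall back to the primary CPU query (the fallback is an assumption,
// not shown in this diff):
func queriesForJobOrderBy(column string) []string {
    if names, ok := queryNamesForJobs[column]; ok {
        return names
    }
    return []string{"A"} // assumed default
}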
type JobsRepo struct {
reader interfaces.Reader
querierV2 interfaces.Querier
}
func NewJobsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *JobsRepo {
return &JobsRepo{reader: reader, querierV2: querierV2}
}
func (d *JobsRepo) GetJobAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
// TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForJobs
if req.Limit == 0 {
req.Limit = 50
}
attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req)
if err != nil {
return nil, err
}
// TODO(srikanthccv): only return resource attributes when we have a way to
// distinguish between resource attributes and other attributes.
filteredKeys := []v3.AttributeKey{}
for _, key := range attributeKeysResponse.AttributeKeys {
if slices.Contains(pointAttrsToIgnore, key.Key) {
continue
}
filteredKeys = append(filteredKeys, key)
}
return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil
}
func (d *JobsRepo) GetJobAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForJobs
if req.Limit == 0 {
req.Limit = 50
}
attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req)
if err != nil {
return nil, err
}
return attributeValuesResponse, nil
}
func (d *JobsRepo) getMetadataAttributes(ctx context.Context, req model.JobListRequest) (map[string]map[string]string, error) {
jobAttrs := map[string]map[string]string{}
for _, key := range jobAttrsToEnrich {
hasKey := false
for _, groupByKey := range req.GroupBy {
if groupByKey.Key == key {
hasKey = true
break
}
}
if !hasKey {
req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
}
}
mq := v3.BuilderQuery{
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricToUseForJobs,
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
if err != nil {
return nil, err
}
query = localQueryToDistributedQuery(query)
attrsListResponse, err := d.reader.GetListResultV3(ctx, query)
if err != nil {
return nil, err
}
for _, row := range attrsListResponse {
stringData := map[string]string{}
for key, value := range row.Data {
if str, ok := value.(string); ok {
stringData[key] = str
} else if strPtr, ok := value.(*string); ok {
stringData[key] = *strPtr
}
}
jobName := stringData[k8sJobNameAttrKey]
if _, ok := jobAttrs[jobName]; !ok {
jobAttrs[jobName] = map[string]string{}
}
for _, key := range req.GroupBy {
jobAttrs[jobName][key.Key] = stringData[key.Key]
}
}
return jobAttrs, nil
}
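// Editor's note, a hedged aside: the string/*string type switch above exists
// because nullable ClickHouse columns scan into *string in the Go driver; a
// nil pointer from a NULL value would be dereferenced as written. A guarded
// variant (the helper name is hypothetical):
func coerceString(value any) (string, bool) {
    switch v := value.(type) {
    case string:
        return v, true
    case *string:
        if v != nil {
            return *v, true
        }
    }
    return "", false
}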
func (d *JobsRepo) getTopJobGroups(ctx context.Context, req model.JobListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
step, timeSeriesTableName, samplesTableName := getParamsForTopJobs(req)
queryNames := queryNamesForJobs[req.OrderBy.ColumnName]
topJobGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
Start: req.Start,
End: req.End,
Step: step,
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{},
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeTable,
},
}
for _, queryName := range queryNames {
query := q.CompositeQuery.BuilderQueries[queryName].Clone()
query.StepInterval = step
query.MetricTableHints = &v3.MetricTableHints{
TimeSeriesTableName: timeSeriesTableName,
SamplesTableName: samplesTableName,
}
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
topJobGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, topJobGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topJobGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
return nil, nil, nil
}
if req.OrderBy.Order == v3.DirectionDesc {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
})
} else {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
})
}
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))
paginatedTopJobGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]
topJobGroups := []map[string]string{}
for _, series := range paginatedTopJobGroupsSeries {
topJobGroups = append(topJobGroups, series.Labels)
}
allJobGroups := []map[string]string{}
for _, series := range formattedResponse[0].Series {
allJobGroups = append(allJobGroups, series.Labels)
}
return topJobGroups, allJobGroups, nil
}
func (d *JobsRepo) GetJobList(ctx context.Context, req model.JobListRequest) (model.JobListResponse, error) {
resp := model.JobListResponse{}
if req.Limit == 0 {
req.Limit = 10
}
if req.OrderBy == nil {
req.OrderBy = &v3.OrderBy{ColumnName: "desired_pods", Order: v3.DirectionDesc}
}
if req.GroupBy == nil {
req.GroupBy = []v3.AttributeKey{{Key: k8sJobNameAttrKey}}
resp.Type = model.ResponseTypeList
} else {
resp.Type = model.ResponseTypeGroupedList
}
step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))
query := WorkloadTableListQuery.Clone()
query.Start = req.Start
query.End = req.End
query.Step = step
// add additional queries for jobs
for _, jobQuery := range builderQueriesForJobs {
query.CompositeQuery.BuilderQueries[jobQuery.QueryName] = jobQuery
}
for _, query := range query.CompositeQuery.BuilderQueries {
query.StepInterval = step
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
query.GroupBy = req.GroupBy
// make sure we only get records for jobs
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: k8sJobNameAttrKey},
Operator: v3.FilterOperatorExists,
})
}
jobAttrs, err := d.getMetadataAttributes(ctx, req)
if err != nil {
return resp, err
}
topJobGroups, allJobGroups, err := d.getTopJobGroups(ctx, req, query)
if err != nil {
return resp, err
}
groupFilters := map[string][]string{}
for _, topJobGroup := range topJobGroups {
for k, v := range topJobGroup {
groupFilters[k] = append(groupFilters[k], v)
}
}
for groupKey, groupValues := range groupFilters {
hasGroupFilter := false
if req.Filters != nil && len(req.Filters.Items) > 0 {
for _, filter := range req.Filters.Items {
if filter.Key.Key == groupKey {
hasGroupFilter = true
break
}
}
}
if !hasGroupFilter {
for _, query := range query.CompositeQuery.BuilderQueries {
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: groupKey},
Value: groupValues,
Operator: v3.FilterOperatorIn,
})
}
}
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
if err != nil {
return resp, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
if err != nil {
return resp, err
}
records := []model.JobListRecord{}
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.JobListRecord{
JobName: "",
CPUUsage: -1,
CPURequest: -1,
CPULimit: -1,
MemoryUsage: -1,
MemoryRequest: -1,
MemoryLimit: -1,
DesiredSuccessfulPods: -1,
ActivePods: -1,
FailedPods: -1,
SuccessfulPods: -1,
}
if jobName, ok := row.Data[k8sJobNameAttrKey].(string); ok {
record.JobName = jobName
}
if cpu, ok := row.Data["A"].(float64); ok {
record.CPUUsage = cpu
}
if cpuRequest, ok := row.Data["B"].(float64); ok {
record.CPURequest = cpuRequest
}
if cpuLimit, ok := row.Data["C"].(float64); ok {
record.CPULimit = cpuLimit
}
if memory, ok := row.Data["D"].(float64); ok {
record.MemoryUsage = memory
}
if memoryRequest, ok := row.Data["E"].(float64); ok {
record.MemoryRequest = memoryRequest
}
if memoryLimit, ok := row.Data["F"].(float64); ok {
record.MemoryLimit = memoryLimit
}
if restarts, ok := row.Data["G"].(float64); ok {
record.Restarts = int(restarts)
}
if desiredSuccessfulPods, ok := row.Data["H"].(float64); ok {
record.DesiredSuccessfulPods = int(desiredSuccessfulPods)
}
if activePods, ok := row.Data["I"].(float64); ok {
record.ActivePods = int(activePods)
}
if failedPods, ok := row.Data["J"].(float64); ok {
record.FailedPods = int(failedPods)
}
if successfulPods, ok := row.Data["K"].(float64); ok {
record.SuccessfulPods = int(successfulPods)
}
record.Meta = map[string]string{}
if _, ok := jobAttrs[record.JobName]; ok {
record.Meta = jobAttrs[record.JobName]
}
for k, v := range row.Data {
if slices.Contains(jobQueryNames, k) {
continue
}
if labelValue, ok := v.(string); ok {
record.Meta[k] = labelValue
}
}
records = append(records, record)
}
}
resp.Total = len(allJobGroups)
resp.Records = records
return resp, nil
}


@@ -0,0 +1,444 @@
package inframetrics
import (
"context"
"math"
"sort"
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"golang.org/x/exp/slices"
)
var (
metricToUseForStatefulSets = "k8s_pod_cpu_utilization"
k8sStatefulSetNameAttrKey = "k8s_statefulset_name"
metricNamesForStatefulSets = map[string]string{
"desired_pods": "k8s_statefulset_desired_pods",
"available_pods": "k8s_statefulset_current_pods",
}
statefulSetAttrsToEnrich = []string{
"k8s_statefulset_name",
"k8s_namespace_name",
"k8s_cluster_name",
}
queryNamesForStatefulSets = map[string][]string{
"cpu": {"A"},
"cpu_request": {"B", "A"},
"cpu_limit": {"C", "A"},
"memory": {"D"},
"memory_request": {"E", "D"},
"memory_limit": {"F", "D"},
"restarts": {"G", "A"},
"desired_pods": {"H"},
"available_pods": {"I"},
}
builderQueriesForStatefulSets = map[string]*v3.BuilderQuery{
// desired pods
"H": {
QueryName: "H",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForStatefulSets["desired_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "H",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// available pods
"I": {
QueryName: "I",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForStatefulSets["available_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "I",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
}
statefulSetQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I"}
)
type StatefulSetsRepo struct {
reader interfaces.Reader
querierV2 interfaces.Querier
}
func NewStatefulSetsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *StatefulSetsRepo {
return &StatefulSetsRepo{reader: reader, querierV2: querierV2}
}
func (d *StatefulSetsRepo) GetStatefulSetAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
// TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForStatefulSets
if req.Limit == 0 {
req.Limit = 50
}
attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req)
if err != nil {
return nil, err
}
// TODO(srikanthccv): only return resource attributes when we have a way to
// distinguish between resource attributes and other attributes.
filteredKeys := []v3.AttributeKey{}
for _, key := range attributeKeysResponse.AttributeKeys {
if slices.Contains(pointAttrsToIgnore, key.Key) {
continue
}
filteredKeys = append(filteredKeys, key)
}
return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil
}
func (d *StatefulSetsRepo) GetStatefulSetAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForStatefulSets
if req.Limit == 0 {
req.Limit = 50
}
attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req)
if err != nil {
return nil, err
}
return attributeValuesResponse, nil
}
func (d *StatefulSetsRepo) getMetadataAttributes(ctx context.Context, req model.StatefulSetListRequest) (map[string]map[string]string, error) {
statefulSetAttrs := map[string]map[string]string{}
for _, key := range statefulSetAttrsToEnrich {
hasKey := false
for _, groupByKey := range req.GroupBy {
if groupByKey.Key == key {
hasKey = true
break
}
}
if !hasKey {
req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
}
}
mq := v3.BuilderQuery{
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricToUseForStatefulSets,
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
if err != nil {
return nil, err
}
query = localQueryToDistributedQuery(query)
attrsListResponse, err := d.reader.GetListResultV3(ctx, query)
if err != nil {
return nil, err
}
for _, row := range attrsListResponse {
stringData := map[string]string{}
for key, value := range row.Data {
if str, ok := value.(string); ok {
stringData[key] = str
} else if strPtr, ok := value.(*string); ok {
stringData[key] = *strPtr
}
}
statefulSetName := stringData[k8sStatefulSetNameAttrKey]
if _, ok := statefulSetAttrs[statefulSetName]; !ok {
statefulSetAttrs[statefulSetName] = map[string]string{}
}
for _, key := range req.GroupBy {
statefulSetAttrs[statefulSetName][key.Key] = stringData[key.Key]
}
}
return statefulSetAttrs, nil
}
func (d *StatefulSetsRepo) getTopStatefulSetGroups(ctx context.Context, req model.StatefulSetListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
step, timeSeriesTableName, samplesTableName := getParamsForTopStatefulSets(req)
queryNames := queryNamesForStatefulSets[req.OrderBy.ColumnName]
topStatefulSetGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
Start: req.Start,
End: req.End,
Step: step,
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{},
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeTable,
},
}
for _, queryName := range queryNames {
query := q.CompositeQuery.BuilderQueries[queryName].Clone()
query.StepInterval = step
query.MetricTableHints = &v3.MetricTableHints{
TimeSeriesTableName: timeSeriesTableName,
SamplesTableName: samplesTableName,
}
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
topStatefulSetGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, topStatefulSetGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topStatefulSetGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
return nil, nil, nil
}
if req.OrderBy.Order == v3.DirectionDesc {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
})
} else {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
})
}
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))
paginatedTopStatefulSetGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]
topStatefulSetGroups := []map[string]string{}
for _, series := range paginatedTopStatefulSetGroupsSeries {
topStatefulSetGroups = append(topStatefulSetGroups, series.Labels)
}
allStatefulSetGroups := []map[string]string{}
for _, series := range formattedResponse[0].Series {
allStatefulSetGroups = append(allStatefulSetGroups, series.Labels)
}
return topStatefulSetGroups, allStatefulSetGroups, nil
}
func (d *StatefulSetsRepo) GetStatefulSetList(ctx context.Context, req model.StatefulSetListRequest) (model.StatefulSetListResponse, error) {
resp := model.StatefulSetListResponse{}
if req.Limit == 0 {
req.Limit = 10
}
if req.OrderBy == nil {
req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc}
}
if req.GroupBy == nil {
req.GroupBy = []v3.AttributeKey{{Key: k8sStatefulSetNameAttrKey}}
resp.Type = model.ResponseTypeList
} else {
resp.Type = model.ResponseTypeGroupedList
}
step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))
query := WorkloadTableListQuery.Clone()
query.Start = req.Start
query.End = req.End
query.Step = step
// add additional queries for stateful sets
for _, statefulSetQuery := range builderQueriesForStatefulSets {
query.CompositeQuery.BuilderQueries[statefulSetQuery.QueryName] = statefulSetQuery
}
for _, query := range query.CompositeQuery.BuilderQueries {
query.StepInterval = step
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
query.GroupBy = req.GroupBy
// make sure we only get records for stateful sets
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: k8sStatefulSetNameAttrKey},
Operator: v3.FilterOperatorExists,
})
}
statefulSetAttrs, err := d.getMetadataAttributes(ctx, req)
if err != nil {
return resp, err
}
topStatefulSetGroups, allStatefulSetGroups, err := d.getTopStatefulSetGroups(ctx, req, query)
if err != nil {
return resp, err
}
groupFilters := map[string][]string{}
for _, topStatefulSetGroup := range topStatefulSetGroups {
for k, v := range topStatefulSetGroup {
groupFilters[k] = append(groupFilters[k], v)
}
}
for groupKey, groupValues := range groupFilters {
hasGroupFilter := false
if req.Filters != nil && len(req.Filters.Items) > 0 {
for _, filter := range req.Filters.Items {
if filter.Key.Key == groupKey {
hasGroupFilter = true
break
}
}
}
if !hasGroupFilter {
for _, query := range query.CompositeQuery.BuilderQueries {
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: groupKey},
Value: groupValues,
Operator: v3.FilterOperatorIn,
})
}
}
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
if err != nil {
return resp, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
if err != nil {
return resp, err
}
records := []model.StatefulSetListRecord{}
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.StatefulSetListRecord{
StatefulSetName: "",
CPUUsage: -1,
CPURequest: -1,
CPULimit: -1,
MemoryUsage: -1,
MemoryRequest: -1,
MemoryLimit: -1,
DesiredPods: -1,
AvailablePods: -1,
}
if statefulSetName, ok := row.Data[k8sStatefulSetNameAttrKey].(string); ok {
record.StatefulSetName = statefulSetName
}
if cpu, ok := row.Data["A"].(float64); ok {
record.CPUUsage = cpu
}
if cpuRequest, ok := row.Data["B"].(float64); ok {
record.CPURequest = cpuRequest
}
if cpuLimit, ok := row.Data["C"].(float64); ok {
record.CPULimit = cpuLimit
}
if memory, ok := row.Data["D"].(float64); ok {
record.MemoryUsage = memory
}
if memoryRequest, ok := row.Data["E"].(float64); ok {
record.MemoryRequest = memoryRequest
}
if memoryLimit, ok := row.Data["F"].(float64); ok {
record.MemoryLimit = memoryLimit
}
if restarts, ok := row.Data["G"].(float64); ok {
record.Restarts = int(restarts)
}
if desiredPods, ok := row.Data["H"].(float64); ok {
record.DesiredPods = int(desiredPods)
}
if availablePods, ok := row.Data["I"].(float64); ok {
record.AvailablePods = int(availablePods)
}
record.Meta = map[string]string{}
if _, ok := statefulSetAttrs[record.StatefulSetName]; ok {
record.Meta = statefulSetAttrs[record.StatefulSetName]
}
for k, v := range row.Data {
if slices.Contains(statefulSetQueryNames, k) {
continue
}
if labelValue, ok := v.(string); ok {
record.Meta[k] = labelValue
}
}
records = append(records, record)
}
}
resp.Total = len(allStatefulSetGroups)
resp.Records = records
return resp, nil
}


@@ -0,0 +1,166 @@
package inframetrics
import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
var (
metricNamesForWorkloads = map[string]string{
"cpu": "k8s_pod_cpu_utilization",
"cpu_req": "k8s_pod_cpu_request_utilization",
"cpu_limit": "k8s_pod_cpu_limit_utilization",
"memory": "k8s_pod_memory_usage",
"memory_req": "k8s_pod_memory_request_utilization",
"memory_limit": "k8s_pod_memory_limit_utilization",
"restarts": "k8s_container_restarts",
}
)
var WorkloadTableListQuery = v3.QueryRangeParamsV3{
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{
// pod cpu utilization
"A": {
QueryName: "A",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["cpu"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "A",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// pod cpu request utilization
"B": {
QueryName: "B",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["cpu_request"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "B",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// pod cpu limit utilization
"C": {
QueryName: "C",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["cpu_limit"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "C",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// pod memory utilization
"D": {
QueryName: "D",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["memory"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "D",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// pod memory request utilization
"E": {
QueryName: "E",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["memory_request"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "E",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// pod memory limit utilization
"F": {
QueryName: "F",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["memory_limit"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "F",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
"G": {
QueryName: "G",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["restarts"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "G",
ReduceTo: v3.ReduceToOperatorSum,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationMax,
Functions: []v3.Function{{Name: v3.FunctionNameRunningDiff}},
Disabled: false,
},
},
PanelType: v3.PanelTypeTable,
QueryType: v3.QueryTypeBuilder,
},
Version: "v4",
FormatForWeb: true,
}
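// Editor's note, a condensed sketch of how the deployment, job, and stateful
// set repos above specialize this shared query: clone, stamp the request
// window and step, then mutate each builder query in place. It assumes Clone
// returns a copy deep enough for per-request mutation, as those repos rely
// on; the helper name is hypothetical, and the real repos also append their
// workload-specific filters and extra queries:
func workloadQueryForRequest(start, end, step int64, groupBy []v3.AttributeKey) *v3.QueryRangeParamsV3 {
    query := WorkloadTableListQuery.Clone()
    query.Start = start
    query.End = end
    query.Step = step
    for _, bq := range query.CompositeQuery.BuilderQueries {
        bq.StepInterval = step
        bq.GroupBy = groupBy
    }
    return query
}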


@@ -284,7 +284,7 @@ func BuildQRParamsWithCache(messagingQueue *MessagingQueue, queryContext string,
cq = &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
BuilderQueries: bhq,
PanelType: v3.PanelTypeTable,
PanelType: v3.PanelTypeList,
}
}
@@ -364,7 +364,7 @@ func BuildClickHouseQuery(messagingQueue *MessagingQueue, queueType string, quer
func buildCompositeQuery(chq *v3.ClickHouseQuery, queryContext string) (*v3.CompositeQuery, error) {
if queryContext == "producer-consumer-eval" {
if queryContext == "producer-consumer-eval" || queryContext == "producer-throughput-overview" {
return &v3.CompositeQuery{
QueryType: v3.QueryTypeClickHouseSQL,
ClickHouseQueries: map[string]*v3.ClickHouseQuery{queryContext: chq},


@@ -142,7 +142,7 @@ func enrichFieldWithMetadata(field v3.AttributeKey, fields map[string]v3.Attribu
}
// check if the field is present in the fields map
for _, key := range utils.GenerateLogEnrichmentKeys(field) {
for _, key := range utils.GenerateEnrichmentKeys(field) {
if val, ok := fields[key]; ok {
return val
}


@@ -436,8 +436,6 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
} else if panelType == v3.PanelTypeTable {
queryTmplPrefix =
"SELECT"
// step or aggregate interval is whole time period in case of table panel
step = (utils.GetEpochNanoSecs(end) - utils.GetEpochNanoSecs(start)) / NANOSECOND
} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
// Select the aggregate value for interval
queryTmplPrefix =


@@ -2,6 +2,7 @@ package cumulative
import (
"fmt"
"os"
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/constants"
@@ -40,6 +41,9 @@ import (
const (
rateWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window)))`
increaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window)))`
experimentalRateWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
experimentalIncreaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window))`
)
// prepareTimeAggregationSubQueryTimeSeries prepares the sub-query to be used for temporal aggregation
@@ -151,14 +155,22 @@ func prepareTimeAggregationSubQuery(start, end, step int64, mq *v3.BuilderQuery)
subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery)
case v3.TimeAggregationRate:
innerSubQuery := fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery)
rateExp := rateWithoutNegative
if _, ok := os.LookupEnv("EXPERIMENTAL_RATE_WITHOUT_NEGATIVE"); ok {
rateExp = fmt.Sprintf(experimentalRateWithoutNegative, start)
}
rateQueryTmpl :=
"SELECT %s ts, " + rateWithoutNegative +
"SELECT %s ts, " + rateExp +
" as per_series_value FROM (%s) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)"
subQuery = fmt.Sprintf(rateQueryTmpl, selectLabels, innerSubQuery)
case v3.TimeAggregationIncrease:
innerSubQuery := fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery)
increaseExp := increaseWithoutNegative
if _, ok := os.LookupEnv("EXPERIMENTAL_INCREASE_WITHOUT_NEGATIVE"); ok {
increaseExp = experimentalIncreaseWithoutNegative
}
rateQueryTmpl :=
"SELECT %s ts, " + increaseWithoutNegative +
"SELECT %s ts, " + increaseExp +
" as per_series_value FROM (%s) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)"
subQuery = fmt.Sprintf(rateQueryTmpl, selectLabels, innerSubQuery)
}
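// Editor's note: both experimental expressions are gated on the presence of
// their environment variables, not on their values, since os.LookupEnv
// reports ok even for an empty value. A process-local illustration (sketch
// only; "os" is already imported in this file per the diff above):
func enableExperimentalRateExpressions() {
    os.Setenv("EXPERIMENTAL_RATE_WITHOUT_NEGATIVE", "1")
    os.Setenv("EXPERIMENTAL_INCREASE_WITHOUT_NEGATIVE", "1")
}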


@@ -12,6 +12,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/constants"
chErrors "go.signoz.io/signoz/pkg/query-service/errors"
"go.signoz.io/signoz/pkg/query-service/querycache"
"go.signoz.io/signoz/pkg/query-service/utils"
@@ -52,7 +53,8 @@ type querier struct {
returnedSeries []*v3.Series
returnedErr error
UseLogsNewSchema bool
UseLogsNewSchema bool
UseTraceNewSchema bool
}
type QuerierOptions struct {
@@ -308,56 +310,121 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang
return results, errQueriesByName, err
}
func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) {
func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) {
res := make([]*v3.Result, 0)
qName := ""
pageSize := uint64(0)
limit := uint64(0)
offset := uint64(0)
// since we are considering only one query
for name, v := range params.CompositeQuery.BuilderQueries {
qName = name
pageSize = v.PageSize
// for traces specifically
limit = v.Limit
offset = v.Offset
}
data := []*v3.Row{}
tracesLimit := limit + offset
for _, v := range tsRanges {
params.Start = v.Start
params.End = v.End
params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data))
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
}
length := uint64(0)
// this will run only once
for name, query := range queries {
rowList, err := q.reader.GetListResultV3(ctx, query)
// appending the filter to get the next set of data
if params.CompositeQuery.BuilderQueries[qName].DataSource == v3.DataSourceLogs {
params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data))
queries, err := q.builder.PrepareQueries(params)
if err != nil {
errs := []error{err}
errQuriesByName := map[string]error{
name: err,
}
return nil, errQuriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
return nil, nil, err
}
for name, query := range queries {
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
errs := []error{err}
errQueriesByName := map[string]error{
name: err,
}
return nil, errQueriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
}
length += uint64(len(rowList))
data = append(data, rowList...)
}
data = append(data, rowList...)
}
// append a filter to the params
if len(data) > 0 {
params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{
Key: "id",
IsColumn: true,
DataType: "string",
},
Operator: v3.FilterOperatorLessThan,
Value: data[len(data)-1].Data["id"],
})
}
if length > 0 {
params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{
Key: "id",
IsColumn: true,
DataType: "string",
},
Operator: v3.FilterOperatorLessThan,
Value: data[len(data)-1].Data["id"],
})
}
if uint64(len(data)) >= pageSize {
break
if uint64(len(data)) >= pageSize {
break
}
} else {
// TRACE
// we update the offset and limit based on the number of traces found in the current time range
// e.g.
// 1) offset = 0, limit = 100, tsRanges = [t1, t10], [t10, t20], [t20, t30]
//
// if 100 traces are in [t1, t10], all 100 are returned immediately
// if 10 traces are in [t1, t10], we collect those 10, set offset to 0 and limit to 90, and search the next time range [t10, t20]
// if no traces are found in [t1, t10], we search [t10, t20] with offset=0, limit=100
//
// 2) offset = 50, limit = 100, tsRanges = [t1, t10], [t10, t20], [t20, t30]
//
// if we find 150 traces with limit=150 and offset=0 in [t1, t10], we immediately return 100 traces (the first 50 are skipped by the offset)
// if we find 50 in [t1, t10] with limit=150 and offset=0, we set limit=100, offset=0 and search the next time range [t10, t20]
// if no traces are found in [t1, t10], we search [t10, t20] with limit=150 and offset=0
// max limit + offset is 10k for pagination
if tracesLimit > constants.TRACE_V4_MAX_PAGINATION_LIMIT {
return nil, nil, fmt.Errorf("maximum traces that can be paginated is 10000")
}
params.CompositeQuery.BuilderQueries[qName].Offset = 0
params.CompositeQuery.BuilderQueries[qName].Limit = tracesLimit
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
}
for name, query := range queries {
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
errs := []error{err}
errQueriesByName := map[string]error{
name: err,
}
return nil, errQueriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
}
length += uint64(len(rowList))
// skip the traces unless offset is 0
for _, row := range rowList {
if offset == 0 {
data = append(data, row)
} else {
offset--
}
}
}
tracesLimit = tracesLimit - length
if uint64(len(data)) >= limit {
break
}
}
}
res = append(res, &v3.Result{
@@ -368,15 +435,25 @@ func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangePar
}
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
// List query has support for only one query.
if q.UseLogsNewSchema && params.CompositeQuery != nil && len(params.CompositeQuery.BuilderQueries) == 1 {
// List query has support for only one query
// we are skipping for PanelTypeTrace as it has a custom order by regardless of what's in the payload
if params.CompositeQuery != nil &&
len(params.CompositeQuery.BuilderQueries) == 1 &&
params.CompositeQuery.PanelType != v3.PanelTypeTrace {
for _, v := range params.CompositeQuery.BuilderQueries {
if (v.DataSource == v3.DataSourceLogs && !q.UseLogsNewSchema) ||
(v.DataSource == v3.DataSourceTraces && !q.UseTraceNewSchema) {
break
}
// only allow logs queries with timestamp ordering desc
if v.DataSource == v3.DataSourceLogs && len(v.OrderBy) == 1 && v.OrderBy[0].ColumnName == "timestamp" && v.OrderBy[0].Order == "desc" {
startEndArr := utils.GetLogsListTsRanges(params.Start, params.End)
if len(startEndArr) > 0 {
return q.runLogsListQuery(ctx, params, startEndArr)
}
// TODO(nitya): allow for timestamp asc
if (v.DataSource == v3.DataSourceLogs || v.DataSource == v3.DataSourceTraces) &&
len(v.OrderBy) == 1 &&
v.OrderBy[0].ColumnName == "timestamp" &&
v.OrderBy[0].Order == "desc" {
startEndArr := utils.GetListTsRanges(params.Start, params.End)
return q.runWindowBasedListQuery(ctx, params, startEndArr)
}
}
}
@@ -408,13 +485,13 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
close(ch)
var errs []error
errQuriesByName := make(map[string]error)
errQueriesByName := make(map[string]error)
res := make([]*v3.Result, 0)
// read values from the channel
for r := range ch {
if r.Err != nil {
errs = append(errs, r.Err)
errQuriesByName[r.Name] = r.Err
errQueriesByName[r.Name] = r.Err
continue
}
res = append(res, &v3.Result{
@@ -423,7 +500,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
})
}
if len(errs) != 0 {
return nil, errQuriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
return nil, errQueriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
}
return res, nil, nil
}
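To make the trace branch's offset/limit bookkeeping concrete, here is a self-contained walk-through of the arithmetic, a sketch using plain Go with no SigNoz types and made-up window contents:

package main

import "fmt"

// Simulates the trace branch of runWindowBasedListQuery: each window is
// queried with a budget of limit+offset rows, rows are skipped until the
// caller's offset is consumed, and the budget shrinks by the rows seen.
func main() {
    limit, offset := uint64(4), uint64(3)
    windows := [][]uint64{ // rows returned per time window (newest first)
        {1, 2, 3},
        {4, 5, 6},
        {7},
    }
    tracesLimit := limit + offset // initial budget; capped at 10k in the real code
    data := []uint64{}
    for _, rows := range windows {
        length := uint64(len(rows))
        for _, row := range rows {
            if offset == 0 {
                data = append(data, row)
            } else {
                offset-- // skip rows until the original offset is consumed
            }
        }
        tracesLimit -= length // the next window asks only for what remains
        if uint64(len(data)) >= limit {
            break
        }
    }
    fmt.Println(data, tracesLimit) // [4 5 6 7] 0
}

This mirrors the "query with offset" test case below: limit 4 and offset 3 produce per-window LIMITs of 7, 4, and 1.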


@@ -5,15 +5,21 @@ import (
"encoding/json"
"fmt"
"math"
"regexp"
"strings"
"testing"
"time"
cmock "github.com/srikanthccv/ClickHouse-go-mock"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/cache/inmemory"
"go.signoz.io/signoz/pkg/query-service/featureManager"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/querycache"
"go.signoz.io/signoz/pkg/query-service/utils"
)
func minTimestamp(series []*v3.Series) int64 {
@@ -1124,3 +1130,304 @@ func TestQueryRangeValueTypePromQL(t *testing.T) {
}
}
}
type regexMatcher struct {
}
func (m *regexMatcher) Match(expectedSQL, actualSQL string) error {
re, err := regexp.Compile(expectedSQL)
if err != nil {
return err
}
if !re.MatchString(actualSQL) {
return fmt.Errorf("expected query to contain %s, got %s", expectedSQL, actualSQL)
}
return nil
}
func Test_querier_runWindowBasedListQuery(t *testing.T) {
params := &v3.QueryRangeParamsV3{
Start: 1722171576000000000, // July 28, 2024 6:29:36 PM
End: 1722262800000000000, // July 29, 2024 7:50:00 PM
CompositeQuery: &v3.CompositeQuery{
PanelType: v3.PanelTypeList,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
Expression: "A",
DataSource: v3.DataSourceTraces,
PageSize: 10,
Limit: 100,
StepInterval: 60,
AggregateOperator: v3.AggregateOperatorNoOp,
SelectColumns: []v3.AttributeKey{{Key: "serviceName"}},
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
},
},
},
}
tsRanges := []utils.LogsListTsRange{
{
Start: 1722259200000000000, // July 29, 2024 6:50:00 PM
End: 1722262800000000000, // July 29, 2024 7:50:00 PM
},
{
Start: 1722252000000000000, // July 29, 2024 4:50:00 PM
End: 1722259200000000000, // July 29, 2024 6:50:00 PM
},
{
Start: 1722237600000000000, // July 29, 2024 12:50:00 PM
End: 1722252000000000000, // July 29, 2024 4:50:00 PM
},
{
Start: 1722208800000000000, // July 29, 2024 4:50:00 AM
End: 1722237600000000000, // July 29, 2024 12:50:00 PM
},
{
Start: 1722171576000000000, // July 28, 2024 6:29:36 PM
End: 1722208800000000000, // July 29, 2024 4:50:00 AM
},
}
type queryParams struct {
start int64
end int64
limit uint64
offset uint64
}
type queryResponse struct {
expectedQuery string
timestamps []uint64
}
// create test cases with mock data, i.e., arrays of timestamps, limit, offset, and expected results
testCases := []struct {
name string
queryResponses []queryResponse
queryParams queryParams
expectedTimestamps []int64
expectedError bool
}{
{
name: "should return correct timestamps when querying within time window",
queryResponses: []queryResponse{
{
expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 2",
timestamps: []uint64{1722259300000000000, 1722259400000000000},
},
},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 2,
offset: 0,
},
expectedTimestamps: []int64{1722259300000000000, 1722259400000000000},
},
{
name: "all data not in first windows",
queryResponses: []queryResponse{
{
expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 3",
timestamps: []uint64{1722259300000000000, 1722259400000000000},
},
{
expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 1",
timestamps: []uint64{1722253000000000000},
},
},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 3,
offset: 0,
},
expectedTimestamps: []int64{1722259300000000000, 1722259400000000000, 1722253000000000000},
},
{
name: "data in multiple windows",
queryResponses: []queryResponse{
{
expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 5",
timestamps: []uint64{1722259300000000000, 1722259400000000000},
},
{
expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 3",
timestamps: []uint64{1722253000000000000},
},
{
expectedQuery: ".*(timestamp >= '1722237600000000000' AND timestamp <= '1722252000000000000').* DESC LIMIT 2",
timestamps: []uint64{1722237700000000000},
},
{
expectedQuery: ".*(timestamp >= '1722208800000000000' AND timestamp <= '1722237600000000000').* DESC LIMIT 1",
timestamps: []uint64{},
},
{
expectedQuery: ".*(timestamp >= '1722171576000000000' AND timestamp <= '1722208800000000000').* DESC LIMIT 1",
timestamps: []uint64{},
},
},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 5,
offset: 0,
},
expectedTimestamps: []int64{1722259300000000000, 1722259400000000000, 1722253000000000000, 1722237700000000000},
},
{
name: "query with offset",
queryResponses: []queryResponse{
{
expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 7",
timestamps: []uint64{1722259210000000000, 1722259220000000000, 1722259230000000000},
},
{
expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 4",
timestamps: []uint64{1722253000000000000, 1722254000000000000, 1722255000000000000},
},
{
expectedQuery: ".*(timestamp >= '1722237600000000000' AND timestamp <= '1722252000000000000').* DESC LIMIT 1",
timestamps: []uint64{1722237700000000000},
},
},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 4,
offset: 3,
},
expectedTimestamps: []int64{1722253000000000000, 1722254000000000000, 1722255000000000000, 1722237700000000000},
},
{
name: "query with offset and limit- data spread across multiple windows",
queryResponses: []queryResponse{
{
expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 11",
timestamps: []uint64{},
},
{
expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 11",
timestamps: []uint64{1722253000000000000, 1722254000000000000, 1722255000000000000},
},
{
expectedQuery: ".*(timestamp >= '1722237600000000000' AND timestamp <= '1722252000000000000').* DESC LIMIT 8",
timestamps: []uint64{1722237700000000000, 1722237800000000000, 1722237900000000000, 1722237910000000000, 1722237920000000000},
},
{
expectedQuery: ".*(timestamp >= '1722208800000000000' AND timestamp <= '1722237600000000000').* DESC LIMIT 3",
timestamps: []uint64{1722208810000000000, 1722208820000000000, 1722208830000000000},
},
},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 5,
offset: 6,
},
expectedTimestamps: []int64{1722237910000000000, 1722237920000000000, 1722208810000000000, 1722208820000000000, 1722208830000000000},
},
{
name: "don't allow pagination to get more than 10k spans",
queryResponses: []queryResponse{},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 10,
offset: 9991,
},
expectedError: true,
},
}
cols := []cmock.ColumnType{
{Name: "timestamp", Type: "UInt64"},
{Name: "name", Type: "String"},
}
testName := "name"
options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
// iterate over test data, create reader and run test
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Setup mock
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &regexMatcher{})
require.NoError(t, err, "Failed to create ClickHouse mock")
// Configure mock responses
for _, response := range tc.queryResponses {
values := make([][]any, 0, len(response.timestamps))
for _, ts := range response.timestamps {
values = append(values, []any{&ts, &testName})
}
mock.ExpectQuery(response.expectedQuery).WillReturnRows(
cmock.NewRows(cols, values),
)
}
// Create reader and querier
reader := clickhouseReader.NewReaderFromClickhouseConnection(
mock,
options,
nil,
"",
featureManager.StartManager(),
"",
true,
)
q := &querier{
reader: reader,
builder: queryBuilder.NewQueryBuilder(
queryBuilder.QueryBuilderOptions{
BuildTraceQuery: tracesV3.PrepareTracesQuery,
},
featureManager.StartManager(),
),
}
// Update query parameters
params.Start = tc.queryParams.start
params.End = tc.queryParams.end
params.CompositeQuery.BuilderQueries["A"].Limit = tc.queryParams.limit
params.CompositeQuery.BuilderQueries["A"].Offset = tc.queryParams.offset
// Execute query
results, errMap, err := q.runWindowBasedListQuery(context.Background(), params, tsRanges)
if tc.expectedError {
require.Error(t, err)
return
}
// Assertions
require.NoError(t, err, "Query execution failed")
require.Nil(t, errMap, "Unexpected error map in results")
require.Len(t, results, 1, "Expected exactly one result set")
result := results[0]
require.Equal(t, "A", result.QueryName, "Incorrect query name in results")
require.Len(t, result.List, len(tc.expectedTimestamps),
"Result count mismatch: got %d results, expected %d",
len(result.List), len(tc.expectedTimestamps))
for i, expected := range tc.expectedTimestamps {
require.Equal(t, expected, result.List[i].Timestamp.UnixNano(),
"Timestamp mismatch at index %d: got %d, expected %d",
i, result.List[i].Timestamp.UnixNano(), expected)
}
// Verify mock expectations
err = mock.ExpectationsWereMet()
require.NoError(t, err, "Mock expectations were not met")
})
}
}


@@ -12,6 +12,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/constants"
chErrors "go.signoz.io/signoz/pkg/query-service/errors"
"go.signoz.io/signoz/pkg/query-service/querycache"
"go.signoz.io/signoz/pkg/query-service/utils"
@@ -48,10 +49,11 @@ type querier struct {
testingMode bool
queriesExecuted []string
// tuple of start and end time in milliseconds
timeRanges [][]int
returnedSeries []*v3.Series
returnedErr error
UseLogsNewSchema bool
timeRanges [][]int
returnedSeries []*v3.Series
returnedErr error
UseLogsNewSchema bool
UseTraceNewSchema bool
}
type QuerierOptions struct {
@@ -308,56 +310,121 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang
return results, errQueriesByName, err
}
func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) {
func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) {
res := make([]*v3.Result, 0)
qName := ""
pageSize := uint64(0)
limit := uint64(0)
offset := uint64(0)
// since we are considering only one query
for name, v := range params.CompositeQuery.BuilderQueries {
qName = name
pageSize = v.PageSize
// for traces specifically
limit = v.Limit
offset = v.Offset
}
data := []*v3.Row{}
tracesLimit := limit + offset
for _, v := range tsRanges {
params.Start = v.Start
params.End = v.End
params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data))
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
}
length := uint64(0)
// this will run only once
for name, query := range queries {
rowList, err := q.reader.GetListResultV3(ctx, query)
// appending the filter to get the next set of data
if params.CompositeQuery.BuilderQueries[qName].DataSource == v3.DataSourceLogs {
params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data))
queries, err := q.builder.PrepareQueries(params)
if err != nil {
errs := []error{err}
errQuriesByName := map[string]error{
name: err,
}
return nil, errQuriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
return nil, nil, err
}
for name, query := range queries {
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
errs := []error{err}
errQueriesByName := map[string]error{
name: err,
}
return nil, errQueriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
}
length += uint64(len(rowList))
data = append(data, rowList...)
}
data = append(data, rowList...)
}
// append a filter to the params
if len(data) > 0 {
params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{
Key: "id",
IsColumn: true,
DataType: "string",
},
Operator: v3.FilterOperatorLessThan,
Value: data[len(data)-1].Data["id"],
})
}
if length > 0 {
params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{
Key: "id",
IsColumn: true,
DataType: "string",
},
Operator: v3.FilterOperatorLessThan,
Value: data[len(data)-1].Data["id"],
})
}
if uint64(len(data)) >= pageSize {
break
if uint64(len(data)) >= pageSize {
break
}
} else {
// TRACE
// we update the offset and limit based on the number of traces found in the current time range
// e.g.
// 1) offset = 0, limit = 100, tsRanges = [t1, t10], [t10, t20], [t20, t30]
//
// if 100 traces are in [t1, t10], all 100 are returned immediately
// if 10 traces are in [t1, t10], we collect those 10, set offset to 0 and limit to 90, and search the next time range [t10, t20]
// if no traces are found in [t1, t10], we search [t10, t20] with offset=0, limit=100
//
// 2) offset = 50, limit = 100, tsRanges = [t1, t10], [t10, t20], [t20, t30]
//
// if we find 150 traces with limit=150 and offset=0 in [t1, t10], we immediately return 100 traces (the first 50 are skipped by the offset)
// if we find 50 in [t1, t10] with limit=150 and offset=0, we set limit=100, offset=0 and search the next time range [t10, t20]
// if no traces are found in [t1, t10], we search [t10, t20] with limit=150 and offset=0
// max limit + offset is 10k for pagination
if tracesLimit > constants.TRACE_V4_MAX_PAGINATION_LIMIT {
return nil, nil, fmt.Errorf("maximum traces that can be paginated is 10000")
}
params.CompositeQuery.BuilderQueries[qName].Offset = 0
params.CompositeQuery.BuilderQueries[qName].Limit = tracesLimit
queries, err := q.builder.PrepareQueries(params)
if err != nil {
return nil, nil, err
}
for name, query := range queries {
rowList, err := q.reader.GetListResultV3(ctx, query)
if err != nil {
errs := []error{err}
errQueriesByName := map[string]error{
name: err,
}
return nil, errQueriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
}
length += uint64(len(rowList))
// skip the traces unless offset is 0
for _, row := range rowList {
if offset == 0 {
data = append(data, row)
} else {
offset--
}
}
}
tracesLimit = tracesLimit - length
if uint64(len(data)) >= limit {
break
}
}
}
res = append(res, &v3.Result{
@@ -369,14 +436,24 @@ func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangePar
func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) {
// List query has support for only one query.
// we skip PanelTypeTrace here as it has a custom order by regardless of what's in the payload
if params.CompositeQuery != nil &&
len(params.CompositeQuery.BuilderQueries) == 1 &&
params.CompositeQuery.PanelType != v3.PanelTypeTrace {
for _, v := range params.CompositeQuery.BuilderQueries {
if (v.DataSource == v3.DataSourceLogs && !q.UseLogsNewSchema) ||
(v.DataSource == v3.DataSourceTraces && !q.UseTraceNewSchema) {
break
}
// only allow logs/traces queries ordered by timestamp desc
// TODO(nitya): allow for timestamp asc
if (v.DataSource == v3.DataSourceLogs || v.DataSource == v3.DataSourceTraces) &&
len(v.OrderBy) == 1 &&
v.OrderBy[0].ColumnName == "timestamp" &&
v.OrderBy[0].Order == "desc" {
startEndArr := utils.GetListTsRanges(params.Start, params.End)
return q.runWindowBasedListQuery(ctx, params, startEndArr)
}
}
}
@@ -416,13 +493,13 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
close(ch)
var errs []error
errQueriesByName := make(map[string]error)
res := make([]*v3.Result, 0)
// read values from the channel
for r := range ch {
if r.Err != nil {
errs = append(errs, r.Err)
errQueriesByName[r.Name] = r.Err
continue
}
res = append(res, &v3.Result{
@@ -431,7 +508,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
})
}
if len(errs) != 0 {
return nil, errQueriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...))
}
return res, nil, nil
}
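
The offset/limit bookkeeping in the TRACE branch of runWindowBasedListQuery above is easier to follow in isolation. Below is a minimal, hypothetical Go sketch (not part of this changeset; the function name and shape are invented for illustration) that replays the same skip-then-collect walk over per-window result counts:

package main

import "fmt"

// paginateWindows simulates the trace pagination: each window is queried with
// a budget of limit+offset rows, rows are skipped while offset > 0, and the
// remaining budget shrinks by what each window returned.
func paginateWindows(windowCounts []uint64, limit, offset uint64) uint64 {
	collected := uint64(0)
	tracesLimit := limit + offset
	for _, found := range windowCounts {
		// the query for this window is issued with LIMIT tracesLimit
		returned := found
		if returned > tracesLimit {
			returned = tracesLimit
		}
		for i := uint64(0); i < returned; i++ {
			if offset == 0 {
				collected++
			} else {
				offset--
			}
		}
		tracesLimit -= returned
		if collected >= limit {
			break
		}
	}
	return collected
}

func main() {
	// mirrors example 2 in the code comments: offset=50, limit=100,
	// 50 traces in the first window and 120 in the second
	fmt.Println(paginateWindows([]uint64{50, 120}, 100, 50)) // prints 100
}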


@@ -5,15 +5,21 @@ import (
"encoding/json"
"fmt"
"math"
"regexp"
"strings"
"testing"
"time"
cmock "github.com/srikanthccv/ClickHouse-go-mock"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/cache/inmemory"
"go.signoz.io/signoz/pkg/query-service/featureManager"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/querycache"
"go.signoz.io/signoz/pkg/query-service/utils"
)
func minTimestamp(series []*v3.Series) int64 {
@@ -798,8 +804,8 @@ func TestV2QueryRangeValueType(t *testing.T) {
}
q := NewQuerier(opts)
expectedTimeRangeInQueryString := []string{
fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000), // 31st Jan, 03:23:00 to 31st Jan, 05:23:00
fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000+120*60*1000, 1675115580000+180*60*1000), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00
fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000), // 31st Jan, 03:23:00 to 31st Jan, 05:23:00
fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000+120*60*1000, 1675115580000+180*60*1000), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00
fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675119196722)*int64(1000000), (1675126396722)*int64(1000000)), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00
}
@@ -1178,3 +1184,304 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) {
}
}
}
type regexMatcher struct {
}
func (m *regexMatcher) Match(expectedSQL, actualSQL string) error {
re, err := regexp.Compile(expectedSQL)
if err != nil {
return err
}
if !re.MatchString(actualSQL) {
return fmt.Errorf("expected query to contain %s, got %s", expectedSQL, actualSQL)
}
return nil
}
func Test_querier_runWindowBasedListQuery(t *testing.T) {
params := &v3.QueryRangeParamsV3{
Start: 1722171576000000000, // July 28, 2024 6:29:36 PM
End: 1722262800000000000, // July 29, 2024 7:50:00 PM
CompositeQuery: &v3.CompositeQuery{
PanelType: v3.PanelTypeList,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
Expression: "A",
DataSource: v3.DataSourceTraces,
PageSize: 10,
Limit: 100,
StepInterval: 60,
AggregateOperator: v3.AggregateOperatorNoOp,
SelectColumns: []v3.AttributeKey{{Key: "serviceName"}},
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
},
},
},
}
tsRanges := []utils.LogsListTsRange{
{
Start: 1722259200000000000, // July 29, 2024 6:50:00 PM
End: 1722262800000000000, // July 29, 2024 7:50:00 PM
},
{
Start: 1722252000000000000, // July 29, 2024 4:50:00 PM
End: 1722259200000000000, // July 29, 2024 6:50:00 PM
},
{
Start: 1722237600000000000, // July 29, 2024 12:50:00 PM
End: 1722252000000000000, // July 29, 2024 4:50:00 PM
},
{
Start: 1722208800000000000, // July 29, 2024 4:50:00 AM
End: 1722237600000000000, // July 29, 2024 12:50:00 PM
},
{
Start: 1722171576000000000, // July 28, 2024 6:29:36 PM
End: 1722208800000000000, // July 29, 2024 4:50:00 AM
},
}
type queryParams struct {
start int64
end int64
limit uint64
offset uint64
}
type queryResponse struct {
expectedQuery string
timestamps []uint64
}
// create test struct with mock data, i.e. an array of timestamps, limit, offset and expected results
testCases := []struct {
name string
queryResponses []queryResponse
queryParams queryParams
expectedTimestamps []int64
expectedError bool
}{
{
name: "should return correct timestamps when querying within time window",
queryResponses: []queryResponse{
{
expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 2",
timestamps: []uint64{1722259300000000000, 1722259400000000000},
},
},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 2,
offset: 0,
},
expectedTimestamps: []int64{1722259300000000000, 1722259400000000000},
},
{
name: "all data not in first windows",
queryResponses: []queryResponse{
{
expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 3",
timestamps: []uint64{1722259300000000000, 1722259400000000000},
},
{
expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 1",
timestamps: []uint64{1722253000000000000},
},
},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 3,
offset: 0,
},
expectedTimestamps: []int64{1722259300000000000, 1722259400000000000, 1722253000000000000},
},
{
name: "data in multiple windows",
queryResponses: []queryResponse{
{
expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 5",
timestamps: []uint64{1722259300000000000, 1722259400000000000},
},
{
expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 3",
timestamps: []uint64{1722253000000000000},
},
{
expectedQuery: ".*(timestamp >= '1722237600000000000' AND timestamp <= '1722252000000000000').* DESC LIMIT 2",
timestamps: []uint64{1722237700000000000},
},
{
expectedQuery: ".*(timestamp >= '1722208800000000000' AND timestamp <= '1722237600000000000').* DESC LIMIT 1",
timestamps: []uint64{},
},
{
expectedQuery: ".*(timestamp >= '1722171576000000000' AND timestamp <= '1722208800000000000').* DESC LIMIT 1",
timestamps: []uint64{},
},
},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 5,
offset: 0,
},
expectedTimestamps: []int64{1722259300000000000, 1722259400000000000, 1722253000000000000, 1722237700000000000},
},
{
name: "query with offset",
queryResponses: []queryResponse{
{
expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 7",
timestamps: []uint64{1722259210000000000, 1722259220000000000, 1722259230000000000},
},
{
expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 4",
timestamps: []uint64{1722253000000000000, 1722254000000000000, 1722255000000000000},
},
{
expectedQuery: ".*(timestamp >= '1722237600000000000' AND timestamp <= '1722252000000000000').* DESC LIMIT 1",
timestamps: []uint64{1722237700000000000},
},
},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 4,
offset: 3,
},
expectedTimestamps: []int64{1722253000000000000, 1722254000000000000, 1722255000000000000, 1722237700000000000},
},
{
name: "query with offset and limit- data spread across multiple windows",
queryResponses: []queryResponse{
{
expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 11",
timestamps: []uint64{},
},
{
expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 11",
timestamps: []uint64{1722253000000000000, 1722254000000000000, 1722255000000000000},
},
{
expectedQuery: ".*(timestamp >= '1722237600000000000' AND timestamp <= '1722252000000000000').* DESC LIMIT 8",
timestamps: []uint64{1722237700000000000, 1722237800000000000, 1722237900000000000, 1722237910000000000, 1722237920000000000},
},
{
expectedQuery: ".*(timestamp >= '1722208800000000000' AND timestamp <= '1722237600000000000').* DESC LIMIT 3",
timestamps: []uint64{1722208810000000000, 1722208820000000000, 1722208830000000000},
},
},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 5,
offset: 6,
},
expectedTimestamps: []int64{1722237910000000000, 1722237920000000000, 1722208810000000000, 1722208820000000000, 1722208830000000000},
},
{
name: "don't allow pagination to get more than 10k spans",
queryResponses: []queryResponse{},
queryParams: queryParams{
start: 1722171576000000000,
end: 1722262800000000000,
limit: 10,
offset: 9991,
},
expectedError: true,
},
}
cols := []cmock.ColumnType{
{Name: "timestamp", Type: "UInt64"},
{Name: "name", Type: "String"},
}
testName := "name"
options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace")
// iterate over test data, create reader and run test
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Setup mock
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &regexMatcher{})
require.NoError(t, err, "Failed to create ClickHouse mock")
// Configure mock responses
for _, response := range tc.queryResponses {
values := make([][]any, 0, len(response.timestamps))
for _, ts := range response.timestamps {
ts := ts // copy so each row points at its own timestamp (loop variables are shared before Go 1.22)
values = append(values, []any{&ts, &testName})
}
mock.ExpectQuery(response.expectedQuery).WillReturnRows(
cmock.NewRows(cols, values),
)
}
// Create reader and querier
reader := clickhouseReader.NewReaderFromClickhouseConnection(
mock,
options,
nil,
"",
featureManager.StartManager(),
"",
true,
)
q := &querier{
reader: reader,
builder: queryBuilder.NewQueryBuilder(
queryBuilder.QueryBuilderOptions{
BuildTraceQuery: tracesV3.PrepareTracesQuery,
},
featureManager.StartManager(),
),
}
// Update query parameters
params.Start = tc.queryParams.start
params.End = tc.queryParams.end
params.CompositeQuery.BuilderQueries["A"].Limit = tc.queryParams.limit
params.CompositeQuery.BuilderQueries["A"].Offset = tc.queryParams.offset
// Execute query
results, errMap, err := q.runWindowBasedListQuery(context.Background(), params, tsRanges)
if tc.expectedError {
require.Error(t, err)
return
}
// Assertions
require.NoError(t, err, "Query execution failed")
require.Nil(t, errMap, "Unexpected error map in results")
require.Len(t, results, 1, "Expected exactly one result set")
result := results[0]
require.Equal(t, "A", result.QueryName, "Incorrect query name in results")
require.Len(t, result.List, len(tc.expectedTimestamps),
"Result count mismatch: got %d results, expected %d",
len(result.List), len(tc.expectedTimestamps))
for i, expected := range tc.expectedTimestamps {
require.Equal(t, expected, result.List[i].Timestamp.UnixNano(),
"Timestamp mismatch at index %d: got %d, expected %d",
i, result.List[i].Timestamp.UnixNano(), expected)
}
// Verify mock expectations
err = mock.ExpectationsWereMet()
require.NoError(t, err, "Mock expectations were not met")
})
}
}
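
The hard-coded tsRanges above step backwards from the query end in doubling windows (1h, 2h, 4h, 8h, then the remainder). A rough sketch of how such ranges could be generated — an assumption for illustration; the real utils.GetListTsRanges may differ in details:

package main

import "fmt"

type tsRange struct{ Start, End int64 }

const hourNanos int64 = 3600 * 1e9

// listTsRanges walks backwards from end, emitting windows of 1h, 2h, 4h, ...
// until start is covered. Ranges are ordered most recent first, matching the
// order in which the window-based list query scans them.
func listTsRanges(start, end int64) []tsRange {
	ranges := []tsRange{}
	window := hourNanos
	for end > start {
		s := end - window
		if s < start {
			s = start
		}
		ranges = append(ranges, tsRange{Start: s, End: end})
		end = s
		window *= 2
	}
	return ranges
}

func main() {
	// reproduces the five ranges used in the test above
	for _, r := range listTsRanges(1722171576000000000, 1722262800000000000) {
		fmt.Println(r.Start, r.End)
	}
}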


@@ -116,7 +116,9 @@ func expressionToQuery(
for _, tag := range qp.CompositeQuery.BuilderQueries[variable].GroupBy {
groupTags = append(groupTags, tag.Key)
}
if qp.CompositeQuery.PanelType != v3.PanelTypeTable {
groupTags = append(groupTags, "ts")
}
if joinUsing == "" {
for _, tag := range groupTags {
joinUsing += fmt.Sprintf("%s.`%s` as `%s`, ", variable, tag, tag)


@@ -498,11 +498,11 @@ var testLogsWithFormula = []struct {
},
},
},
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT now() as ts, attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " +
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.value + B.value as value FROM (SELECT now() as ts, attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " +
"toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] = true AND " +
"has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as A INNER JOIN (SELECT now() as ts, attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " +
"toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key1.2')] = true AND " +
"has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
"has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1`",
},
{
Name: "test formula with dot in filter and group by materialized attribute",
@@ -707,12 +707,12 @@ var testLogsWithFormulaV2 = []struct {
},
},
},
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT attributes_bool['key1.1'] as `key1.1`, " +
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.value + B.value as value FROM (SELECT attributes_bool['key1.1'] as `key1.1`, " +
"toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) " +
"AND attributes_bool['key1.1'] = true AND mapContains(attributes_bool, 'key1.1') AND mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as A INNER JOIN (SELECT " +
"attributes_bool['key1.1'] as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) " +
"AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) AND attributes_bool['key1.2'] = true AND mapContains(attributes_bool, 'key1.2') AND " +
"mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
"mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1`",
},
{
Name: "test formula with dot in filter and group by materialized attribute",


@@ -384,6 +384,11 @@ func LogCommentEnricher(next http.Handler) http.Handler {
client = "api"
}
email, err := auth.GetEmailFromJwt(r.Context())
if err != nil {
zap.S().Errorf("error while getting email from jwt: %v", err)
}
kvs := map[string]string{
"path": path,
"dashboardID": dashboardID,
@@ -392,6 +397,7 @@ func LogCommentEnricher(next http.Handler) http.Handler {
"client": client,
"viewName": viewName,
"servicesTab": tab,
"email": email,
}
r = r.WithContext(context.WithValue(r.Context(), common.LogCommentKey, kvs))


@@ -10,7 +10,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/utils"
)
var AggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
v3.AggregateOperatorP05: 0.05,
v3.AggregateOperatorP10: 0.10,
v3.AggregateOperatorP20: 0.20,
@@ -22,7 +22,7 @@ var aggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
v3.AggregateOperatorP99: 0.99,
}
var AggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{
v3.AggregateOperatorAvg: "avg",
v3.AggregateOperatorMax: "max",
v3.AggregateOperatorMin: "min",
@@ -109,7 +109,7 @@ func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attri
return selectLabels
}
func GetSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
var selectLabels []string
if aggregatorOperator == v3.AggregateOperatorNoOp {
return ""
@@ -173,7 +173,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
if item.Key.IsColumn {
subQuery, err := ExistsSubQueryForFixedColumn(item.Key, item.Operator)
if err != nil {
return "", err
}
@@ -199,7 +199,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
return queryString, nil
}
func ExistsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (string, error) {
if key.DataType == v3.AttributeKeyDataTypeString {
if op == v3.FilterOperatorExists {
return fmt.Sprintf("%s %s ''", key.Key, tracesOperatorMappingV3[v3.FilterOperatorNotEqual]), nil
@@ -244,7 +244,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
having := Having(mq.Having)
if having != "" {
having = " having " + having
}
@@ -272,7 +272,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
// we don't need value for first query
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
queryTmpl = "SELECT " + GetSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
}
emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy)
@@ -281,7 +281,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
}
filterSubQuery += emptyValuesInGroupByFilter
groupBy := GroupByAttributeKeyTags(panelType, options.GraphLimitQtype, mq.GroupBy...)
if groupBy != "" {
groupBy = " group by " + groupBy
}
@@ -291,7 +291,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
}
if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
}
aggregationKey := ""
@@ -311,7 +311,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
rate = rate / 60.0
}
op := fmt.Sprintf("%s(%s)/%f", aggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate)
op := fmt.Sprintf("%s(%s)/%f", AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case
@@ -324,17 +324,17 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
v3.AggregateOperatorP90,
v3.AggregateOperatorP95,
v3.AggregateOperatorP99:
op := fmt.Sprintf("quantile(%v)(%s)", aggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey)
op := fmt.Sprintf("quantile(%v)(%s)", AggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
op := fmt.Sprintf("%s(%s)", aggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey)
op := fmt.Sprintf("%s(%s)", AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorCount:
if mq.AggregateAttribute.Key != "" {
if mq.AggregateAttribute.IsColumn {
subQuery, err := ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
if err == nil {
filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery)
}
@@ -354,9 +354,9 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
var query string
if panelType == v3.PanelTypeTrace {
withSubQuery := fmt.Sprintf(constants.TracesExplorerViewSQLSelectWithSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_LOCAL_TABLENAME, spanIndexTableTimeFilter, filterSubQuery)
withSubQuery = AddLimitToQuery(withSubQuery, mq.Limit)
if mq.Offset != 0 {
withSubQuery = AddOffsetToQuery(withSubQuery, mq.Offset)
}
// query = withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME)
query = fmt.Sprintf(constants.TracesExplorerViewSQLSelectBeforeSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME) + withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectAfterSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME, spanIndexTableTimeFilter)
@@ -403,7 +403,7 @@ func groupBy(panelType v3.PanelType, graphLimitQtype string, tags ...string) str
return strings.Join(tags, ",")
}
func GroupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
groupTags := []string{}
for _, tag := range tags {
groupTags = append(groupTags, fmt.Sprintf("`%s`", tag.Key))
@@ -456,7 +456,7 @@ func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []
return str
}
func Having(items []v3.Having) string {
// aggregate something and filter on that aggregate
var having []string
for _, item := range items {
@@ -465,7 +465,7 @@ func having(items []v3.Having) string {
return strings.Join(having, " AND ")
}
func ReduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOperator) (string, error) {
var groupBy string
switch reduceTo {
@@ -485,14 +485,14 @@ func reduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOpe
return query, nil
}
func AddLimitToQuery(query string, limit uint64) string {
if limit == 0 {
limit = 100
}
return fmt.Sprintf("%s LIMIT %d", query, limit)
}
func AddOffsetToQuery(query string, offset uint64) string {
return fmt.Sprintf("%s OFFSET %d", query, offset)
}
@@ -513,7 +513,7 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder
if err != nil {
return "", err
}
query = AddLimitToQuery(query, mq.Limit)
return query, nil
} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
@@ -529,13 +529,13 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder
return "", err
}
if panelType == v3.PanelTypeValue {
query, err = ReduceToQuery(query, mq.ReduceTo, mq.AggregateOperator)
}
if panelType == v3.PanelTypeList || panelType == v3.PanelTypeTable {
query = AddLimitToQuery(query, mq.Limit)
if mq.Offset != 0 {
query = AddOffsetToQuery(query, mq.Offset)
}
}
return query, err


@@ -0,0 +1,118 @@
package v4
import (
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils"
)
// if the field is timestamp/id/value we don't need to enrich
// if the field is static we don't need to enrich
// for all others we need to enrich
// an attribute/resource can be materialized/dematerialized
// but the query should work regardless and shouldn't fail
func isEnriched(field v3.AttributeKey) bool {
// timestamp and the synthetic order-by value never need enrichment
if field.Key == "timestamp" || field.Key == constants.SigNozOrderByValue {
return true
}
// we need to check if the field is static and return false if isColumn is not set
if _, ok := constants.StaticFieldsTraces[field.Key]; ok && field.IsColumn {
return true
}
return false
}
func enrichKeyWithMetadata(key v3.AttributeKey, keys map[string]v3.AttributeKey) v3.AttributeKey {
if isEnriched(key) {
return key
}
if v, ok := constants.StaticFieldsTraces[key.Key]; ok {
return v
}
for _, key := range utils.GenerateEnrichmentKeys(key) {
if val, ok := keys[key]; ok {
return val
}
}
// enrich with default values if metadata is not found
if key.Type == "" {
key.Type = v3.AttributeKeyTypeTag
}
if key.DataType == "" {
key.DataType = v3.AttributeKeyDataTypeString
}
return key
}
func Enrich(params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) {
if params.CompositeQuery.QueryType != v3.QueryTypeBuilder {
return
}
for _, query := range params.CompositeQuery.BuilderQueries {
if query.DataSource == v3.DataSourceTraces {
EnrichTracesQuery(query, keys)
}
}
}
func EnrichTracesQuery(query *v3.BuilderQuery, keys map[string]v3.AttributeKey) {
// enrich aggregate attribute
query.AggregateAttribute = enrichKeyWithMetadata(query.AggregateAttribute, keys)
// enrich filter items
if query.Filters != nil && len(query.Filters.Items) > 0 {
for idx, filter := range query.Filters.Items {
query.Filters.Items[idx].Key = enrichKeyWithMetadata(filter.Key, keys)
// if the serviceName column is used, use the corresponding resource attribute as well during filtering
// since there is only one of these resource attributes we are adding it here directly.
// move it somewhere else if this list is big
if filter.Key.Key == "serviceName" {
query.Filters.Items[idx].Key = v3.AttributeKey{
Key: "service.name",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
IsColumn: false,
}
}
}
}
// enrich group by
for idx, groupBy := range query.GroupBy {
query.GroupBy[idx] = enrichKeyWithMetadata(groupBy, keys)
}
// enrich order by
query.OrderBy = enrichOrderBy(query.OrderBy, keys)
// enrich select columns
for idx, selectColumn := range query.SelectColumns {
query.SelectColumns[idx] = enrichKeyWithMetadata(selectColumn, keys)
}
}
func enrichOrderBy(items []v3.OrderBy, keys map[string]v3.AttributeKey) []v3.OrderBy {
enrichedItems := []v3.OrderBy{}
for i := 0; i < len(items); i++ {
attributeKey := enrichKeyWithMetadata(v3.AttributeKey{
Key: items[i].ColumnName,
}, keys)
enrichedItems = append(enrichedItems, v3.OrderBy{
ColumnName: items[i].ColumnName,
Order: items[i].Order,
Key: attributeKey.Key,
DataType: attributeKey.DataType,
Type: attributeKey.Type,
IsColumn: attributeKey.IsColumn,
})
}
return enrichedItems
}
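
A short usage sketch of the new enrichment entry point, with illustrative values and the "<key>##<type>##<datatype>" key format exercised by the tests below:

// hypothetical caller-side snippet, not part of this changeset
query := &v3.BuilderQuery{
	DataSource: v3.DataSourceTraces,
	Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
		{Key: v3.AttributeKey{Key: "bytes"}, Value: 100, Operator: ">"},
	}},
}
keys := map[string]v3.AttributeKey{
	"bytes##tag##int64": {Key: "bytes", Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeInt64},
}
EnrichTracesQuery(query, keys)
// the filter key now carries Type: tag and DataType: int64; keys with no
// matching metadata fall back to the tag/string defaults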


@@ -0,0 +1,196 @@
package v4
import (
"reflect"
"testing"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
func TestEnrichTracesQuery(t *testing.T) {
type args struct {
query *v3.BuilderQuery
keys map[string]v3.AttributeKey
want *v3.BuilderQuery
}
tests := []struct {
name string
args args
}{
{
name: "test 1",
args: args{
query: &v3.BuilderQuery{
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "bytes", Type: v3.AttributeKeyTypeTag}, Value: 100, Operator: ">"},
},
},
OrderBy: []v3.OrderBy{},
},
keys: map[string]v3.AttributeKey{
"bytes##tag##int64": {Key: "bytes", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag},
},
want: &v3.BuilderQuery{
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "bytes", Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeInt64}, Value: 100, Operator: ">"},
},
},
OrderBy: []v3.OrderBy{},
},
},
},
{
name: "test service name",
args: args{
query: &v3.BuilderQuery{
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "serviceName", DataType: v3.AttributeKeyDataTypeString, IsColumn: true}, Value: "myservice", Operator: "="},
{Key: v3.AttributeKey{Key: "serviceName"}, Value: "myservice", Operator: "="},
},
},
OrderBy: []v3.OrderBy{},
},
keys: map[string]v3.AttributeKey{},
want: &v3.BuilderQuery{
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "service.name", Type: v3.AttributeKeyTypeResource, DataType: v3.AttributeKeyDataTypeString}, Value: "myservice", Operator: "="},
{Key: v3.AttributeKey{Key: "service.name", Type: v3.AttributeKeyTypeResource, DataType: v3.AttributeKeyDataTypeString}, Value: "myservice", Operator: "="},
},
},
OrderBy: []v3.OrderBy{},
},
},
},
{
name: "test mat attrs",
args: args{
query: &v3.BuilderQuery{
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "http.route", DataType: v3.AttributeKeyDataTypeString, IsColumn: true}, Value: "/api", Operator: "="},
{Key: v3.AttributeKey{Key: "msgSystem"}, Value: "name", Operator: "="},
{Key: v3.AttributeKey{Key: "external_http_url"}, Value: "name", Operator: "="},
},
},
OrderBy: []v3.OrderBy{},
},
keys: map[string]v3.AttributeKey{},
want: &v3.BuilderQuery{
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "http.route", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: "/api", Operator: "="},
{Key: v3.AttributeKey{Key: "msgSystem", DataType: v3.AttributeKeyDataTypeString, IsColumn: true}, Value: "name", Operator: "="},
{Key: v3.AttributeKey{Key: "external_http_url", DataType: v3.AttributeKeyDataTypeString, IsColumn: true}, Value: "name", Operator: "="},
},
},
OrderBy: []v3.OrderBy{},
},
},
},
{
name: "test aggregateattr, filter, groupby, order by",
args: args{
query: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorCount,
AggregateAttribute: v3.AttributeKey{
Key: "http.route",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
},
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "http.route", DataType: v3.AttributeKeyDataTypeString}, Value: "/api", Operator: "="},
},
},
GroupBy: []v3.AttributeKey{
{Key: "http.route", DataType: v3.AttributeKeyDataTypeString},
{Key: "msgSystem", DataType: v3.AttributeKeyDataTypeString},
},
OrderBy: []v3.OrderBy{
{ColumnName: "httpRoute", Order: v3.DirectionAsc},
},
},
keys: map[string]v3.AttributeKey{
"http.route##tag##string": {Key: "http.route", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
},
want: &v3.BuilderQuery{
AggregateAttribute: v3.AttributeKey{
Key: "http.route",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeTag,
IsColumn: true,
},
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "http.route", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: "/api", Operator: "="},
},
},
GroupBy: []v3.AttributeKey{
{Key: "http.route", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
{Key: "msgSystem", DataType: v3.AttributeKeyDataTypeString, IsJSON: false, IsColumn: true},
},
OrderBy: []v3.OrderBy{
{Key: "httpRoute", Order: v3.DirectionAsc, ColumnName: "httpRoute", DataType: v3.AttributeKeyDataTypeString, IsColumn: true},
},
},
},
},
{
name: "enrich default values",
args: args{
query: &v3.BuilderQuery{
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "testattr"}},
},
},
OrderBy: []v3.OrderBy{{ColumnName: "timestamp", Order: v3.DirectionAsc}},
},
keys: map[string]v3.AttributeKey{},
want: &v3.BuilderQuery{
Filters: &v3.FilterSet{
Items: []v3.FilterItem{{Key: v3.AttributeKey{Key: "testattr", Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString}}},
},
// isColumn won't matter in timestamp as it will always be a column
OrderBy: []v3.OrderBy{{Key: "timestamp", Order: v3.DirectionAsc, ColumnName: "timestamp"}},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
EnrichTracesQuery(tt.args.query, tt.args.keys)
// Check AggregateAttribute
if tt.args.query.AggregateAttribute.Key != "" && !reflect.DeepEqual(tt.args.query.AggregateAttribute, tt.args.want.AggregateAttribute) {
t.Errorf("EnrichTracesQuery() AggregateAttribute = %v, want %v", tt.args.query.AggregateAttribute, tt.args.want.AggregateAttribute)
}
// Check Filters
if tt.args.query.Filters != nil && !reflect.DeepEqual(tt.args.query.Filters, tt.args.want.Filters) {
t.Errorf("EnrichTracesQuery() Filters = %v, want %v", tt.args.query.Filters, tt.args.want.Filters)
}
// Check GroupBy
if tt.args.query.GroupBy != nil && !reflect.DeepEqual(tt.args.query.GroupBy, tt.args.want.GroupBy) {
t.Errorf("EnrichTracesQuery() GroupBy = %v, want %v", tt.args.query.GroupBy, tt.args.want.GroupBy)
}
// Check OrderBy
if tt.args.query.OrderBy != nil && !reflect.DeepEqual(tt.args.query.OrderBy, tt.args.want.OrderBy) {
t.Errorf("EnrichTracesQuery() OrderBy = %v, want %v", tt.args.query.OrderBy, tt.args.want.OrderBy)
}
})
}
}


@@ -0,0 +1,414 @@
package v4
import (
"fmt"
"strings"
"go.signoz.io/signoz/pkg/query-service/app/resource"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils"
)
const NANOSECOND = 1000000000
var tracesOperatorMappingV3 = map[v3.FilterOperator]string{
v3.FilterOperatorIn: "IN",
v3.FilterOperatorNotIn: "NOT IN",
v3.FilterOperatorEqual: "=",
v3.FilterOperatorNotEqual: "!=",
v3.FilterOperatorLessThan: "<",
v3.FilterOperatorLessThanOrEq: "<=",
v3.FilterOperatorGreaterThan: ">",
v3.FilterOperatorGreaterThanOrEq: ">=",
v3.FilterOperatorLike: "ILIKE",
v3.FilterOperatorNotLike: "NOT ILIKE",
v3.FilterOperatorRegex: "match(%s, %s)",
v3.FilterOperatorNotRegex: "NOT match(%s, %s)",
v3.FilterOperatorContains: "ILIKE",
v3.FilterOperatorNotContains: "NOT ILIKE",
v3.FilterOperatorExists: "mapContains(%s, '%s')",
v3.FilterOperatorNotExists: "NOT mapContains(%s, '%s')",
}
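// format-string operators (match/mapContains) are filled in with the column and
// key in the switch below, e.g. mapContains(attributes_string, 'host'); the
// rest are used infix, e.g. attributes_string['method'] IN ['GET','POST']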
func getClickHouseTracesColumnType(columnType v3.AttributeKeyType) string {
if columnType == v3.AttributeKeyTypeResource {
return "resources"
}
return "attributes"
}
func getClickHouseTracesColumnDataType(columnDataType v3.AttributeKeyDataType) string {
if columnDataType == v3.AttributeKeyDataTypeFloat64 || columnDataType == v3.AttributeKeyDataTypeInt64 {
return "number"
}
if columnDataType == v3.AttributeKeyDataTypeBool {
return "bool"
}
return "string"
}
func getColumnName(key v3.AttributeKey) string {
// if the key is present in the static fields, return it as is
if _, ok := constants.StaticFieldsTraces[key.Key]; ok {
return key.Key
}
if !key.IsColumn {
keyType := getClickHouseTracesColumnType(key.Type)
keyDType := getClickHouseTracesColumnDataType(key.DataType)
return fmt.Sprintf("%s_%s['%s']", keyType, keyDType, key.Key)
}
return "`" + utils.GetClickhouseColumnNameV2(string(key.Type), string(key.DataType), key.Key) + "`"
}
// getSelectLabels returns the select labels for the query based on groupBy
func getSelectLabels(groupBy []v3.AttributeKey) string {
var labels []string
for _, tag := range groupBy {
name := getColumnName(tag)
labels = append(labels, fmt.Sprintf(" %s as `%s`", name, tag.Key))
}
return strings.Join(labels, ",")
}
func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
var conditions []string
if fs != nil && len(fs.Items) != 0 {
for _, item := range fs.Items {
// skip if it's a resource attribute
if item.Key.Type == v3.AttributeKeyTypeResource {
continue
}
val := item.Value
// generate the key
columnName := getColumnName(item.Key)
var fmtVal string
item.Operator = v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
if item.Operator != v3.FilterOperatorExists && item.Operator != v3.FilterOperatorNotExists {
var err error
val, err = utils.ValidateAndCastValue(val, item.Key.DataType)
if err != nil {
return "", fmt.Errorf("invalid value for key %s: %v", item.Key.Key, err)
}
}
if val != nil {
fmtVal = utils.ClickHouseFormattedValue(val)
}
if operator, ok := tracesOperatorMappingV3[item.Operator]; ok {
switch item.Operator {
case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
// we also want to treat %, _ as literals for contains
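// e.g. the value "102.%" becomes ILIKE '%102.\%%' (see Test_buildTracesFilterQuery below)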
val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value), false)
conditions = append(conditions, fmt.Sprintf("%s %s '%%%s%%'", columnName, operator, val))
case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
if item.Key.IsColumn {
subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(item.Key, item.Operator)
if err != nil {
return "", err
}
conditions = append(conditions, subQuery)
} else {
cType := getClickHouseTracesColumnType(item.Key.Type)
cDataType := getClickHouseTracesColumnDataType(item.Key.DataType)
col := fmt.Sprintf("%s_%s", cType, cDataType)
conditions = append(conditions, fmt.Sprintf(operator, col, item.Key.Key))
}
default:
conditions = append(conditions, fmt.Sprintf("%s %s %s", columnName, operator, fmtVal))
}
} else {
return "", fmt.Errorf("unsupported operator %s", item.Operator)
}
}
}
queryString := strings.Join(conditions, " AND ")
return queryString, nil
}
func handleEmptyValuesInGroupBy(groupBy []v3.AttributeKey) (string, error) {
// TODO(nitya): in future when we support user based mat column handle them
// skipping now as we don't support creating them
filterItems := []v3.FilterItem{}
if len(groupBy) != 0 {
for _, item := range groupBy {
if !item.IsColumn {
filterItems = append(filterItems, v3.FilterItem{
Key: item,
Operator: v3.FilterOperatorExists,
})
}
}
}
if len(filterItems) != 0 {
filterSet := v3.FilterSet{
Operator: "AND",
Items: filterItems,
}
return buildTracesFilterQuery(&filterSet)
}
return "", nil
}
// orderBy returns a string of comma separated tags for order by clause
// if there are remaining items which are not present in tags they are also added
// if the order is not specified, it defaults to ASC
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string {
var orderBy []string
for _, item := range items {
if item.ColumnName == constants.SigNozOrderByValue {
orderBy = append(orderBy, fmt.Sprintf("value %s", item.Order))
} else if _, ok := tagLookup[item.ColumnName]; ok {
orderBy = append(orderBy, fmt.Sprintf("`%s` %s", item.ColumnName, item.Order))
} else if panelType == v3.PanelTypeList {
attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
name := getColumnName(attr)
orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order))
}
}
return orderBy
}
func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string {
tagLookup := map[string]struct{}{}
for _, v := range tags {
tagLookup[v.Key] = struct{}{}
}
orderByArray := orderBy(panelType, items, tagLookup)
if len(orderByArray) == 0 {
if panelType == v3.PanelTypeList {
orderByArray = append(orderByArray, constants.TIMESTAMP+" DESC")
} else {
orderByArray = append(orderByArray, "value DESC")
}
}
str := strings.Join(orderByArray, ",")
return str
}
func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.PanelType, options v3.QBOptions) (string, error) {
tracesStart := utils.GetEpochNanoSecs(start)
tracesEnd := utils.GetEpochNanoSecs(end)
// 1800 seconds are subtracted so that the bucket start covers all the fingerprints.
bucketStart := tracesStart/NANOSECOND - 1800
bucketEnd := tracesEnd / NANOSECOND
timeFilter := fmt.Sprintf("(timestamp >= '%d' AND timestamp <= '%d') AND (ts_bucket_start >= %d AND ts_bucket_start <= %d)", tracesStart, tracesEnd, bucketStart, bucketEnd)
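// e.g. tracesStart = 1702979056000000000 ns gives bucketStart = 1702979056 - 1800 = 1702977256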
filterSubQuery, err := buildTracesFilterQuery(mq.Filters)
if err != nil {
return "", err
}
if filterSubQuery != "" {
filterSubQuery = " AND " + filterSubQuery
}
emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy)
if err != nil {
return "", err
}
if emptyValuesInGroupByFilter != "" {
filterSubQuery = filterSubQuery + " AND " + emptyValuesInGroupByFilter
}
resourceSubQuery, err := resource.BuildResourceSubQuery("signoz_traces", "distributed_traces_v3_resource", bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false)
if err != nil {
return "", err
}
// join both the filter clauses
if resourceSubQuery != "" {
filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery + ")"
}
// timerange will be sent in epoch millisecond
selectLabels := getSelectLabels(mq.GroupBy)
if selectLabels != "" {
selectLabels = selectLabels + ","
}
orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy)
if orderBy != "" {
orderBy = " order by " + orderBy
}
if mq.AggregateOperator == v3.AggregateOperatorNoOp {
var query string
if panelType == v3.PanelTypeTrace {
withSubQuery := fmt.Sprintf(constants.TracesExplorerViewSQLSelectWithSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3_LOCAL_TABLENAME, timeFilter, filterSubQuery)
withSubQuery = tracesV3.AddLimitToQuery(withSubQuery, mq.Limit)
if mq.Offset != 0 {
withSubQuery = tracesV3.AddOffsetToQuery(withSubQuery, mq.Offset)
}
query = fmt.Sprintf(constants.TracesExplorerViewSQLSelectBeforeSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3) + withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectAfterSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3, timeFilter)
} else if panelType == v3.PanelTypeList {
if len(mq.SelectColumns) == 0 {
return "", fmt.Errorf("select columns cannot be empty for panelType %s", panelType)
}
// add it to the select labels
selectLabels = getSelectLabels(mq.SelectColumns)
queryNoOpTmpl := fmt.Sprintf("SELECT timestamp as timestamp_datetime, spanID, traceID,%s ", selectLabels) + "from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_SPAN_INDEX_V3 + " where %s %s" + "%s"
query = fmt.Sprintf(queryNoOpTmpl, timeFilter, filterSubQuery, orderBy)
} else {
return "", fmt.Errorf("unsupported aggregate operator %s for panelType %s", mq.AggregateOperator, panelType)
}
return query, nil
// ---- NOOP ends here ----
}
having := tracesV3.Having(mq.Having)
if having != "" {
having = " having " + having
}
groupBy := tracesV3.GroupByAttributeKeyTags(panelType, options.GraphLimitQtype, mq.GroupBy...)
if groupBy != "" {
groupBy = " group by " + groupBy
}
aggregationKey := ""
if mq.AggregateAttribute.Key != "" {
aggregationKey = getColumnName(mq.AggregateAttribute)
}
var queryTmpl string
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT"
} else if panelType == v3.PanelTypeTable {
queryTmpl = "SELECT "
} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
// Select the aggregate value for interval
queryTmpl = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d SECOND) AS ts,", step)
}
queryTmpl = queryTmpl + selectLabels +
" %s as value " +
"from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_SPAN_INDEX_V3 +
" where " + timeFilter + "%s" +
"%s%s" +
"%s"
// we don't need value for first query
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT " + tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
}
if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
}
switch mq.AggregateOperator {
case v3.AggregateOperatorRateSum,
v3.AggregateOperatorRateMax,
v3.AggregateOperatorRateAvg,
v3.AggregateOperatorRateMin,
v3.AggregateOperatorRate:
rate := float64(step)
if options.PreferRPM {
rate = rate / 60.0
}
op := fmt.Sprintf("%s(%s)/%f", tracesV3.AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case
v3.AggregateOperatorP05,
v3.AggregateOperatorP10,
v3.AggregateOperatorP20,
v3.AggregateOperatorP25,
v3.AggregateOperatorP50,
v3.AggregateOperatorP75,
v3.AggregateOperatorP90,
v3.AggregateOperatorP95,
v3.AggregateOperatorP99:
op := fmt.Sprintf("quantile(%v)(%s)", tracesV3.AggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
op := fmt.Sprintf("%s(%s)", tracesV3.AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorCount:
if mq.AggregateAttribute.Key != "" {
if mq.AggregateAttribute.IsColumn {
subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
if err == nil {
filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery)
}
} else {
column := getColumnName(mq.AggregateAttribute)
filterSubQuery = fmt.Sprintf("%s AND has(%s, '%s')", filterSubQuery, column, mq.AggregateAttribute.Key)
}
}
op := "toFloat64(count())"
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorCountDistinct:
op := fmt.Sprintf("toFloat64(count(distinct(%s)))", aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
default:
return "", fmt.Errorf("unsupported aggregate operator %s", mq.AggregateOperator)
}
}
// PrepareTracesQuery returns the query string for traces
// start and end are in epoch millisecond
// step is in seconds
func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.QBOptions) (string, error) {
// adjust the start and end time to the step interval
if panelType == v3.PanelTypeGraph {
// adjust the start and end time to the step interval for graph panel types
start = start - (start % (mq.StepInterval * 1000))
end = end - (end % (mq.StepInterval * 1000))
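// e.g. start = 1700000123456 ms with StepInterval = 60 s:
// start -> 1700000123456 - (1700000123456 % 60000) = 1700000100000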
}
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
// give me just the group by names
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options)
if err != nil {
return "", err
}
query = tracesV3.AddLimitToQuery(query, mq.Limit)
return query, nil
} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options)
if err != nil {
return "", err
}
return query, nil
}
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options)
if err != nil {
return "", err
}
if panelType == v3.PanelTypeValue {
query, err = tracesV3.ReduceToQuery(query, mq.ReduceTo, mq.AggregateOperator)
}
if panelType == v3.PanelTypeList || panelType == v3.PanelTypeTable {
query = tracesV3.AddLimitToQuery(query, mq.Limit)
if mq.Offset != 0 {
query = tracesV3.AddOffsetToQuery(query, mq.Offset)
}
}
return query, err
}


@@ -0,0 +1,729 @@
package v4
import (
"testing"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
func Test_getClickHouseTracesColumnType(t *testing.T) {
type args struct {
columnType v3.AttributeKeyType
}
tests := []struct {
name string
args args
want string
}{
{
name: "tag",
args: args{
columnType: v3.AttributeKeyTypeTag,
},
want: "attributes",
},
{
name: "resource",
args: args{
columnType: v3.AttributeKeyTypeResource,
},
want: "resources",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getClickHouseTracesColumnType(tt.args.columnType); got != tt.want {
t.Errorf("GetClickhouseTracesColumnType() = %v, want %v", got, tt.want)
}
})
}
}
func Test_getClickHouseTracesColumnDataType(t *testing.T) {
type args struct {
columnDataType v3.AttributeKeyDataType
}
tests := []struct {
name string
args args
want string
}{
{
name: "string",
args: args{
columnDataType: v3.AttributeKeyDataTypeString,
},
want: "string",
},
{
name: "float64",
args: args{
columnDataType: v3.AttributeKeyDataTypeFloat64,
},
want: "number",
},
{
name: "int64",
args: args{
columnDataType: v3.AttributeKeyDataTypeInt64,
},
want: "number",
},
{
name: "bool",
args: args{
columnDataType: v3.AttributeKeyDataTypeBool,
},
want: "bool",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getClickHouseTracesColumnDataType(tt.args.columnDataType); got != tt.want {
t.Errorf("getClickhouseTracesColumnDataType() = %v, want %v", got, tt.want)
}
})
}
}
func Test_getColumnName(t *testing.T) {
type args struct {
key v3.AttributeKey
}
tests := []struct {
name string
args args
want string
}{
{
name: "tag",
args: args{
key: v3.AttributeKey{Key: "data", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
},
want: "attributes_string['data']",
},
{
name: "column",
args: args{
key: v3.AttributeKey{Key: "data", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
},
want: "`attribute_string_data`",
},
{
name: "static column",
args: args{
key: v3.AttributeKey{Key: "spanKind", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
},
want: "spanKind",
},
{
name: "missing meta",
args: args{
key: v3.AttributeKey{Key: "xyz"},
},
want: "attributes_string['xyz']",
},
{
name: "new composite column",
args: args{
key: v3.AttributeKey{Key: "response_status_code"},
},
want: "response_status_code",
},
{
name: "new composite column with metadata",
args: args{
key: v3.AttributeKey{Key: "response_status_code", DataType: v3.AttributeKeyDataTypeString, IsColumn: true},
},
want: "response_status_code",
},
{
name: "new normal column with metadata",
args: args{
key: v3.AttributeKey{Key: "http.route", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
},
want: "`attribute_string_http$$route`",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getColumnName(tt.args.key); got != tt.want {
t.Errorf("getColumnName() = %v, want %v", got, tt.want)
}
})
}
}
func Test_getSelectLabels(t *testing.T) {
type args struct {
groupBy []v3.AttributeKey
}
tests := []struct {
name string
args args
want string
}{
{
name: "count",
args: args{
groupBy: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
},
want: " attributes_string['user_name'] as `user_name`",
},
{
name: "multiple group by",
args: args{
groupBy: []v3.AttributeKey{
{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, // static col
{Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource, IsColumn: true},
},
},
want: " name as `name`, `resource_string_service_name` as `service_name`",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getSelectLabels(tt.args.groupBy); got != tt.want {
t.Errorf("getSelectLabels() = %v, want %v", got, tt.want)
}
})
}
}
func Test_buildTracesFilterQuery(t *testing.T) {
type args struct {
fs *v3.FilterSet
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "Test ignore resource",
args: args{
fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "service.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: []interface{}{"service"}, Operator: v3.FilterOperatorIn},
},
}},
want: "",
},
{
name: "Test buildTracesFilterQuery in, nin",
args: args{
fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{"GET", "POST"}, Operator: v3.FilterOperatorIn},
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{"PUT"}, Operator: v3.FilterOperatorNotIn},
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: []interface{}{"server"}, Operator: v3.FilterOperatorNotIn},
{Key: v3.AttributeKey{Key: "status.code", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{200}, Operator: v3.FilterOperatorNotIn},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeFloat64, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{100.0}, Operator: v3.FilterOperatorIn},
{Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{true}, Operator: v3.FilterOperatorIn},
}},
},
want: "attributes_string['method'] IN ['GET','POST'] AND attributes_string['method'] NOT IN ['PUT'] AND attributes_number['status.code'] NOT IN [200] AND attributes_number['duration'] IN [100] AND attributes_bool['isDone'] IN [true]",
wantErr: false,
},
{
name: "Test buildTracesFilterQuery not eq, neq, gt, lt, gte, lte",
args: args{
fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 102, Operator: v3.FilterOperatorEqual},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 100, Operator: v3.FilterOperatorNotEqual},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: v3.FilterOperatorGreaterThan},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 200, Operator: v3.FilterOperatorLessThan},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeFloat64, Type: v3.AttributeKeyTypeTag}, Value: 10.0, Operator: v3.FilterOperatorGreaterThanOrEq},
{Key: v3.AttributeKey{Key: "duration_str", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "200", Operator: v3.FilterOperatorLessThanOrEq},
}},
},
want: "attributes_number['duration'] = 102 AND attributes_number['duration'] != 100 AND attributes_number['duration'] > 10 AND attributes_number['duration'] < 200" +
" AND attributes_number['duration'] >= 10.000000 AND attributes_string['duration_str'] <= '200'",
wantErr: false,
},
{
name: "Test contains, ncontains, like, nlike, regex, nregex",
args: args{
fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102.%", Operator: v3.FilterOperatorContains},
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "103_", Operator: v3.FilterOperatorNotContains},
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102.", Operator: v3.FilterOperatorLike},
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102", Operator: v3.FilterOperatorNotLike},
{Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: "/mypath", Operator: v3.FilterOperatorRegex},
{Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: "/health.*", Operator: v3.FilterOperatorNotRegex},
}},
},
want: "attributes_string['host'] ILIKE '%102.\\%%' AND attributes_string['host'] NOT ILIKE '%103\\_%' AND attributes_string['host'] ILIKE '102.' AND attributes_string['host'] NOT ILIKE '102' AND " +
"match(`attribute_string_path`, '/mypath') AND NOT match(`attribute_string_path`, '/health.*')",
},
{
name: "Test exists, nexists",
args: args{
fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorExists},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorExists},
{Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
{Key: v3.AttributeKey{Key: "host1", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
{Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
}},
},
want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND path = ''",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := buildTracesFilterQuery(tt.args.fs)
if (err != nil) != tt.wantErr {
t.Errorf("buildTracesFilterQuery() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("buildTracesFilterQuery() = %v, want %v", got, tt.want)
}
})
}
}
func Test_handleEmptyValuesInGroupBy(t *testing.T) {
type args struct {
groupBy []v3.AttributeKey
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "Test handleEmptyValuesInGroupBy",
args: args{
groupBy: []v3.AttributeKey{{Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
},
want: "mapContains(attributes_string, 'bytes')",
wantErr: false,
},
{
name: "Test handleEmptyValuesInGroupBy",
args: args{
groupBy: []v3.AttributeKey{{Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
},
want: "",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := handleEmptyValuesInGroupBy(tt.args.groupBy)
if (err != nil) != tt.wantErr {
t.Errorf("handleEmptyValuesInGroupBy() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("handleEmptyValuesInGroupBy() = %v, want %v", got, tt.want)
}
})
}
}
func Test_orderByAttributeKeyTags(t *testing.T) {
type args struct {
panelType v3.PanelType
items []v3.OrderBy
tags []v3.AttributeKey
}
tests := []struct {
name string
args args
want string
}{
{
name: "test",
args: args{
panelType: v3.PanelTypeGraph,
items: []v3.OrderBy{{ColumnName: "name", Order: "ASC"}},
tags: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
},
want: "`name` ASC",
},
{
name: "order by value",
args: args{
panelType: v3.PanelTypeGraph,
items: []v3.OrderBy{{ColumnName: "name", Order: "ASC"}, {ColumnName: constants.SigNozOrderByValue, Order: "DESC"}},
tags: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
},
want: "`name` ASC,value DESC",
},
{
name: "test",
args: args{
panelType: v3.PanelTypeList,
items: []v3.OrderBy{{ColumnName: "status", Order: "DESC", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
{ColumnName: "route", Order: "DESC", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
},
want: "attributes_string['status'] DESC,`attribute_string_route` DESC",
},
{
name: "ignore order by in table panel",
args: args{
panelType: v3.PanelTypeTable,
items: []v3.OrderBy{{ColumnName: "timestamp", Order: "DESC"}},
tags: []v3.AttributeKey{},
},
want: "value DESC",
},
{
name: "add default order by ts for list panel",
args: args{
panelType: v3.PanelTypeList,
items: []v3.OrderBy{},
tags: []v3.AttributeKey{},
},
want: "timestamp DESC",
},
{
name: "add default order by value for graph panel",
args: args{
panelType: v3.PanelTypeGraph,
items: []v3.OrderBy{},
tags: []v3.AttributeKey{},
},
want: "value DESC",
},
{
name: "don't add default order by for table panel",
args: args{
panelType: v3.PanelTypeTable,
items: []v3.OrderBy{},
tags: []v3.AttributeKey{},
},
want: "value DESC",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := orderByAttributeKeyTags(tt.args.panelType, tt.args.items, tt.args.tags); got != tt.want {
t.Errorf("orderByAttributeKeyTags() = %v, want %v", got, tt.want)
}
})
}
}
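// The default-order cases above imply a simple fallback rule: list panels
// fall back to ordering by timestamp, while graph and table panels fall back
// to ordering by the aggregated value. A minimal sketch of that rule (an
// illustration inferred from the expected strings, not the actual helper):
//
//	func defaultOrderBy(panelType v3.PanelType) string {
//		if panelType == v3.PanelTypeList {
//			return "timestamp DESC" // list views page through spans by time
//		}
//		return "value DESC" // graph and table panels rank by the aggregated value
//	}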
func Test_buildTracesQuery(t *testing.T) {
type args struct {
start int64
end int64
step int64
mq *v3.BuilderQuery
panelType v3.PanelType
options v3.QBOptions
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "Test buildTracesQuery",
args: args{
panelType: v3.PanelTypeTable,
start: 1680066360726210000,
end: 1680066458000000000,
step: 1000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorCount,
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "http.method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
Value: 100,
Operator: v3.FilterOperatorEqual,
},
},
},
GroupBy: []v3.AttributeKey{{Key: "http.method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{
{ColumnName: "http.method", Order: "ASC"}},
},
},
want: "SELECT attributes_string['http.method'] as `http.method`, toFloat64(count()) as value from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_string['http.method'] = '100' AND mapContains(attributes_string, 'http.method') " +
"group by `http.method` order by `http.method` ASC",
},
{
name: "Test buildTracesQuery",
args: args{
panelType: v3.PanelTypeTable,
start: 1680066360726210000,
end: 1680066458000000000,
step: 1000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorCount,
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "bytes", Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeInt64}, Value: 100, Operator: ">"},
{Key: v3.AttributeKey{Key: "service.name", Type: v3.AttributeKeyTypeResource, DataType: v3.AttributeKeyDataTypeString}, Value: "myService", Operator: "="},
},
},
GroupBy: []v3.AttributeKey{{Key: "host", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeResource}},
OrderBy: []v3.OrderBy{
{ColumnName: "host", Order: "ASC"}},
},
},
want: "SELECT resources_number['host'] as `host`, toFloat64(count()) as value from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['bytes'] > 100 AND " +
"(resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND " +
"(seen_at_ts_bucket_start <= 1680066458) AND simpleJSONExtractString(labels, 'service.name') = 'myService' AND labels like '%service.name%myService%' AND " +
"( (simpleJSONHas(labels, 'host') AND labels like '%host%') ))) " +
"group by `host` order by `host` ASC",
},
{
name: "test noop list view",
args: args{
panelType: v3.PanelTypeList,
start: 1680066360726210000,
end: 1680066458000000000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorNoOp,
Filters: &v3.FilterSet{},
SelectColumns: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
OrderBy: []v3.OrderBy{{ColumnName: "timestamp", Order: "ASC"}},
},
},
want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) order by timestamp ASC",
},
{
name: "test noop list view-without ts",
args: args{
panelType: v3.PanelTypeList,
start: 1680066360726210000,
end: 1680066458000000000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorNoOp,
Filters: &v3.FilterSet{},
SelectColumns: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
},
},
want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) order by timestamp DESC",
},
{
name: "test noop trace view",
args: args{
panelType: v3.PanelTypeTrace,
start: 1680066360726210000,
end: 1680066458000000000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorNoOp,
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
{Key: v3.AttributeKey{Key: "service.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: "myService", Operator: "="},
},
},
},
},
want: "SELECT subQuery.serviceName, subQuery.name, count() AS span_count, subQuery.durationNano, subQuery.traceID AS traceID FROM signoz_traces.distributed_signoz_index_v3 INNER JOIN " +
"( SELECT * FROM (SELECT traceID, durationNano, serviceName, name FROM signoz_traces.signoz_index_v3 WHERE parentSpanID = '' AND (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND " +
"(ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_string['method'] = 'GET' AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource " +
"WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND simpleJSONExtractString(labels, 'service.name') = 'myService' AND labels like '%service.name%myService%')) " +
"ORDER BY durationNano DESC LIMIT 1 BY traceID LIMIT 100) AS inner_subquery ) AS subQuery ON signoz_traces.distributed_signoz_index_v3.traceID = subQuery.traceID WHERE (timestamp >= '1680066360726210000' AND " +
"timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) GROUP BY subQuery.traceID, subQuery.durationNano, subQuery.name, subQuery.serviceName ORDER BY " +
"subQuery.durationNano desc LIMIT 1 BY subQuery.traceID;",
},
{
name: "Test order by value with having",
args: args{
panelType: v3.PanelTypeTable,
start: 1680066360726210000,
end: 1680066458000000000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorCountDistinct,
Filters: &v3.FilterSet{},
AggregateAttribute: v3.AttributeKey{Key: "name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "ASC"}},
Having: []v3.Having{
{
ColumnName: "name",
Operator: ">",
Value: 10,
},
},
},
},
want: "SELECT toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND " +
"(ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) having value > 10 order by value ASC",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := buildTracesQuery(tt.args.start, tt.args.end, tt.args.step, tt.args.mq, tt.args.panelType, tt.args.options)
if (err != nil) != tt.wantErr {
t.Errorf("buildTracesQuery() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("buildTracesQuery() = %v, want %v", got, tt.want)
}
})
}
}
func TestPrepareTracesQuery(t *testing.T) {
type args struct {
start int64
end int64
panelType v3.PanelType
mq *v3.BuilderQuery
options v3.QBOptions
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "test with limit - first",
args: args{
start: 1680066360726210000,
end: 1680066458000000000,
panelType: v3.PanelTypeTable,
mq: &v3.BuilderQuery{
StepInterval: 60,
AggregateOperator: v3.AggregateOperatorCountDistinct,
Filters: &v3.FilterSet{},
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
GroupBy: []v3.AttributeKey{{Key: "function", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
Limit: 10,
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}},
},
options: v3.QBOptions{
GraphLimitQtype: constants.FirstQueryGraphLimit,
},
},
want: "SELECT `function` from (SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " +
"where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') group by `function` order by value DESC) LIMIT 10",
},
{
name: "test with limit - second",
args: args{
start: 1680066360726210000,
end: 1680066458000000000,
panelType: v3.PanelTypeTable,
mq: &v3.BuilderQuery{
StepInterval: 60,
AggregateOperator: v3.AggregateOperatorCountDistinct,
Filters: &v3.FilterSet{},
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
GroupBy: []v3.AttributeKey{{Key: "function", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}},
Limit: 10,
},
options: v3.QBOptions{
GraphLimitQtype: constants.SecondQueryGraphLimit,
},
},
want: "SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where " +
"(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (%s) group by `function` order by value DESC",
},
{
name: "test with limit with resources- first",
args: args{
start: 1680066360726210000,
end: 1680066458000000000,
panelType: v3.PanelTypeTable,
mq: &v3.BuilderQuery{
StepInterval: 60,
AggregateOperator: v3.AggregateOperatorCountDistinct,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "line", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag},
Value: 100,
Operator: v3.FilterOperatorEqual,
},
{
Key: v3.AttributeKey{Key: "hostname", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
Value: "server1",
Operator: v3.FilterOperatorEqual,
},
},
},
AggregateAttribute: v3.AttributeKey{Key: "name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
GroupBy: []v3.AttributeKey{
{Key: "function", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
{Key: "service.name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
},
Limit: 10,
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}},
},
options: v3.QBOptions{
GraphLimitQtype: constants.FirstQueryGraphLimit,
},
},
want: "SELECT `function`,`service.name` from (SELECT `attribute_string_function` as `function`, `resource_string_service$$name` as `service.name`, toFloat64(count(distinct(name))) as value " +
"from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " +
"AND attributes_number['line'] = 100 AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE " +
"(seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%' AND " +
"( (simpleJSONHas(labels, 'service.name') AND labels like '%service.name%') ))) group by `function`,`service.name` order by value DESC) LIMIT 10",
},
{
name: "test with limit with resources - second",
args: args{
start: 1680066360726210000,
end: 1680066458000000000,
panelType: v3.PanelTypeTable,
mq: &v3.BuilderQuery{
StepInterval: 60,
AggregateOperator: v3.AggregateOperatorCountDistinct,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "line", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag},
Value: 100,
Operator: v3.FilterOperatorEqual,
},
{
Key: v3.AttributeKey{Key: "hostname", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
Value: "server1",
Operator: v3.FilterOperatorEqual,
},
},
},
AggregateAttribute: v3.AttributeKey{Key: "name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
GroupBy: []v3.AttributeKey{
{Key: "function", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
{Key: "serviceName", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
},
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}},
Limit: 10,
},
options: v3.QBOptions{
GraphLimitQtype: constants.SecondQueryGraphLimit,
},
},
want: "SELECT `attribute_string_function` as `function`, serviceName as `serviceName`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " +
"where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['line'] = 100 " +
"AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) " +
"AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (%s) group by `function`,`serviceName` order by value DESC",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := PrepareTracesQuery(tt.args.start, tt.args.end, tt.args.panelType, tt.args.mq, tt.args.options)
if (err != nil) != tt.wantErr {
t.Errorf("PrepareTracesQuery() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("PrepareTracesQuery() = %v, want %v", got, tt.want)
}
})
}
}
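The two GraphLimitQtype cases above work as a pair: the FirstQueryGraphLimit variant selects only the top group-key tuples (the outer SELECT ... LIMIT 10), while the SecondQueryGraphLimit variant repeats the aggregation with a `(keys...) GLOBAL IN (%s)` placeholder. The `%s` is left verbatim in the expected strings, which suggests the caller substitutes the first query's tuples into the second query before execution, roughly like this (a hypothetical sketch, not code from this compare):

	firstResult := "('fn1','svc1'),('fn2','svc2')" // tuples returned by the first query
	secondQuery := fmt.Sprintf(preparedSecondQuery, firstResult)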

View File

@@ -239,6 +239,8 @@ const (
 	SIGNOZ_TRACE_DBNAME = "signoz_traces"
 	SIGNOZ_SPAN_INDEX_TABLENAME = "distributed_signoz_index_v2"
 	SIGNOZ_SPAN_INDEX_LOCAL_TABLENAME = "signoz_index_v2"
+	SIGNOZ_SPAN_INDEX_V3 = "distributed_signoz_index_v3"
+	SIGNOZ_SPAN_INDEX_V3_LOCAL_TABLENAME = "signoz_index_v3"
 	SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME = "time_series_v4"
 	SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME = "time_series_v4_6hrs"
 	SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME = "time_series_v4_1day"
@@ -444,3 +446,203 @@ const MaxFilterSuggestionsExamplesLimit = 10
var SpanRenderLimitStr = GetOrDefaultEnv("SPAN_RENDER_LIMIT", "2500")
var MaxSpansInTraceStr = GetOrDefaultEnv("MAX_SPANS_IN_TRACE", "250000")
var StaticFieldsTraces = map[string]v3.AttributeKey{
"timestamp": {},
"traceID": {
Key: "traceID",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"spanID": {
Key: "spanID",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"parentSpanID": {
Key: "parentSpanID",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"name": {
Key: "name",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"serviceName": {
Key: "serviceName",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"kind": {
Key: "kind",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"spanKind": {
Key: "spanKind",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"durationNano": {
Key: "durationNano",
DataType: v3.AttributeKeyDataTypeFloat64,
IsColumn: true,
},
"statusCode": {
Key: "statusCode",
DataType: v3.AttributeKeyDataTypeFloat64,
IsColumn: true,
},
"hasError": {
Key: "hasError",
DataType: v3.AttributeKeyDataTypeBool,
IsColumn: true,
},
"statusMessage": {
Key: "statusMessage",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"statusCodeString": {
Key: "statusCodeString",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"externalHttpMethod": {
Key: "externalHttpMethod",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"externalHttpUrl": {
Key: "externalHttpUrl",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"dbSystem": {
Key: "dbSystem",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"dbName": {
Key: "dbName",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"dbOperation": {
Key: "dbOperation",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"peerService": {
Key: "peerService",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"httpMethod": {
Key: "httpMethod",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"httpUrl": {
Key: "httpUrl",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"httpRoute": {
Key: "httpRoute",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"httpHost": {
Key: "httpHost",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"msgSystem": {
Key: "msgSystem",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"msgOperation": {
Key: "msgOperation",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"rpcSystem": {
Key: "rpcSystem",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"rpcService": {
Key: "rpcService",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"rpcMethod": {
Key: "rpcMethod",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"responseStatusCode": {
Key: "responseStatusCode",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
// new support
"response_status_code": {
Key: "response_status_code",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"external_http_url": {
Key: "external_http_url",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"http_url": {
Key: "http_url",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"external_http_method": {
Key: "external_http_method",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"http_method": {
Key: "http_method",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"http_host": {
Key: "http_host",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"db_name": {
Key: "db_name",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"db_operation": {
Key: "db_operation",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"has_error": {
Key: "has_error",
DataType: v3.AttributeKeyDataTypeBool,
IsColumn: true,
},
"is_remote": {
Key: "is_remote",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
// the simple attributes are not present here as
// they are taken care by new format <attribute_type>_<attribute_datatype>_'<attribute_key>'
}
const TRACE_V4_MAX_PAGINATION_LIMIT = 10000
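The trailing comment above points at the materialized-column convention used throughout the new v3 queries. A self-contained, hypothetical illustration of that `<attribute_type>_<attribute_datatype>_<attribute_key>` shape (the helper name is invented, and the "$$" dot-escape is inferred from expectations in the tests earlier in this compare, e.g. `attribute_string_path` and `resource_string_service$$name`):

package main

import (
	"fmt"
	"strings"
)

// materializedColumnName renders <attribute_type>_<attribute_datatype>_<attribute_key>,
// with dots in the key escaped as "$$", matching the column names the tests expect.
func materializedColumnName(attrType, dataType, key string) string {
	return fmt.Sprintf("%s_%s_%s", attrType, dataType, strings.ReplaceAll(key, ".", "$$"))
}

func main() {
	fmt.Println(materializedColumnName("attribute", "string", "path"))        // attribute_string_path
	fmt.Println(materializedColumnName("resource", "string", "service.name")) // resource_string_service$$name
}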

View File

@@ -183,7 +183,7 @@ func PrepareFilters(labels map[string]string, whereClauseItems []v3.FilterItem,
 	var attrFound bool
 	// as of now this logic will only apply for logs
-	for _, tKey := range utils.GenerateLogEnrichmentKeys(v3.AttributeKey{Key: key}) {
+	for _, tKey := range utils.GenerateEnrichmentKeys(v3.AttributeKey{Key: key}) {
 		if val, ok := keys[tKey]; ok {
 			attributeKey = val
 			attrFound = true

View File

@@ -29,15 +29,10 @@ type Reader interface {
 	// GetDisks returns a list of disks configured in the underlying DB. It is supported by
 	// clickhouse only.
 	GetDisks(ctx context.Context) (*[]model.DiskItem, *model.ApiError)
-	GetSpanFilters(ctx context.Context, query *model.SpanFilterParams) (*model.SpanFiltersResponse, *model.ApiError)
 	GetTraceAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error)
 	GetTraceAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
 	GetTraceAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
 	GetSpanAttributeKeys(ctx context.Context) (map[string]v3.AttributeKey, error)
-	GetTagFilters(ctx context.Context, query *model.TagFilterParams) (*model.TagFilters, *model.ApiError)
-	GetTagValues(ctx context.Context, query *model.TagFilterParams) (*model.TagValues, *model.ApiError)
-	GetFilteredSpans(ctx context.Context, query *model.GetFilteredSpansParams) (*model.GetFilterSpansResponse, *model.ApiError)
-	GetFilteredSpansAggregates(ctx context.Context, query *model.GetFilteredSpanAggregatesParams) (*model.GetFilteredSpansAggregatesResponse, *model.ApiError)
 	ListErrors(ctx context.Context, params *model.ListErrorsParams) (*[]model.Error, *model.ApiError)
 	CountErrors(ctx context.Context, params *model.CountErrorsParams) (uint64, *model.ApiError)

View File

@@ -173,3 +173,125 @@ type ClusterListRecord struct {
MemoryAllocatable float64 `json:"memoryAllocatable"`
Meta map[string]string `json:"meta"`
}
type DeploymentListRequest struct {
Start int64 `json:"start"` // epoch time in ms
End int64 `json:"end"` // epoch time in ms
Filters *v3.FilterSet `json:"filters"`
GroupBy []v3.AttributeKey `json:"groupBy"`
OrderBy *v3.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
type DeploymentListResponse struct {
Type ResponseType `json:"type"`
Records []DeploymentListRecord `json:"records"`
Total int `json:"total"`
}
type DeploymentListRecord struct {
DeploymentName string `json:"deploymentName"`
CPUUsage float64 `json:"cpuUsage"`
MemoryUsage float64 `json:"memoryUsage"`
DesiredPods int `json:"desiredPods"`
AvailablePods int `json:"availablePods"`
CPURequest float64 `json:"cpuRequest"`
MemoryRequest float64 `json:"memoryRequest"`
CPULimit float64 `json:"cpuLimit"`
MemoryLimit float64 `json:"memoryLimit"`
Restarts int `json:"restarts"`
Meta map[string]string `json:"meta"`
}
type DaemonSetListRequest struct {
Start int64 `json:"start"` // epoch time in ms
End int64 `json:"end"` // epoch time in ms
Filters *v3.FilterSet `json:"filters"`
GroupBy []v3.AttributeKey `json:"groupBy"`
OrderBy *v3.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
type DaemonSetListResponse struct {
Type ResponseType `json:"type"`
Records []DaemonSetListRecord `json:"records"`
Total int `json:"total"`
}
type DaemonSetListRecord struct {
DaemonSetName string `json:"daemonSetName"`
CPUUsage float64 `json:"cpuUsage"`
MemoryUsage float64 `json:"memoryUsage"`
CPURequest float64 `json:"cpuRequest"`
MemoryRequest float64 `json:"memoryRequest"`
CPULimit float64 `json:"cpuLimit"`
MemoryLimit float64 `json:"memoryLimit"`
Restarts int `json:"restarts"`
DesiredNodes int `json:"desiredNodes"`
AvailableNodes int `json:"availableNodes"`
Meta map[string]string `json:"meta"`
}
type StatefulSetListRequest struct {
Start int64 `json:"start"` // epoch time in ms
End int64 `json:"end"` // epoch time in ms
Filters *v3.FilterSet `json:"filters"`
GroupBy []v3.AttributeKey `json:"groupBy"`
OrderBy *v3.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
type StatefulSetListResponse struct {
Type ResponseType `json:"type"`
Records []StatefulSetListRecord `json:"records"`
Total int `json:"total"`
}
type StatefulSetListRecord struct {
StatefulSetName string `json:"statefulSetName"`
CPUUsage float64 `json:"cpuUsage"`
MemoryUsage float64 `json:"memoryUsage"`
CPURequest float64 `json:"cpuRequest"`
MemoryRequest float64 `json:"memoryRequest"`
CPULimit float64 `json:"cpuLimit"`
MemoryLimit float64 `json:"memoryLimit"`
Restarts int `json:"restarts"`
DesiredPods int `json:"desiredPods"`
AvailablePods int `json:"availablePods"`
Meta map[string]string `json:"meta"`
}
type JobListRequest struct {
Start int64 `json:"start"` // epoch time in ms
End int64 `json:"end"` // epoch time in ms
Filters *v3.FilterSet `json:"filters"`
GroupBy []v3.AttributeKey `json:"groupBy"`
OrderBy *v3.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
type JobListResponse struct {
Type ResponseType `json:"type"`
Records []JobListRecord `json:"records"`
Total int `json:"total"`
}
type JobListRecord struct {
JobName string `json:"jobName"`
CPUUsage float64 `json:"cpuUsage"`
MemoryUsage float64 `json:"memoryUsage"`
CPURequest float64 `json:"cpuRequest"`
MemoryRequest float64 `json:"memoryRequest"`
CPULimit float64 `json:"cpuLimit"`
MemoryLimit float64 `json:"memoryLimit"`
Restarts int `json:"restarts"`
DesiredSuccessfulPods int `json:"desiredSuccessfulPods"`
ActivePods int `json:"activePods"`
FailedPods int `json:"failedPods"`
SuccessfulPods int `json:"successfulPods"`
Meta map[string]string `json:"meta"`
}
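For orientation, a hedged sketch of how one of these list requests might be populated, assuming the model and v3 packages used elsewhere in this compare; the namespace key, the "cpu" order-by column, and the timestamps are illustrative assumptions, not documented values:

	req := model.DeploymentListRequest{
		Start: 1732000000000, // epoch ms, per the struct comments
		End:   1732003600000,
		Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
			{Key: v3.AttributeKey{Key: "k8s.namespace.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: "default", Operator: v3.FilterOperatorEqual},
		}},
		GroupBy: []v3.AttributeKey{{Key: "k8s.deployment.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}},
		OrderBy: &v3.OrderBy{ColumnName: "cpu", Order: "desc"},
		Offset:  0,
		Limit:   10,
	}

The DaemonSet, StatefulSet, and Job request types share the same shape, differing only in the record fields their responses return.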

View File

@@ -546,6 +546,9 @@ type SignozLogV2 struct {
 	SeverityText string `json:"severity_text" ch:"severity_text"`
 	SeverityNumber uint8 `json:"severity_number" ch:"severity_number"`
 	Body string `json:"body" ch:"body"`
+	ScopeName string `json:"scope_name" ch:"scope_name"`
+	ScopeVersion string `json:"scope_version" ch:"scope_version"`
+	ScopeString map[string]string `json:"scope_string" ch:"scope_string"`
 	Resources_string map[string]string `json:"resources_string" ch:"resources_string"`
 	Attributes_string map[string]string `json:"attributes_string" ch:"attributes_string"`
 	Attributes_number map[string]float64 `json:"attributes_float" ch:"attributes_number"`

View File

@@ -463,9 +463,9 @@ func (r *BaseRule) ShouldAlert(series v3.Series) (Sample, bool) {
 		}
 	} else if r.compareOp() == ValueOutsideBounds {
 		for _, smpl := range series.Points {
-			if math.Abs(smpl.Value) >= r.targetVal() {
+			if math.Abs(smpl.Value) < r.targetVal() {
 				alertSmpl = Sample{Point: Point{V: smpl.Value}, Metric: lbls}
-				shouldAlert = true
+				shouldAlert = false
 				break
 			}
 		}
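Read on its own, this hunk inverts the outside-bounds check: the loop now looks for a point that falls back inside the bounds and clears the alert, where it previously looked for a point outside the bounds and set it. A self-contained sketch of that inferred semantics, not the repo's ShouldAlert:

package main

import (
	"fmt"
	"math"
)

// outsideBounds alerts only when every point's magnitude is at or beyond the
// target; any point back inside the bounds clears the alert, mirroring the
// new predicate above. The all-points requirement is an inference, since the
// loop's initialization is outside this hunk.
func outsideBounds(points []float64, target float64) bool {
	shouldAlert := len(points) > 0
	for _, v := range points {
		if math.Abs(v) < target {
			shouldAlert = false
			break
		}
	}
	return shouldAlert
}

func main() {
	fmt.Println(outsideBounds([]float64{5, -7, 9}, 4)) // true: all |v| >= 4
	fmt.Println(outsideBounds([]float64{5, 1, 9}, 4))  // false: 1 is inside the bounds
}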

View File

@@ -347,6 +347,7 @@ func createTelemetry() {
"alertsWithTSV2": alertsInfo.AlertsWithTSV2,
"logsBasedAlerts": alertsInfo.LogsBasedAlerts,
"metricBasedAlerts": alertsInfo.MetricBasedAlerts,
"anomalyBasedAlerts": alertsInfo.AnomalyBasedAlerts,
"tracesBasedAlerts": alertsInfo.TracesBasedAlerts,
"totalChannels": alertsInfo.TotalChannels,
"totalSavedViews": savedViewsInfo.TotalSavedViews,

View File

@@ -9,7 +9,7 @@ type LogsListTsRange struct {
 	End int64
 }
-func GetLogsListTsRanges(start, end int64) []LogsListTsRange {
+func GetListTsRanges(start, end int64) []LogsListTsRange {
 	startNano := GetEpochNanoSecs(start)
 	endNano := GetEpochNanoSecs(end)
 	result := []LogsListTsRange{}
@@ -35,13 +35,15 @@ func GetLogsListTsRanges(start, end int64) []LogsListTsRange {
 			tStartNano = startNano
 		}
-	}
+	} else {
+		result = append(result, LogsListTsRange{Start: start, End: end})
+	}
 	return result
 }
 // This tries to see all possible fields that it can fall back to if some meta is missing
-// check Test_GenerateLogEnrichmentKeys for example
-func GenerateLogEnrichmentKeys(field v3.AttributeKey) []string {
+// check Test_GenerateEnrichmentKeys for example
+func GenerateEnrichmentKeys(field v3.AttributeKey) []string {
 	names := []string{}
 	if field.Type != v3.AttributeKeyTypeUnspecified && field.DataType != v3.AttributeKeyDataTypeUnspecified {
 		names = append(names, field.Key+"##"+field.Type.String()+"##"+field.DataType.String())
View File

@@ -7,7 +7,7 @@ import (
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
func TestLogsListTsRange(t *testing.T) {
func TestListTsRange(t *testing.T) {
startEndData := []struct {
name string
start int64
@@ -18,7 +18,7 @@ func TestLogsListTsRange(t *testing.T) {
name: "testing for less then one hour",
start: 1722262800000000000, // July 29, 2024 7:50:00 PM
end: 1722263800000000000, // July 29, 2024 8:06:40 PM
res: []LogsListTsRange{},
res: []LogsListTsRange{{1722262800000000000, 1722263800000000000}},
},
{
name: "testing for more than one hour",
@@ -44,7 +44,7 @@ func TestLogsListTsRange(t *testing.T) {
 	}
 	for _, test := range startEndData {
-		res := GetLogsListTsRanges(test.start, test.end)
+		res := GetListTsRanges(test.start, test.end)
 		for i, v := range res {
 			if test.res[i].Start != v.Start || test.res[i].End != v.End {
 				t.Errorf("expected range was %v - %v, got %v - %v", v.Start, v.End, test.res[i].Start, test.res[i].End)
@@ -53,7 +53,7 @@ func TestLogsListTsRange(t *testing.T) {
 	}
 }
-func Test_GenerateLogEnrichmentKeys(t *testing.T) {
+func Test_GenerateEnrichmentKeys(t *testing.T) {
 	type args struct {
 		field v3.AttributeKey
 	}
@@ -96,8 +96,8 @@ func Test_GenerateLogEnrichmentKeys(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := GenerateLogEnrichmentKeys(tt.args.field); !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("generateLogEnrichmentKeys() = %v, want %v", got, tt.want)
+			if got := GenerateEnrichmentKeys(tt.args.field); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("GenerateEnrichmentKeys() = %v, want %v", got, tt.want)
 			}
 		})
 	}
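Taken together with the renamed helper above, the updated first case asserts that a sub-hour window now comes back as a single full-range slice rather than an empty result. A hedged usage sketch, assuming the utils package from this repo:

	ranges := utils.GetListTsRanges(1722262800000000000, 1722263800000000000) // a window of roughly 17 minutes
	// => []LogsListTsRange{{Start: 1722262800000000000, End: 1722263800000000000}}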