Compare commits: fix/query-… → v0.95.0 (298 commits)

Commit SHA1s:

c9568be5d8, 1c257f3e14, ff8ac96d37, e8035b7dd2, cc77b829af, 49306cbe3d, 233a8e4cc3, 629378bbec, d96073f478, ba8a49929a, a90904951e, 6c57735a81, 4851527840, c5051128fa, 2acdd101d8, 39c2738ef9, d075ceecba, ac81eab7bb, c982b1e76d, 252786deb6, 38ca467d13, a686941880, ae58915020, e9222ab3e0, d801fcee76, 61acd946cc, c477ec65da, 9d999feabb, 0658c561b9, b1ea7eab70, 31e042adf7, f23000831c, f82e9b55f8, f91115948a, 011b769d4d, 0129326a0b, 6c7275d355, c83eaf3d50, 57013e1c4f, 717efaf167, 6709b09646, 144e866afc, 3f2763251a, e67a576c07, c737a7e070, 74be8f5611, 1d3a8ecd66, 0f5825a2b3, eee96503ff, 7f925bd50e, 1aa7e8b5d9, bf704333b3, f63f175a77, b6f5c053a0, abeadc7672, faadc60c74, 360e8309c8, 27580b62ba, bcd21cee74, 2dbe0777f4, 7602d863dd, 68d9c6c3cc, 10c6e1fac7, 3999a64c64, 729bfb31f1, 052fb8b703, 5d9247f591, c0a9948146, f3569a9a02, 0df1ed3b57, d0132f11ae, f61e859901, 4daec45d98, 382d9d4a87, 87ce197631, 3cc5a24a4b, 9b8a892079, 396e0cdc2d, c838d7e2d4, 1a193fb1a9, 88dff3f552, 5bb6d78c42, 369f77977d, 836605def5, cc80923265, 92e5986af2, 912a34da8d, 8b99ba0f9f, 841abf8c0b, df54e6350d, f6bc30050b, 1e76046c7c, 910751713d, 85c671c8d5, 4d2094b4ce, 32410baa72, 2a5fb9fd6f, 514bceca34, ac7d8bcde2, 88312e971d, 17533b2f1c, c4044fa2c5, deddf47e84, 08323e4dfd, ee19f1749b, b21db878e8, a7ddd2ddf0, 4d72f47758, b5b513f1e0, 4878f725ea, eca13075e9, e5ab664483, a3f32b3d85, 9c2f127282, e30de5f13e, 019083983a, fdcad997f5, 03359a40a2, 4f45801729, 674556d672, af987e53ce, 59d5accd33, 5a7ad670d8, 9d04b397ac, a4f3be5e46, 8f833fa62c, 7029233596, d26efd2833, 0e3ac2a179, 249f8be845, 9c952942ad, dac46d82ff, 802ce6de01, 6853f0c99d, 3f8a2870e4, 5fa70ea802, 3a952fa330, 6d97db1d9d, 5412e7f70b, 8e5cb9046d, 760eabb2dc, 35ddaaa2fc, a51ee66c02, 75d189162b, 932918e3a4, aa3bc16dcb, b5098e00a3, 20dc561bfe, 99bbb87738, f1ce93171c, 92794389d6, bd02848623, b5016b061b, c308e8668c, 41ee4176ad, 994663110d, 3a2eab2019, 01202b5800, 2901e052ae, 372372694e, 8e5b1be106, 301d9ca4dd, f350b0e2f0, 090538f11f, db13f85a3c, 3d874c22b0, e68ce11183, 587b0ef6c4, bb6c366031, 5c1f070d8f, 1ce150d4b0, f6d96c2118, ff3235bd02, 8e9a1b34cb, 3d80a03f8a, 1c650c3c23, 6b1d62ba8f, 3c53ba308f, f2abddd2ed, f53a13e7fa, b69ac637c3, a3c039006f, 2141b1b90a, 771ba45d01, 7df5c33ce9, 537c95e05a, 7d9e0523c9, 360285ef33, c17241272f, fa936a7e0d, 498d398ea3, 8b2ed674a4, 9a06603ff3, 4888491a79, 7c9f05c2cc, 160802fe11, 86057cad9f, 210393e281, e96ed433fe, 52636284fc, 2639f975ee, f9db796489, 65018abc4a, 43706f877a, d7fdbcd90d, db0f362482, db440a6eb4, 76b58b7317, bba3e95914, 78df27a140, b40fda02cf, c9e8114b5e, d712dc1f28, 7cdff13343, 08db2febe1, a576982497, 55eadf914b, b91407416b, 24d6d83575, fe95ee716a, b053ce23cd, 57febd2f52, ba6a1c594b, 6afdecbd0f, 41661a5e28, 507dc86af2, ff3bb04655, 31c4f800fc, 51c2bbcd4b, 5610cb1f81, 478d28eda1, ebb2f1fd63, 629e502703, cf4e44d341, 7ce1a1cbca, e2253ec7c0, 86be2869a9, 9ec503e302, 77ee201bb7, 168a7baf6c, c36c492877, 15332b90c1, 53b71d7062, 5e0bf930d6, d6eed8e79d, 6137740907, 1aa6c98822, 8cf511cdb9, a7ce6da7d1, ddb08b3883, 893b11c4a0, b7025af703, 552d44d208, 497315579f, bfaac15ccb, 5e18be6a23, 1793706f87, da2a3c738a, d17dab9a1d, 88b75d4e72, 6327ab5ec6, 5b09490ad7, b50127b567, ba2ed3ad22, eb3dfbf63b, c3e048470d, 4563ff0e62, c9e48b6de9, 06ef9ff384, 26d55875f5, b1864ee328, 8b62c8dced, 273452352d, 8274ebfe37, 7d5e14abb6, 7c17ac42b1, 74ee7bb2c7, 2f5640b2e6, 121debcecc, ff13504a74, d4e373443b, 3ccf822d67, 0e270e6f51, 749df2a979, 9ee5d5d599, 4940dfd46f, 79a31cc205, 5102cf2b7b, 9ec5594648, b6c2ebd6d7, 9a3a8c8305, 2ac45b0174, 2a53918ebd, 9daefeb881, 526cf01cb7, cd4766ec2b, 2196b58d36, 53c58b9983
@@ -1,6 +1,6 @@
 services:
   clickhouse:
-    image: clickhouse/clickhouse-server:24.1.2-alpine
+    image: clickhouse/clickhouse-server:25.5.6
     container_name: clickhouse
     volumes:
       - ${PWD}/fs/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
@@ -23,8 +23,10 @@ services:
       retries: 3
     depends_on:
       - zookeeper
+    environment:
+      - CLICKHOUSE_SKIP_USER_SETUP=1
   zookeeper:
-    image: bitnami/zookeeper:3.7.1
+    image: signoz/zookeeper:3.7.1
     container_name: zookeeper
     volumes:
       - ${PWD}/fs/tmp/zookeeper:/bitnami/zookeeper
@@ -40,7 +42,7 @@ services:
       timeout: 5s
       retries: 3
   schema-migrator-sync:
-    image: signoz/signoz-schema-migrator:v0.111.42
+    image: signoz/signoz-schema-migrator:v0.129.5
     container_name: schema-migrator-sync
     command:
       - sync
@@ -53,7 +55,7 @@ services:
       condition: service_healthy
     restart: on-failure
   schema-migrator-async:
-    image: signoz/signoz-schema-migrator:v0.111.42
+    image: signoz/signoz-schema-migrator:v0.129.5
    container_name: schema-migrator-async
     command:
       - async
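With CLICKHOUSE_SKIP_USER_SETUP=1 the container comes up with the default user, so a bare TCP connection on port 9000 (the same address the Makefile targets and the collector config below use) is enough to verify it. A minimal connectivity check, assuming the ClickHouse Go client (github.com/ClickHouse/clickhouse-go/v2), which is pulled in here for illustration only and is not part of this change:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
    // Same address the Makefile devenv targets use (tcp://127.0.0.1:9000);
    // CLICKHOUSE_SKIP_USER_SETUP=1 leaves the default user available.
    conn, err := clickhouse.Open(&clickhouse.Options{
        Addr: []string{"127.0.0.1:9000"},
    })
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    var version string
    if err := conn.QueryRow(context.Background(), "SELECT version()").Scan(&version); err != nil {
        log.Fatal(err)
    }
    fmt.Println("clickhouse is up, version", version)
}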
.devenv/docker/signoz-otel-collector/compose.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
+services:
+  signoz-otel-collector:
+    image: signoz/signoz-otel-collector:v0.128.2
+    container_name: signoz-otel-collector-dev
+    command:
+      - --config=/etc/otel-collector-config.yaml
+      - --feature-gates=-pkg.translator.prometheus.NormalizeName
+    volumes:
+      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+    environment:
+      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
+      - LOW_CARDINAL_EXCEPTION_GROUPING=false
+    ports:
+      - "4317:4317" # OTLP gRPC receiver
+      - "4318:4318" # OTLP HTTP receiver
+      - "13133:13133" # health check extension
+    healthcheck:
+      test:
+        - CMD
+        - wget
+        - --spider
+        - -q
+        - localhost:13133
+      interval: 30s
+      timeout: 5s
+      retries: 3
+    restart: unless-stopped
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
@@ -0,0 +1,96 @@
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+  prometheus:
+    config:
+      global:
+        scrape_interval: 60s
+      scrape_configs:
+        - job_name: otel-collector
+          static_configs:
+            - targets:
+                - localhost:8888
+              labels:
+                job_name: otel-collector
+
+processors:
+  batch:
+    send_batch_size: 10000
+    send_batch_max_size: 11000
+    timeout: 10s
+  resourcedetection:
+    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
+    detectors: [env, system]
+    timeout: 2s
+  signozspanmetrics/delta:
+    metrics_exporter: signozclickhousemetrics
+    metrics_flush_interval: 60s
+    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
+    dimensions_cache_size: 100000
+    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
+    enable_exp_histogram: true
+    dimensions:
+      - name: service.namespace
+        default: default
+      - name: deployment.environment
+        default: default
+      # This is added to ensure the uniqueness of the timeseries
+      # Otherwise, identical timeseries produced by multiple replicas of
+      # collectors result in incorrect APM metrics
+      - name: signoz.collector.id
+      - name: service.version
+      - name: browser.platform
+      - name: browser.mobile
+      - name: k8s.cluster.name
+      - name: k8s.node.name
+      - name: k8s.namespace.name
+      - name: host.name
+      - name: host.type
+      - name: container.name
+
+extensions:
+  health_check:
+    endpoint: 0.0.0.0:13133
+  pprof:
+    endpoint: 0.0.0.0:1777
+
+exporters:
+  clickhousetraces:
+    datasource: tcp://host.docker.internal:9000/signoz_traces
+    low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
+    use_new_schema: true
+  signozclickhousemetrics:
+    dsn: tcp://host.docker.internal:9000/signoz_metrics
+  clickhouselogsexporter:
+    dsn: tcp://host.docker.internal:9000/signoz_logs
+    timeout: 10s
+    use_new_schema: true
+
+service:
+  telemetry:
+    logs:
+      encoding: json
+  extensions:
+    - health_check
+    - pprof
+  pipelines:
+    traces:
+      receivers: [otlp]
+      processors: [signozspanmetrics/delta, batch]
+      exporters: [clickhousetraces]
+    metrics:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [signozclickhousemetrics]
+    metrics/prometheus:
+      receivers: [prometheus]
+      processors: [batch]
+      exporters: [signozclickhousemetrics]
+    logs:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [clickhouselogsexporter]
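The collector above exposes the standard OTLP ports, so any OpenTelemetry SDK pointed at localhost:4317 can feed it. A minimal smoke test, assuming the upstream Go SDK and OTLP gRPC exporter modules (go.opentelemetry.io/otel/...), which are not part of this change:

package main

import (
    "context"
    "log"

    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
    ctx := context.Background()

    // The devenv compose file maps 4317 (OTLP gRPC) to the host, with no TLS.
    exporter, err := otlptracegrpc.New(ctx,
        otlptracegrpc.WithEndpoint("localhost:4317"),
        otlptracegrpc.WithInsecure(),
    )
    if err != nil {
        log.Fatal(err)
    }

    tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
    defer func() {
        // Flush the batch so the span actually reaches the collector before exit.
        if err := tp.Shutdown(ctx); err != nil {
            log.Fatal(err)
        }
    }()

    // One throwaway span; it should land in the traces pipeline
    // (otlp -> signozspanmetrics/delta, batch -> clickhousetraces).
    _, span := tp.Tracer("devenv-smoke-test").Start(ctx, "hello-signoz")
    span.End()
}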
.github/CODEOWNERS (73 changes, vendored)
@@ -5,16 +5,83 @@
 /frontend/ @SigNoz/frontend @YounixM
 /frontend/src/container/MetricsApplication @srikanthccv
 /frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
+
+# Dashboard, Alert, Metrics, Service Map, Services
+/frontend/src/container/ListOfDashboard/ @srikanthccv
+/frontend/src/container/NewDashboard/ @srikanthccv
+/frontend/src/pages/DashboardsListPage/ @srikanthccv
+/frontend/src/pages/DashboardWidget/ @srikanthccv
+/frontend/src/pages/NewDashboard/ @srikanthccv
+/frontend/src/providers/Dashboard/ @srikanthccv
+
+# Alerts
+/frontend/src/container/AlertHistory/ @srikanthccv
+/frontend/src/container/AllAlertChannels/ @srikanthccv
+/frontend/src/container/AnomalyAlertEvaluationView/ @srikanthccv
+/frontend/src/container/CreateAlertChannels/ @srikanthccv
+/frontend/src/container/CreateAlertRule/ @srikanthccv
+/frontend/src/container/EditAlertChannels/ @srikanthccv
+/frontend/src/container/FormAlertChannels/ @srikanthccv
+/frontend/src/container/FormAlertRules/ @srikanthccv
+/frontend/src/container/ListAlertRules/ @srikanthccv
+/frontend/src/container/TriggeredAlerts/ @srikanthccv
+/frontend/src/pages/AlertChannelCreate/ @srikanthccv
+/frontend/src/pages/AlertDetails/ @srikanthccv
+/frontend/src/pages/AlertHistory/ @srikanthccv
+/frontend/src/pages/AlertList/ @srikanthccv
+/frontend/src/pages/CreateAlert/ @srikanthccv
+/frontend/src/providers/Alert.tsx @srikanthccv
+
+# Metrics
+/frontend/src/container/MetricsExplorer/ @srikanthccv
+/frontend/src/pages/MetricsApplication/ @srikanthccv
+/frontend/src/pages/MetricsExplorer/ @srikanthccv
+
+# Services and Service Map
+/frontend/src/container/ServiceApplication/ @srikanthccv
+/frontend/src/container/ServiceTable/ @srikanthccv
+/frontend/src/pages/Services/ @srikanthccv
+/frontend/src/pages/ServiceTopLevelOperations/ @srikanthccv
+/frontend/src/container/Home/Services/ @srikanthccv
+
 /deploy/ @SigNoz/devops
 .github @SigNoz/devops
+
+# Scaffold Owners
 /pkg/config/ @grandwizard28
 /pkg/errors/ @grandwizard28
 /pkg/factory/ @grandwizard28
 /pkg/types/ @grandwizard28
+/pkg/valuer/ @grandwizard28
+/cmd/ @grandwizard28
 .golangci.yml @grandwizard28
+
+# Zeus Owners
 /pkg/zeus/ @vikrantgupta25
-/pkg/licensing/ @vikrantgupta25
-/pkg/sqlmigration/ @vikrantgupta25
 /ee/zeus/ @vikrantgupta25
+/pkg/licensing/ @vikrantgupta25
 /ee/licensing/ @vikrantgupta25
-/ee/sqlmigration/ @vikrantgupta25
+
+# SQL Owners
+/pkg/sqlmigration/ @vikrantgupta25
+/ee/sqlmigration/ @vikrantgupta25
+/pkg/sqlschema/ @vikrantgupta25
+/ee/sqlschema/ @vikrantgupta25
+
+# Analytics Owners
+/pkg/analytics/ @vikrantgupta25
+/pkg/statsreporter/ @vikrantgupta25
+
+# Querier Owners
+/pkg/querier/ @srikanthccv
+/pkg/variables/ @srikanthccv
+/pkg/types/querybuildertypes/ @srikanthccv
+/pkg/querybuilder/ @srikanthccv
+/pkg/telemetrylogs/ @srikanthccv
+/pkg/telemetrymetadata/ @srikanthccv
+/pkg/telemetrymetrics/ @srikanthccv
+/pkg/telemetrytraces/ @srikanthccv
+
+# AuthN / AuthZ Owners
+
+/pkg/authz/ @vikrantgupta25 @grandwizard28
.github/workflows/build-community.yaml (6 changes, vendored)
@@ -62,11 +62,11 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.23
+      GO_VERSION: 1.24
       GO_NAME: signoz-community
       GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
       GO_INPUT_ARTIFACT_PATH: frontend/build
-      GO_BUILD_CONTEXT: ./pkg/query-service
+      GO_BUILD_CONTEXT: ./cmd/community
       GO_BUILD_FLAGS: >-
         -tags timetzdata
         -ldflags='-linkmode external -extldflags \"-static\" -s -w
@@ -78,6 +78,6 @@ jobs:
         -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
-      DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
+      DOCKER_DOCKERFILE_PATH: ./cmd/community/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
       DOCKER_PROVIDERS: dockerhub
.github/workflows/build-enterprise.yaml (6 changes, vendored)
@@ -93,10 +93,10 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.23
+      GO_VERSION: 1.24
       GO_INPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
       GO_INPUT_ARTIFACT_PATH: frontend/build
-      GO_BUILD_CONTEXT: ./ee/query-service
+      GO_BUILD_CONTEXT: ./cmd/enterprise
       GO_BUILD_FLAGS: >-
         -tags timetzdata
         -ldflags='-linkmode external -extldflags \"-static\" -s -w
@@ -112,6 +112,6 @@ jobs:
         -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
-      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
+      DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
       DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}
.github/workflows/build-staging.yaml (6 changes, vendored)
@@ -92,10 +92,10 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.23
+      GO_VERSION: 1.24
       GO_INPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
       GO_INPUT_ARTIFACT_PATH: frontend/build
-      GO_BUILD_CONTEXT: ./ee/query-service
+      GO_BUILD_CONTEXT: ./cmd/enterprise
       GO_BUILD_FLAGS: >-
         -tags timetzdata
         -ldflags='-linkmode external -extldflags \"-static\" -s -w
@@ -111,7 +111,7 @@ jobs:
         -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
-      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
+      DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
       DOCKER_PROVIDERS: gcp
   staging:
.github/workflows/goci.yaml (10 changes, vendored)
@@ -18,7 +18,7 @@ jobs:
     with:
       PRIMUS_REF: main
       GO_TEST_CONTEXT: ./...
-      GO_VERSION: 1.23
+      GO_VERSION: 1.24
   fmt:
     if: |
       (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -27,7 +27,7 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.23
+      GO_VERSION: 1.24
   lint:
     if: |
       (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -36,7 +36,7 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.23
+      GO_VERSION: 1.24
   deps:
     if: |
       (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -45,7 +45,7 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.23
+      GO_VERSION: 1.24
   build:
     if: |
       (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -57,7 +57,7 @@ jobs:
       - name: go-install
         uses: actions/setup-go@v5
         with:
-          go-version: "1.23"
+          go-version: "1.24"
       - name: qemu-install
         uses: docker/setup-qemu-action@v3
       - name: aarch64-install
.github/workflows/gor-signoz-community.yaml (8 changes, vendored)
@@ -36,7 +36,7 @@ jobs:
         - ubuntu-latest
         - macos-latest
     env:
-      CONFIG_PATH: pkg/query-service/.goreleaser.yaml
+      CONFIG_PATH: cmd/community/.goreleaser.yaml
     runs-on: ${{ matrix.os }}
     steps:
       - name: checkout
@@ -58,7 +58,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.23"
+          go-version: "1.24"
       - name: cross-compilation-tools
         if: matrix.os == 'ubuntu-latest'
         run: |
@@ -100,7 +100,7 @@ jobs:
     needs: build
     env:
       DOCKER_CLI_EXPERIMENTAL: "enabled"
-      WORKDIR: pkg/query-service
+      WORKDIR: cmd/community
     steps:
       - name: checkout
         uses: actions/checkout@v4
@@ -122,7 +122,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.23"
+          go-version: "1.24"

       # copy the caches from build
       - name: get-sha
.github/workflows/gor-signoz.yaml (6 changes, vendored)
@@ -50,7 +50,7 @@ jobs:
         - ubuntu-latest
         - macos-latest
     env:
-      CONFIG_PATH: ee/query-service/.goreleaser.yaml
+      CONFIG_PATH: cmd/enterprise/.goreleaser.yaml
     runs-on: ${{ matrix.os }}
     steps:
       - name: checkout
@@ -72,7 +72,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.23"
+          go-version: "1.24"
       - name: cross-compilation-tools
         if: matrix.os == 'ubuntu-latest'
         run: |
@@ -135,7 +135,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.23"
+          go-version: "1.24"

       # copy the caches from build
       - name: get-sha
.github/workflows/integrationci.yaml (6 changes, vendored)
@@ -15,14 +15,16 @@ jobs:
     matrix:
       src:
         - bootstrap
+        - auth
+        - querier
       sqlstore-provider:
         - postgres
         - sqlite
       clickhouse-version:
         - 24.1.2-alpine
-        - 24.12-alpine
+        - 25.5.6
       schema-migrator-version:
-        - v0.111.38
+        - v0.128.1
       postgres-version:
         - 15
     if: |
.github/workflows/prereleaser.yaml (4 changes, vendored)
@@ -1,10 +1,6 @@
 name: prereleaser

 on:
-  # schedule every wednesday 6:30 AM UTC (12:00 PM IST)
-  schedule:
-    - cron: '30 6 * * 3'
-
   # allow manual triggering of the workflow by a maintainer
   workflow_dispatch:
     inputs:
.github/workflows/run-e2e.yaml (new file, 62 lines, vendored)
@@ -0,0 +1,62 @@
+name: e2eci
+
+on:
+  workflow_dispatch:
+    inputs:
+      userRole:
+        description: "Role of the user (ADMIN, EDITOR, VIEWER)"
+        required: true
+        type: choice
+        options:
+          - ADMIN
+          - EDITOR
+          - VIEWER
+
+jobs:
+  test:
+    name: Run Playwright Tests
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: lts/*
+
+      - name: Mask secrets and input
+        run: |
+          echo "::add-mask::${{ secrets.BASE_URL }}"
+          echo "::add-mask::${{ secrets.LOGIN_USERNAME }}"
+          echo "::add-mask::${{ secrets.LOGIN_PASSWORD }}"
+          echo "::add-mask::${{ github.event.inputs.userRole }}"
+
+      - name: Install dependencies
+        working-directory: frontend
+        run: |
+          npm install -g yarn
+          yarn
+
+      - name: Install Playwright Browsers
+        working-directory: frontend
+        run: yarn playwright install --with-deps
+
+      - name: Run Playwright Tests
+        working-directory: frontend
+        run: |
+          BASE_URL="${{ secrets.BASE_URL }}" \
+          LOGIN_USERNAME="${{ secrets.LOGIN_USERNAME }}" \
+          LOGIN_PASSWORD="${{ secrets.LOGIN_PASSWORD }}" \
+          USER_ROLE="${{ github.event.inputs.userRole }}" \
+          yarn playwright test
+
+      - name: Upload Playwright Report
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: playwright-report
+          path: frontend/playwright-report/
+          retention-days: 30
.gitignore (2 changes, vendored)
@@ -86,6 +86,8 @@ queries.active
 .devenv/**/tmp/**
 .qodo
+
+.dev

 ### Python ###
 # Byte-compiled / optimized / DLL files
 __pycache__/
@@ -8,6 +8,7 @@ linters:
     - depguard
     - iface
     - unparam
+    - forbidigo

 linters-settings:
   sloglint:
@@ -24,6 +25,10 @@ linters-settings:
       deny:
         - pkg: "go.uber.org/zap"
           desc: "Do not use zap logger. Use slog instead."
+    noerrors:
+      deny:
+        - pkg: "errors"
+          desc: "Do not use errors package. Use github.com/SigNoz/signoz/pkg/errors instead."
   iface:
     enable:
       - identical
ADVOCATE.md (new file, 62 lines)
@@ -0,0 +1,62 @@
+# SigNoz Community Advocate Program
+
+Our community is filled with passionate developers who love SigNoz and have been helping spread the word about observability across the world. The SigNoz Community Advocate Program is our way of recognizing these incredible community members and creating deeper collaboration opportunities.
+
+## What is the SigNoz Community Advocate Program?
+
+The SigNoz Community Advocate Program celebrates and supports community members who are already passionate about observability and helping fellow developers. If you're someone who loves discussing SigNoz, helping others with their implementations, or sharing knowledge about observability practices, this program is designed with you in mind.
+
+Our advocates are the heart of the SigNoz community, helping other developers succeed with observability and providing valuable insights that help us build better products.
+
+## What Do Advocates Do?
+
+1. **Community Support**
+
+   - Help fellow developers in our Slack community and GitHub Discussions
+   - Answer questions and share solutions
+   - Guide newcomers through SigNoz self-host implementations
+
+2. **Knowledge Sharing**
+
+   - Spread awareness about observability best practices on developer forums
+   - Create content like blog posts, social media posts, and videos
+   - Host local meetups and events in their regions
+
+3. **Product Collaboration**
+
+   - Provide insights on features, changes, and improvements the community needs
+   - Beta test new features and provide early feedback
+   - Help us understand real-world use cases and pain points
+
+## What's In It For You?
+
+**Recognition & Swag**
+
+- Official recognition as a SigNoz advocate
+- Welcome hamper upon joining
+- Exclusive swag box within your first 3 months
+- Feature on our website (with your permission)
+
+**Early Access**
+
+- First look at new features and updates
+- Direct line to the SigNoz team for feedback and suggestions
+- Opportunity to influence product roadmap
+
+**Community Impact**
+
+- Help shape the observability landscape
+- Build your reputation in the developer community
+- Connect with like-minded developers globally
+
+## How Does It Work?
+
+Currently, the SigNoz Community Advocate Program is **invite-only**. We're starting with a small group of passionate community members who have already been making a difference.
+
+We'll be working closely with our first advocates to shape the program details, benefits, and structure based on what works best for everyone involved.
+
+If you're interested in learning more about the program or want to get more involved in the SigNoz community, join our [Slack community](https://signoz-community.slack.com/) and let us know!
+
+---
+
+*The SigNoz Community Advocate Program recognizes and celebrates the amazing community members who are already passionate about helping fellow developers succeed with observability.*
@@ -78,3 +78,5 @@ Need assistance? Join our Slack community:

 - Set up your [development environment](docs/contributing/development.md)
 - Deploy and observe [SigNoz in action with OpenTelemetry Demo Application](docs/otel-demo-docs.md)
+- Explore the [SigNoz Community Advocate Program](ADVOCATE.md), which recognises contributors who support the community, share their expertise, and help shape SigNoz's future.
+- Write [integration tests](docs/contributing/go/integration.md)
LICENSE (2 changes)
@@ -2,7 +2,7 @@ Copyright (c) 2020-present SigNoz Inc.

 Portions of this software are licensed as follows:

-* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
+* All content that resides under the "ee/" and the "cmd/enterprise/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
 * All third party components incorporated into the SigNoz Software are licensed under the original license provided by the owner of the applicable component.
 * Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.
Makefile (23 changes)
@@ -20,18 +20,18 @@ GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO = -X github.com/SigNoz/signoz/ee/zeus.depreca

 GO_BUILD_VERSION_LDFLAGS = -X github.com/SigNoz/signoz/pkg/version.version=$(VERSION) -X github.com/SigNoz/signoz/pkg/version.hash=$(COMMIT_SHORT_SHA) -X github.com/SigNoz/signoz/pkg/version.time=$(TIMESTAMP) -X github.com/SigNoz/signoz/pkg/version.branch=$(BRANCH_NAME)
 GO_BUILD_ARCHS_COMMUNITY = $(addprefix go-build-community-,$(ARCHS))
-GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/pkg/query-service
+GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/cmd/community
 GO_BUILD_LDFLAGS_COMMUNITY = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=community
 GO_BUILD_ARCHS_ENTERPRISE = $(addprefix go-build-enterprise-,$(ARCHS))
 GO_BUILD_ARCHS_ENTERPRISE_RACE = $(addprefix go-build-enterprise-race-,$(ARCHS))
-GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/ee/query-service
+GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/cmd/enterprise
 GO_BUILD_LDFLAGS_ENTERPRISE = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=enterprise $(GO_BUILD_LDFLAG_ZEUS_URL) $(GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO)

 DOCKER_BUILD_ARCHS_COMMUNITY = $(addprefix docker-build-community-,$(ARCHS))
-DOCKERFILE_COMMUNITY = $(SRC)/pkg/query-service/Dockerfile
+DOCKERFILE_COMMUNITY = $(SRC)/cmd/community/Dockerfile
 DOCKER_REGISTRY_COMMUNITY ?= docker.io/signoz/signoz-community
 DOCKER_BUILD_ARCHS_ENTERPRISE = $(addprefix docker-build-enterprise-,$(ARCHS))
-DOCKERFILE_ENTERPRISE = $(SRC)/ee/query-service/Dockerfile
+DOCKERFILE_ENTERPRISE = $(SRC)/cmd/enterprise/Dockerfile
 DOCKER_REGISTRY_ENTERPRISE ?= docker.io/signoz/signoz
 JS_BUILD_CONTEXT = $(SRC)/frontend

@@ -61,6 +61,17 @@ devenv-postgres: ## Run postgres in devenv
 	@cd .devenv/docker/postgres; \
 	docker compose -f compose.yaml up -d

+.PHONY: devenv-signoz-otel-collector
+devenv-signoz-otel-collector: ## Run signoz-otel-collector in devenv (requires clickhouse to be running)
+	@cd .devenv/docker/signoz-otel-collector; \
+	docker compose -f compose.yaml up -d
+
+.PHONY: devenv-up
+devenv-up: devenv-clickhouse devenv-signoz-otel-collector ## Start both clickhouse and signoz-otel-collector for local development
+	@echo "Development environment is ready!"
+	@echo "  - ClickHouse: http://localhost:8123"
+	@echo "  - Signoz OTel Collector: grpc://localhost:4317, http://localhost:4318"
+
 ##############################################################
 # go commands
 ##############################################################
@@ -74,7 +85,7 @@ go-run-enterprise: ## Runs the enterprise go backend server
 	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
 	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
 	go run -race \
-	$(GO_BUILD_CONTEXT_ENTERPRISE)/main.go \
+	$(GO_BUILD_CONTEXT_ENTERPRISE)/*.go \
 	--config ./conf/prometheus.yml \
 	--cluster cluster

@@ -92,7 +103,7 @@ go-run-community: ## Runs the community go backend server
 	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
 	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
 	go run -race \
-	$(GO_BUILD_CONTEXT_COMMUNITY)/main.go \
+	$(GO_BUILD_CONTEXT_COMMUNITY)/*.go server \
 	--config ./conf/prometheus.yml \
 	--cluster cluster
@@ -8,7 +8,6 @@
 <p align="center">All your logs, metrics, and traces in one place. Monitor your application, spot issues before they occur and troubleshoot downtime quickly with rich context. SigNoz is a cost-effective open-source alternative to Datadog and New Relic. Visit <a href="https://signoz.io" target="_blank">signoz.io</a> for the full documentation, tutorials, and guide.</p>

 <p align="center">
-  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/signoz.svg?label=Docker%20Downloads"> </a>
   <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
   <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
   <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -231,6 +230,8 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
 - [Shaheer Kochai](https://github.com/ahmadshaheer)
 - [Amlan Kumar Nandy](https://github.com/amlannandy)
 - [Sahil Khan](https://github.com/sawhil)
+- [Aditya Singh](https://github.com/aks07)
+- [Abhi Kumar](https://github.com/ahrefabhi)

 #### DevOps
@@ -11,7 +11,7 @@ before:
 builds:
   - id: signoz
     binary: bin/signoz
-    main: pkg/query-service/main.go
+    main: ./cmd/community
     env:
       - CGO_ENABLED=1
       - >-
@@ -16,4 +16,4 @@ COPY frontend/build/ /etc/signoz/web/

 RUN chmod 755 /root /root/signoz

-ENTRYPOINT ["./signoz"]
+ENTRYPOINT ["./signoz", "server"]
@@ -17,4 +17,4 @@ COPY frontend/build/ /etc/signoz/web/

 RUN chmod 755 /root /root/signoz-community

-ENTRYPOINT ["./signoz-community"]
+ENTRYPOINT ["./signoz-community", "server"]
cmd/community/main.go (new file, 18 lines)
@@ -0,0 +1,18 @@
+package main
+
+import (
+	"log/slog"
+
+	"github.com/SigNoz/signoz/cmd"
+	"github.com/SigNoz/signoz/pkg/instrumentation"
+)
+
+func main() {
+	// initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application.
+	logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})
+
+	// register a list of commands to the root command
+	registerServer(cmd.RootCmd, logger)
+
+	cmd.Execute(logger)
+}
cmd/community/server.go (new file, 116 lines)
@@ -0,0 +1,116 @@
+package main
+
+import (
+	"context"
+	"log/slog"
+	"time"
+
+	"github.com/SigNoz/signoz/cmd"
+	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
+	"github.com/SigNoz/signoz/pkg/analytics"
+	"github.com/SigNoz/signoz/pkg/factory"
+	"github.com/SigNoz/signoz/pkg/licensing"
+	"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
+	"github.com/SigNoz/signoz/pkg/modules/organization"
+	"github.com/SigNoz/signoz/pkg/query-service/app"
+	"github.com/SigNoz/signoz/pkg/signoz"
+	"github.com/SigNoz/signoz/pkg/sqlschema"
+	"github.com/SigNoz/signoz/pkg/sqlstore"
+	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/version"
+	"github.com/SigNoz/signoz/pkg/zeus"
+	"github.com/SigNoz/signoz/pkg/zeus/noopzeus"
+	"github.com/spf13/cobra"
+)
+
+func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
+	var flags signoz.DeprecatedFlags
+
+	serverCmd := &cobra.Command{
+		Use:                "server",
+		Short:              "Run the SigNoz server",
+		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
+		RunE: func(currCmd *cobra.Command, args []string) error {
+			config, err := cmd.NewSigNozConfig(currCmd.Context(), logger, flags)
+			if err != nil {
+				return err
+			}
+
+			return runServer(currCmd.Context(), config, logger)
+		},
+	}
+
+	flags.RegisterFlags(serverCmd)
+	parentCmd.AddCommand(serverCmd)
+}
+
+func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error {
+	// print the version
+	version.Info.PrettyPrint(config.Version)
+
+	// add enterprise sqlstore factories to the community sqlstore factories
+	sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
+	if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
+		logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
+		return err
+	}
+
+	jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour)
+
+	signoz, err := signoz.New(
+		ctx,
+		config,
+		jwt,
+		zeus.Config{},
+		noopzeus.NewProviderFactory(),
+		licensing.Config{},
+		func(_ sqlstore.SQLStore, _ zeus.Zeus, _ organization.Getter, _ analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
+			return nooplicensing.NewFactory()
+		},
+		signoz.NewEmailingProviderFactories(),
+		signoz.NewCacheProviderFactories(),
+		signoz.NewWebProviderFactories(),
+		func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
+			return signoz.NewSQLSchemaProviderFactories(sqlstore)
+		},
+		signoz.NewSQLStoreProviderFactories(),
+		signoz.NewTelemetryStoreProviderFactories(),
+	)
+	if err != nil {
+		logger.ErrorContext(ctx, "failed to create signoz", "error", err)
+		return err
+	}
+
+	server, err := app.NewServer(config, signoz, jwt)
+	if err != nil {
+		logger.ErrorContext(ctx, "failed to create server", "error", err)
+		return err
+	}
+
+	if err := server.Start(ctx); err != nil {
+		logger.ErrorContext(ctx, "failed to start server", "error", err)
+		return err
+	}
+
+	signoz.Start(ctx)
+
+	if err := signoz.Wait(ctx); err != nil {
+		logger.ErrorContext(ctx, "failed to start signoz", "error", err)
+		return err
+	}
+
+	err = server.Stop(ctx)
+	if err != nil {
+		logger.ErrorContext(ctx, "failed to stop server", "error", err)
+		return err
+	}
+
+	err = signoz.Stop(ctx)
+	if err != nil {
+		logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
+		return err
+	}
+
+	return nil
+}
cmd/config.go (new file, 41 lines)
@@ -0,0 +1,41 @@
+package cmd
+
+import (
+	"context"
+	"log/slog"
+	"os"
+
+	"github.com/SigNoz/signoz/pkg/config"
+	"github.com/SigNoz/signoz/pkg/config/envprovider"
+	"github.com/SigNoz/signoz/pkg/config/fileprovider"
+	"github.com/SigNoz/signoz/pkg/signoz"
+)
+
+func NewSigNozConfig(ctx context.Context, logger *slog.Logger, flags signoz.DeprecatedFlags) (signoz.Config, error) {
+	config, err := signoz.NewConfig(
+		ctx,
+		logger,
+		config.ResolverConfig{
+			Uris: []string{"env:"},
+			ProviderFactories: []config.ProviderFactory{
+				envprovider.NewFactory(),
+				fileprovider.NewFactory(),
+			},
+		},
+		flags,
+	)
+	if err != nil {
+		return signoz.Config{}, err
+	}
+
+	return config, nil
+}
+
+func NewJWTSecret(ctx context.Context, logger *slog.Logger) string {
+	jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")
+	if len(jwtSecret) == 0 {
+		logger.ErrorContext(ctx, "🚨 CRITICAL SECURITY ISSUE: No JWT secret key specified!", "error", "SIGNOZ_JWT_SECRET environment variable is not set. This has dire consequences for the security of the application. Without a JWT secret, user sessions are vulnerable to tampering and unauthorized access. Please set the SIGNOZ_JWT_SECRET environment variable immediately. For more information, please refer to https://github.com/SigNoz/signoz/issues/8400.")
+	}
+
+	return jwtSecret
+}
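cmd/config.go centralizes config and JWT-secret resolution for both binaries. A minimal sketch of a caller, using only names visible in this diff (the standalone main wrapper is illustrative and not part of the change):

package main

import (
    "context"
    "log/slog"

    "github.com/SigNoz/signoz/cmd"
    "github.com/SigNoz/signoz/pkg/instrumentation"
    "github.com/SigNoz/signoz/pkg/signoz"
)

func main() {
    ctx := context.Background()
    // Same logger construction as cmd/community/main.go and cmd/enterprise/main.go.
    logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})

    // Resolve config from the environment ("env:" URI) with env and file
    // providers, exactly as NewSigNozConfig wires it above.
    var flags signoz.DeprecatedFlags // normally populated by cobra flag parsing
    config, err := cmd.NewSigNozConfig(ctx, logger, flags)
    if err != nil {
        logger.ErrorContext(ctx, "failed to load config", "error", err)
        return
    }
    _ = config

    // NewJWTSecret only reads SIGNOZ_JWT_SECRET and logs loudly when it is missing.
    _ = cmd.NewJWTSecret(ctx, logger)
}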
@@ -11,7 +11,7 @@ before:
 builds:
   - id: signoz
     binary: bin/signoz
-    main: ee/query-service/main.go
+    main: ./cmd/enterprise
     env:
       - CGO_ENABLED=1
       - >-
@@ -16,4 +16,4 @@ COPY frontend/build/ /etc/signoz/web/

 RUN chmod 755 /root /root/signoz

-ENTRYPOINT ["./signoz"]
+ENTRYPOINT ["./signoz", "server"]
@@ -1,4 +1,12 @@
-FROM golang:1.23-bullseye
+FROM node:18-bullseye AS build
+
+WORKDIR /opt/
+COPY ./frontend/ ./
+ENV NODE_OPTIONS=--max-old-space-size=8192
+RUN CI=1 yarn install
+RUN CI=1 yarn build
+
+FROM golang:1.24-bullseye

 ARG OS="linux"
 ARG TARGETARCH
@@ -23,6 +31,7 @@ COPY go.mod go.sum ./

 RUN go mod download

+COPY ./cmd/ ./cmd/
 COPY ./ee/ ./ee/
 COPY ./pkg/ ./pkg/
 COPY ./templates/email /root/templates
@@ -31,6 +40,8 @@ COPY Makefile Makefile
 RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
 RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz

+COPY --from=build /opt/build ./web/
+
 RUN chmod 755 /root /root/signoz

-ENTRYPOINT ["/root/signoz"]
+ENTRYPOINT ["/root/signoz", "server"]
@@ -17,4 +17,4 @@ COPY frontend/build/ /etc/signoz/web/

 RUN chmod 755 /root /root/signoz

-ENTRYPOINT ["./signoz"]
+ENTRYPOINT ["./signoz", "server"]
cmd/enterprise/main.go (new file, 18 lines)
@@ -0,0 +1,18 @@
+package main
+
+import (
+	"log/slog"
+
+	"github.com/SigNoz/signoz/cmd"
+	"github.com/SigNoz/signoz/pkg/instrumentation"
+)
+
+func main() {
+	// initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application.
+	logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})
+
+	// register a list of commands to the root command
+	registerServer(cmd.RootCmd, logger)
+
+	cmd.Execute(logger)
+}
124
cmd/enterprise/server.go
Normal file
@@ -0,0 +1,124 @@
package main

import (
    "context"
    "log/slog"
    "time"

    "github.com/SigNoz/signoz/cmd"
    enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
    "github.com/SigNoz/signoz/ee/licensing/httplicensing"
    enterpriseapp "github.com/SigNoz/signoz/ee/query-service/app"
    "github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
    "github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
    enterprisezeus "github.com/SigNoz/signoz/ee/zeus"
    "github.com/SigNoz/signoz/ee/zeus/httpzeus"
    "github.com/SigNoz/signoz/pkg/analytics"
    "github.com/SigNoz/signoz/pkg/factory"
    "github.com/SigNoz/signoz/pkg/licensing"
    "github.com/SigNoz/signoz/pkg/modules/organization"
    "github.com/SigNoz/signoz/pkg/signoz"
    "github.com/SigNoz/signoz/pkg/sqlschema"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/version"
    "github.com/SigNoz/signoz/pkg/zeus"
    "github.com/spf13/cobra"
)

func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
    var flags signoz.DeprecatedFlags

    serverCmd := &cobra.Command{
        Use:                "server",
        Short:              "Run the SigNoz server",
        FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
        RunE: func(currCmd *cobra.Command, args []string) error {
            config, err := cmd.NewSigNozConfig(currCmd.Context(), logger, flags)
            if err != nil {
                return err
            }

            return runServer(currCmd.Context(), config, logger)
        },
    }

    flags.RegisterFlags(serverCmd)
    parentCmd.AddCommand(serverCmd)
}

func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error {
    // print the version
    version.Info.PrettyPrint(config.Version)

    // add enterprise sqlstore factories to the community sqlstore factories
    sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
    if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
        logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
        return err
    }

    jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour)

    signoz, err := signoz.New(
        ctx,
        config,
        jwt,
        enterprisezeus.Config(),
        httpzeus.NewProviderFactory(),
        enterpriselicensing.Config(24*time.Hour, 3),
        func(sqlstore sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
            return httplicensing.NewProviderFactory(sqlstore, zeus, orgGetter, analytics)
        },
        signoz.NewEmailingProviderFactories(),
        signoz.NewCacheProviderFactories(),
        signoz.NewWebProviderFactories(),
        func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
            existingFactories := signoz.NewSQLSchemaProviderFactories(sqlstore)
            if err := existingFactories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil {
                panic(err)
            }

            return existingFactories
        },
        sqlstoreFactories,
        signoz.NewTelemetryStoreProviderFactories(),
    )
    if err != nil {
        logger.ErrorContext(ctx, "failed to create signoz", "error", err)
        return err
    }

    server, err := enterpriseapp.NewServer(config, signoz, jwt)
    if err != nil {
        logger.ErrorContext(ctx, "failed to create server", "error", err)
        return err
    }

    if err := server.Start(ctx); err != nil {
        logger.ErrorContext(ctx, "failed to start server", "error", err)
        return err
    }

    signoz.Start(ctx)

    if err := signoz.Wait(ctx); err != nil {
        logger.ErrorContext(ctx, "failed to start signoz", "error", err)
        return err
    }

    err = server.Stop(ctx)
    if err != nil {
        logger.ErrorContext(ctx, "failed to stop server", "error", err)
        return err
    }

    err = signoz.Stop(ctx)
    if err != nil {
        logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
        return err
    }

    return nil
}
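The `runServer` flow above is a fixed lifecycle: build everything, start the HTTP server, start the application object, block until shutdown, then stop both. The following is a minimal, generic sketch of that lifecycle pattern, under the assumption of a hypothetical `Service` interface; it is illustrative, not code from this changeset:

```go
package main

import (
    "context"
    "os"
    "os/signal"
    "syscall"
)

// Service is a hypothetical stand-in for both the enterprise server and the
// signoz application object, each of which exposes Start/Stop above.
type Service interface {
    Start(ctx context.Context) error
    Stop(ctx context.Context) error
}

// run starts every service, blocks until an interrupt arrives (mirroring
// signoz.Wait), then stops the services in the same order runServer does.
func run(ctx context.Context, services ...Service) error {
    for _, svc := range services {
        if err := svc.Start(ctx); err != nil {
            return err
        }
    }

    // Block until SIGINT/SIGTERM, then begin shutdown.
    sigCh := make(chan os.Signal, 1)
    signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
    <-sigCh

    for _, svc := range services {
        if err := svc.Stop(ctx); err != nil {
            return err
        }
    }

    return nil
}
```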
33
cmd/root.go
Normal file
@@ -0,0 +1,33 @@
package cmd

import (
    "log/slog"
    "os"

    "github.com/SigNoz/signoz/pkg/version"
    "github.com/spf13/cobra"
    "go.uber.org/zap" //nolint:depguard
)

var RootCmd = &cobra.Command{
    Use:               "signoz",
    Short:             "OpenTelemetry-Native Logs, Metrics and Traces in a single pane",
    Version:           version.Info.Version(),
    SilenceUsage:      true,
    SilenceErrors:     true,
    CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
}

func Execute(logger *slog.Logger) {
    zapLogger := newZapLogger()
    zap.ReplaceGlobals(zapLogger)
    defer func() {
        _ = zapLogger.Sync()
    }()

    err := RootCmd.Execute()
    if err != nil {
        logger.ErrorContext(RootCmd.Context(), "error running command", "error", err)
        os.Exit(1)
    }
}
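Taken together, `cmd/enterprise/main.go`, `cmd/enterprise/server.go`, and `cmd/root.go` form a conventional cobra CLI: a root command, subcommands registered onto it, then a single `Execute`. A minimal, self-contained sketch of the same shape, with a hypothetical `hello` subcommand standing in for `server`:

```go
package main

import (
    "fmt"
    "log/slog"
    "os"

    "github.com/spf13/cobra"
)

// rootCmd plays the role of cmd.RootCmd: it carries no RunE of its own,
// so invoking the binary without a subcommand prints usage.
var rootCmd = &cobra.Command{
    Use:           "signoz",
    SilenceUsage:  true,
    SilenceErrors: true,
}

// registerHello mirrors registerServer: build the subcommand, then
// attach it to the parent before Execute is called.
func registerHello(parent *cobra.Command) {
    parent.AddCommand(&cobra.Command{
        Use:   "hello",
        Short: "Print a greeting",
        RunE: func(cmd *cobra.Command, args []string) error {
            fmt.Println("hello from signoz")
            return nil
        },
    })
}

func main() {
    logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

    registerHello(rootCmd)

    if err := rootCmd.Execute(); err != nil {
        logger.Error("error running command", "error", err)
        os.Exit(1)
    }
}
```

This shape is also why both Dockerfile `ENTRYPOINT`s in this changeset now append `server`: the binary expects an explicit subcommand.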
15
cmd/zap.go
Normal file
@@ -0,0 +1,15 @@
package cmd

import (
    "go.uber.org/zap"         //nolint:depguard
    "go.uber.org/zap/zapcore" //nolint:depguard
)

// Deprecated: Use `NewLogger` from `pkg/instrumentation` instead.
func newZapLogger() *zap.Logger {
    config := zap.NewProductionConfig()
    config.EncoderConfig.TimeKey = "timestamp"
    config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
    logger, _ := config.Build()
    return logger
}
@@ -121,6 +121,8 @@ telemetrystore:
     timeout_before_checking_execution_speed: 0
     max_bytes_to_read: 0
     max_result_rows: 0
+    ignore_data_skipping_indices: ""
+    secondary_indices_enable_bulk_filtering: false

 ##################### Prometheus #####################
 prometheus:
@@ -135,10 +137,7 @@ prometheus:
 ##################### Alertmanager #####################
 alertmanager:
   # Specifies the alertmanager provider to use.
-  provider: legacy
-  legacy:
-    # The API URL (with prefix) of the legacy Alertmanager instance.
-    api_url: http://localhost:9093/api
+  provider: signoz
   signoz:
     # The poll interval for periodically syncing the alertmanager with the config in the store.
     poll_interval: 1m
@@ -11,7 +11,7 @@ x-common: &common
       max-file: "3"
 x-clickhouse-defaults: &clickhouse-defaults
   !!merge <<: *common
-  image: clickhouse/clickhouse-server:24.1.2-alpine
+  image: clickhouse/clickhouse-server:25.5.6
   tty: true
   deploy:
     labels:
@@ -37,9 +37,11 @@ x-clickhouse-defaults: &clickhouse-defaults
     nofile:
       soft: 262144
       hard: 262144
+  environment:
+    - CLICKHOUSE_SKIP_USER_SETUP=1
 x-zookeeper-defaults: &zookeeper-defaults
   !!merge <<: *common
-  image: bitnami/zookeeper:3.7.1
+  image: signoz/zookeeper:3.7.1
   user: root
   deploy:
     labels:
@@ -63,7 +65,7 @@ x-db-depend: &db-depend
 services:
   init-clickhouse:
     !!merge <<: *common
-    image: clickhouse/clickhouse-server:24.1.2-alpine
+    image: clickhouse/clickhouse-server:25.5.6
     command:
       - bash
      - -c
@@ -174,7 +176,7 @@ services:
     # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.87.0
+    image: signoz/signoz:v0.95.0
     command:
       - --config=/root/config/prometheus.yml
     ports:
@@ -194,6 +196,7 @@ services:
       - TELEMETRY_ENABLED=true
       - DEPLOYMENT_TYPE=docker-swarm
       - SIGNOZ_JWT_SECRET=secret
+      - DOT_METRICS_ENABLED=true
     healthcheck:
       test:
         - CMD
@@ -206,7 +209,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.111.42
+    image: signoz/signoz-otel-collector:v0.129.5
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -230,7 +233,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.111.42
+    image: signoz/signoz-schema-migrator:v0.129.5
     deploy:
       restart_policy:
         condition: on-failure
@@ -11,7 +11,7 @@ x-common: &common
       max-file: "3"
 x-clickhouse-defaults: &clickhouse-defaults
   !!merge <<: *common
-  image: clickhouse/clickhouse-server:24.1.2-alpine
+  image: clickhouse/clickhouse-server:25.5.6
   tty: true
   deploy:
     labels:
@@ -36,9 +36,11 @@ x-clickhouse-defaults: &clickhouse-defaults
     nofile:
       soft: 262144
       hard: 262144
+  environment:
+    - CLICKHOUSE_SKIP_USER_SETUP=1
 x-zookeeper-defaults: &zookeeper-defaults
   !!merge <<: *common
-  image: bitnami/zookeeper:3.7.1
+  image: signoz/zookeeper:3.7.1
   user: root
   deploy:
     labels:
@@ -60,7 +62,7 @@ x-db-depend: &db-depend
 services:
   init-clickhouse:
     !!merge <<: *common
-    image: clickhouse/clickhouse-server:24.1.2-alpine
+    image: clickhouse/clickhouse-server:25.5.6
     command:
       - bash
       - -c
@@ -100,7 +102,7 @@ services:
     # - "9000:9000"
     # - "8123:8123"
     # - "9181:9181"

     configs:
       - source: clickhouse-config
         target: /etc/clickhouse-server/config.xml
@@ -110,13 +112,12 @@ services:
         target: /etc/clickhouse-server/custom-function.xml
       - source: clickhouse-cluster
         target: /etc/clickhouse-server/config.d/cluster.xml
-
     volumes:
       - clickhouse:/var/lib/clickhouse/
     # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.87.0
+    image: signoz/signoz:v0.95.0
     command:
       - --config=/root/config/prometheus.yml
     ports:
@@ -136,6 +137,7 @@ services:
       - GODEBUG=netdns=go
       - TELEMETRY_ENABLED=true
       - DEPLOYMENT_TYPE=docker-swarm
+      - DOT_METRICS_ENABLED=true
     healthcheck:
       test:
         - CMD
@@ -148,7 +150,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.111.42
+    image: signoz/signoz-otel-collector:v0.129.5
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -174,7 +176,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.111.42
+    image: signoz/signoz-schema-migrator:v0.129.5
     deploy:
       restart_policy:
         condition: on-failure
@@ -195,7 +197,6 @@ volumes:
     name: signoz-sqlite
   zookeeper-1:
     name: signoz-zookeeper-1
-
 configs:
   clickhouse-config:
     file: ../common/clickhouse/config.xml
@@ -205,7 +206,6 @@ configs:
     file: ../common/clickhouse/custom-function.xml
   clickhouse-cluster:
     file: ../common/clickhouse/cluster.xml
-
   signoz-prometheus-config:
     file: ../common/signoz/prometheus.yml
 # If you have multiple dashboard files, you can list them individually:
@@ -26,7 +26,7 @@ processors:
     detectors: [env, system]
     timeout: 2s
   signozspanmetrics/delta:
-    metrics_exporter: clickhousemetricswrite, signozclickhousemetrics
+    metrics_exporter: signozclickhousemetrics
     metrics_flush_interval: 60s
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
     dimensions_cache_size: 100000
@@ -60,27 +60,16 @@ exporters:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
     use_new_schema: true
-  clickhousemetricswrite:
-    endpoint: tcp://clickhouse:9000/signoz_metrics
-    resource_to_telemetry_conversion:
-      enabled: true
-    disable_v2: true
-  clickhousemetricswrite/prometheus:
-    endpoint: tcp://clickhouse:9000/signoz_metrics
-    disable_v2: true
   signozclickhousemetrics:
     dsn: tcp://clickhouse:9000/signoz_metrics
   clickhouselogsexporter:
     dsn: tcp://clickhouse:9000/signoz_logs
     timeout: 10s
     use_new_schema: true
-  # debug: {}
 service:
   telemetry:
     logs:
       encoding: json
-    metrics:
-      address: 0.0.0.0:8888
   extensions:
     - health_check
     - pprof
@@ -92,11 +81,11 @@ service:
     metrics:
       receivers: [otlp]
       processors: [batch]
-      exporters: [clickhousemetricswrite, signozclickhousemetrics]
+      exporters: [signozclickhousemetrics]
     metrics/prometheus:
       receivers: [prometheus]
      processors: [batch]
-      exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
+      exporters: [signozclickhousemetrics]
     logs:
       receivers: [otlp]
       processors: [batch]
@@ -10,7 +10,7 @@ x-common: &common
 x-clickhouse-defaults: &clickhouse-defaults
   !!merge <<: *common
   # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
-  image: clickhouse/clickhouse-server:24.1.2-alpine
+  image: clickhouse/clickhouse-server:25.5.6
   tty: true
   labels:
     signoz.io/scrape: "true"
@@ -40,9 +40,11 @@ x-clickhouse-defaults: &clickhouse-defaults
     nofile:
       soft: 262144
       hard: 262144
+  environment:
+    - CLICKHOUSE_SKIP_USER_SETUP=1
 x-zookeeper-defaults: &zookeeper-defaults
   !!merge <<: *common
-  image: bitnami/zookeeper:3.7.1
+  image: signoz/zookeeper:3.7.1
   user: root
   labels:
     signoz.io/scrape: "true"
@@ -65,7 +67,7 @@ x-db-depend: &db-depend
 services:
   init-clickhouse:
     !!merge <<: *common
-    image: clickhouse/clickhouse-server:24.1.2-alpine
+    image: clickhouse/clickhouse-server:25.5.6
     container_name: signoz-init-clickhouse
     command:
       - bash
@@ -177,7 +179,7 @@ services:
     # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.87.0}
+    image: signoz/signoz:${VERSION:-v0.95.0}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -197,6 +199,7 @@ services:
       - GODEBUG=netdns=go
       - TELEMETRY_ENABLED=true
       - DEPLOYMENT_TYPE=docker-standalone-amd
+      - DOT_METRICS_ENABLED=true
     healthcheck:
       test:
         - CMD
@@ -210,7 +213,7 @@ services:
   # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.42}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.5}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -236,7 +239,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.42}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.5}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -247,7 +250,7 @@ services:
         condition: service_healthy
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.42}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.5}
     container_name: schema-migrator-async
     command:
       - async
@@ -9,8 +9,7 @@ x-common: &common
       max-file: "3"
 x-clickhouse-defaults: &clickhouse-defaults
   !!merge <<: *common
-  # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
-  image: clickhouse/clickhouse-server:24.1.2-alpine
+  image: clickhouse/clickhouse-server:25.5.6
   tty: true
   labels:
     signoz.io/scrape: "true"
@@ -36,9 +35,11 @@ x-clickhouse-defaults: &clickhouse-defaults
     nofile:
       soft: 262144
       hard: 262144
+  environment:
+    - CLICKHOUSE_SKIP_USER_SETUP=1
 x-zookeeper-defaults: &zookeeper-defaults
   !!merge <<: *common
-  image: bitnami/zookeeper:3.7.1
+  image: signoz/zookeeper:3.7.1
   user: root
   labels:
     signoz.io/scrape: "true"
@@ -61,7 +62,7 @@ x-db-depend: &db-depend
 services:
   init-clickhouse:
     !!merge <<: *common
-    image: clickhouse/clickhouse-server:24.1.2-alpine
+    image: clickhouse/clickhouse-server:25.5.6
     container_name: signoz-init-clickhouse
     command:
       - bash
@@ -110,7 +111,7 @@ services:
     # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.87.0}
+    image: signoz/signoz:${VERSION:-v0.95.0}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -130,6 +131,7 @@ services:
       - GODEBUG=netdns=go
       - TELEMETRY_ENABLED=true
       - DEPLOYMENT_TYPE=docker-standalone-amd
+      - DOT_METRICS_ENABLED=true
     healthcheck:
       test:
         - CMD
@@ -142,7 +144,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.42}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.5}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -164,7 +166,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.42}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.5}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -176,7 +178,7 @@ services:
     restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.42}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.5}
     container_name: schema-migrator-async
     command:
       - async
@@ -26,7 +26,7 @@ processors:
     detectors: [env, system]
     timeout: 2s
   signozspanmetrics/delta:
-    metrics_exporter: clickhousemetricswrite, signozclickhousemetrics
+    metrics_exporter: signozclickhousemetrics
     metrics_flush_interval: 60s
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
     dimensions_cache_size: 100000
@@ -60,27 +60,16 @@ exporters:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
     use_new_schema: true
-  clickhousemetricswrite:
-    endpoint: tcp://clickhouse:9000/signoz_metrics
-    disable_v2: true
-    resource_to_telemetry_conversion:
-      enabled: true
-  clickhousemetricswrite/prometheus:
-    endpoint: tcp://clickhouse:9000/signoz_metrics
-    disable_v2: true
   signozclickhousemetrics:
     dsn: tcp://clickhouse:9000/signoz_metrics
   clickhouselogsexporter:
     dsn: tcp://clickhouse:9000/signoz_logs
     timeout: 10s
     use_new_schema: true
-  # debug: {}
 service:
   telemetry:
     logs:
       encoding: json
-    metrics:
-      address: 0.0.0.0:8888
   extensions:
     - health_check
     - pprof
@@ -92,11 +81,11 @@ service:
     metrics:
       receivers: [otlp]
       processors: [batch]
-      exporters: [clickhousemetricswrite, signozclickhousemetrics]
+      exporters: [signozclickhousemetrics]
     metrics/prometheus:
       receivers: [prometheus]
       processors: [batch]
-      exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
+      exporters: [signozclickhousemetrics]
     logs:
       receivers: [otlp]
       processors: [batch]

@@ -44,20 +44,35 @@ Before diving in, make sure you have these tools installed:

 SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one.

-### 1. Setting up Clickhouse
+### 1. Setting up ClickHouse

-First, we need to get Clickhouse running:
+First, we need to get ClickHouse running:

 ```bash
 make devenv-clickhouse
 ```

 This command:
-- Starts Clickhouse in a single-shard, single-replica cluster
+- Starts ClickHouse in a single-shard, single-replica cluster
 - Sets up Zookeeper
 - Runs the latest schema migrations

-### 2. Starting the Backend
+### 2. Setting up SigNoz OpenTelemetry Collector
+
+Next, start the OpenTelemetry Collector to receive telemetry data:
+
+```bash
+make devenv-signoz-otel-collector
+```
+
+This command:
+- Starts the SigNoz OpenTelemetry Collector
+- Listens on port 4317 (gRPC) and 4318 (HTTP) for incoming telemetry data
+- Forwards data to ClickHouse for storage
+
+> 💡 **Quick Setup**: Use `make devenv-up` to start both ClickHouse and OTel Collector together
+
+### 3. Starting the Backend

 1. Run the backend server:
 ```bash
@@ -73,19 +88,24 @@ This command:

 > 💡 **Tip**: The API server runs at `http://localhost:8080/` by default

-### 3. Setting up the Frontend
+### 4. Setting up the Frontend

-1. Install dependencies:
+1. Navigate to the frontend directory:
+```bash
+cd frontend
+```
+
+2. Install dependencies:
 ```bash
 yarn install
 ```

-2. Create a `.env` file in the `frontend` directory:
+3. Create a `.env` file in this directory:
 ```env
 FRONTEND_API_ENDPOINT=http://localhost:8080
 ```

-3. Start the development server:
+4. Start the development server:
 ```bash
 yarn dev
 ```
@@ -93,3 +113,25 @@ This command:

 > 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code

 Now you're all set to start developing! Happy coding! 🎉
+
+## Verifying Your Setup
+To verify everything is working correctly:
+
+1. **Check ClickHouse**: `curl http://localhost:8123/ping` (should return "Ok.")
+2. **Check OTel Collector**: `curl http://localhost:13133` (should return health status)
+3. **Check Backend**: `curl http://localhost:8080/api/v1/health` (should return `{"status":"ok"}`)
+4. **Check Frontend**: Open `http://localhost:3301` in your browser
+
+## How to send test data?
+
+You can now send telemetry data to your local SigNoz instance:
+
+- **OTLP gRPC**: `localhost:4317`
+- **OTLP HTTP**: `localhost:4318`
+
+For example, using `curl` to send a test trace:
+```bash
+curl -X POST http://localhost:4318/v1/traces \
+  -H "Content-Type: application/json" \
+  -d '{"resourceSpans":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"test-service"}}]},"scopeSpans":[{"spans":[{"traceId":"12345678901234567890123456789012","spanId":"1234567890123456","name":"test-span","startTimeUnixNano":"1609459200000000000","endTimeUnixNano":"1609459201000000000"}]}]}]}'
+```
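The same test span can also be produced from instrumented code. The sketch below uses the stock OpenTelemetry Go SDK against the collector's OTLP HTTP port; the module paths, options, and service name are assumptions about the public SDK, not part of this repository:

```go
package main

import (
    "context"
    "log"
    "time"

    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
    "go.opentelemetry.io/otel/sdk/resource"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
    semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

func main() {
    ctx := context.Background()

    // Export to the local collector's OTLP HTTP endpoint (localhost:4318).
    exporter, err := otlptracehttp.New(ctx,
        otlptracehttp.WithEndpoint("localhost:4318"),
        otlptracehttp.WithInsecure(),
    )
    if err != nil {
        log.Fatal(err)
    }

    tp := sdktrace.NewTracerProvider(
        sdktrace.WithBatcher(exporter),
        sdktrace.WithResource(resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.ServiceName("test-service"),
        )),
    )
    defer func() { _ = tp.Shutdown(ctx) }()

    // Emit a single test span; the batcher flushes it on shutdown.
    _, span := tp.Tracer("dev-check").Start(ctx, "test-span")
    time.Sleep(100 * time.Millisecond)
    span.End()
}
```

The span should then appear in the Traces view at `http://localhost:3301` under the `test-service` service.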
213
docs/contributing/go/integration.md
Normal file
@@ -0,0 +1,213 @@
# Integration Tests

SigNoz uses integration tests to verify that different components work together correctly in a real environment. These tests run against actual services (ClickHouse, PostgreSQL, etc.) to ensure end-to-end functionality.

## How to set up the integration test environment?

### Prerequisites

Before running integration tests, ensure you have the following installed:

- Python 3.13+
- Poetry (for dependency management)
- Docker (for containerized services)

### Initial Setup

1. Navigate to the integration tests directory:
```bash
cd tests/integration
```

2. Install dependencies using Poetry:
```bash
poetry install --no-root
```

### Starting the Test Environment

To spin up all the containers necessary for writing integration tests and keep them running:

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/bootstrap/setup.py::test_setup
```

This command will:
- Start all required services (ClickHouse, PostgreSQL, Zookeeper, etc.)
- Keep containers running due to the `--reuse` flag
- Verify that the setup is working correctly

### Stopping the Test Environment

When you're done writing integration tests, clean up the environment:

```bash
poetry run pytest --basetemp=./tmp/ -vv --teardown -s src/bootstrap/setup.py::test_teardown
```

This will destroy the running integration test setup and clean up resources.

## Understanding the Integration Test Framework

Python and pytest form the foundation of the integration testing framework. Testcontainers are used to spin up disposable integration environments. Wiremock is used to spin up **test doubles** of other services.

- **Why Python/pytest?** It's expressive, low-boilerplate, and has powerful fixture capabilities that make integration testing straightforward. Extensive libraries for HTTP requests, JSON handling, and data analysis (numpy) make it easier to test APIs and verify data.
- **Why testcontainers?** They let us spin up isolated dependencies that match our production environment without complex setup.
- **Why wiremock?** Well maintained, documented and extensible.

```
.
├── conftest.py
├── fixtures
│   ├── __init__.py
│   ├── auth.py
│   ├── clickhouse.py
│   ├── fs.py
│   ├── http.py
│   ├── migrator.py
│   ├── network.py
│   ├── postgres.py
│   ├── signoz.py
│   ├── sql.py
│   ├── sqlite.py
│   ├── types.py
│   └── zookeeper.py
├── poetry.lock
├── pyproject.toml
└── src
    └── bootstrap
        ├── __init__.py
        ├── a_database.py
        ├── b_register.py
        └── c_license.py
```

Each test suite follows some important principles:

1. **Organization**: Test suites live under `src/` in self-contained packages. Fixtures (a pytest concept) live inside `fixtures/`.
2. **Execution Order**: Files are prefixed with `a_`, `b_`, `c_` to ensure sequential execution.
3. **Time Constraints**: Each suite should complete in under 10 minutes (setup takes ~4 mins).

### Test Suite Design

Test suites should target functional domains or subsystems within SigNoz. When designing a test suite, consider these principles:

- **Functional Cohesion**: Group tests around a specific capability or service boundary
- **Data Flow**: Follow the path of data through related components
- **Change Patterns**: Components frequently modified together should be tested together

The exact boundaries for modules are intentionally flexible, allowing teams to define logical groupings based on their specific context and knowledge of the system.

Eg: The **bootstrap** integration test suite validates core system functionality:

- Database initialization
- Version check

Other test suites can be **pipelines, auth, querier**.

## How to write an integration test?

Now start writing an integration test. Create a new file `src/bootstrap/e_version.py` and paste the following:

```python
import requests

from fixtures import types
from fixtures.logger import setup_logger

logger = setup_logger(__name__)


def test_version(signoz: types.SigNoz) -> None:
    response = requests.get(signoz.self.host_config.get("/api/v1/version"), timeout=2)
    logger.info(response)
```

We have written a simple test which calls the `version` endpoint of the container in step 1. In order to run just this function, run the following command:

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/bootstrap/e_version.py::test_version
```

> Note: The `--reuse` flag is used to reuse the environment if it is already running. Always use this flag when writing and running integration tests. If you don't use this flag, the environment will be destroyed and recreated every time you run the test.

Here's another example of how to write a more comprehensive integration test:

```python
from http import HTTPStatus

import requests

from fixtures import types
from fixtures.logger import setup_logger

logger = setup_logger(__name__)


def test_user_registration(signoz: types.SigNoz) -> None:
    """Test user registration functionality."""
    response = requests.post(
        signoz.self.host_configs["8080"].get("/api/v1/register"),
        json={
            "name": "testuser",
            "orgId": "",
            "orgName": "test.org",
            "email": "test@example.com",
            "password": "password123Z$",
        },
        timeout=2,
    )

    assert response.status_code == HTTPStatus.OK
    assert response.json()["setupCompleted"] is True
```

## How to run integration tests?

### Running All Tests

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/
```

### Running Specific Test Categories

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/<suite>

# Run querier tests
poetry run pytest --basetemp=./tmp/ -vv --reuse src/querier/
# Run auth tests
poetry run pytest --basetemp=./tmp/ -vv --reuse src/auth/
```

### Running Individual Tests

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/<suite>/<file>.py::test_name

# Run test_register in file a_register.py in auth suite
poetry run pytest --basetemp=./tmp/ -vv --reuse src/auth/a_register.py::test_register
```

## How to configure different options for integration tests?

Tests can be configured using pytest options:

- `--sqlstore-provider` - Choose database provider (default: postgres)
- `--postgres-version` - PostgreSQL version (default: 15)
- `--clickhouse-version` - ClickHouse version (default: 24.1.2-alpine)
- `--zookeeper-version` - Zookeeper version (default: 3.7.1)

Example:
```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse --sqlstore-provider=postgres --postgres-version=14 src/auth/
```

## What should I remember?

- **Always use the `--reuse` flag** when setting up the environment to keep containers running
- **Use the `--teardown` flag** when cleaning up to avoid resource leaks
- **Follow the naming convention** with alphabetical prefixes for test execution order
- **Use proper timeouts** in HTTP requests to avoid hanging tests
- **Clean up test data** between tests to avoid interference
- **Use descriptive test names** that clearly indicate what is being tested
- **Leverage fixtures** for common setup and authentication
- **Test both success and failure scenarios** to ensure robust functionality
@@ -16,7 +16,7 @@ __Table of Contents__
 - [Prerequisites](#prerequisites-1)
 - [Install Helm Repo and Charts](#install-helm-repo-and-charts)
 - [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app-1)
-- [Moniitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
+- [Monitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
 - [What's next](#whats-next)

34
ee/anomaly/daily.go
Normal file
@@ -0,0 +1,34 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type DailyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*DailyProvider)(nil)

func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &dp.BaseSeasonalProvider
}

func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
    dp := &DailyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(dp)
    }

    return dp
}

func (p *DailyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
    req.Seasonality = SeasonalityDaily
    return p.getAnomalies(ctx, orgID, req)
}
35
ee/anomaly/hourly.go
Normal file
@@ -0,0 +1,35 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type HourlyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*HourlyProvider)(nil)

func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &hp.BaseSeasonalProvider
}

// NewHourlyProvider now uses the generic option type
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
    hp := &HourlyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(hp)
    }

    return hp
}

func (p *HourlyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
    req.Seasonality = SeasonalityHourly
    return p.getAnomalies(ctx, orgID, req)
}
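Both constructors take the generic `GenericProviderOption[T]` defined in `ee/anomaly/seasonal.go` further down in this changeset. A hypothetical wiring sketch (the import paths follow this repository's layout; `q` and `logger` are assumed to come from the surrounding application):

```go
package wiring

import (
    "log/slog"

    "github.com/SigNoz/signoz/ee/anomaly"
    "github.com/SigNoz/signoz/pkg/querier"
)

// buildProviders wires one querier and logger into both seasonal providers.
// The explicit type arguments pin each generic option to its provider type,
// which is what lets WithQuerier/WithLogger serve every provider that embeds
// BaseSeasonalProvider.
func buildProviders(q querier.Querier, logger *slog.Logger) (*anomaly.DailyProvider, *anomaly.HourlyProvider) {
    daily := anomaly.NewDailyProvider(
        anomaly.WithQuerier[*anomaly.DailyProvider](q),
        anomaly.WithLogger[*anomaly.DailyProvider](logger),
    )
    hourly := anomaly.NewHourlyProvider(
        anomaly.WithQuerier[*anomaly.HourlyProvider](q),
        anomaly.WithLogger[*anomaly.HourlyProvider](logger),
    )
    return daily, hourly
}
```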
223
ee/anomaly/params.go
Normal file
@@ -0,0 +1,223 @@
package anomaly

import (
    "time"

    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/SigNoz/signoz/pkg/valuer"
)

type Seasonality struct{ valuer.String }

var (
    SeasonalityHourly = Seasonality{valuer.NewString("hourly")}
    SeasonalityDaily  = Seasonality{valuer.NewString("daily")}
    SeasonalityWeekly = Seasonality{valuer.NewString("weekly")}
)

var (
    oneWeekOffset = uint64(24 * 7 * time.Hour.Milliseconds())
    oneDayOffset  = uint64(24 * time.Hour.Milliseconds())
    oneHourOffset = uint64(time.Hour.Milliseconds())
    fiveMinOffset = uint64(5 * time.Minute.Milliseconds())
)

func (s Seasonality) IsValid() bool {
    switch s {
    case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
        return true
    default:
        return false
    }
}

type AnomaliesRequest struct {
    Params      qbtypes.QueryRangeRequest
    Seasonality Seasonality
}

type AnomaliesResponse struct {
    Results []*qbtypes.TimeSeriesData
}

// anomalyQueryParams is the params for anomaly detection
// prediction = avg(past_period_query) + avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query)
//
//                       ^                          ^
//                       |                          |
//      (rounded value for past period)      (seasonal growth)
//
// score = abs(value - prediction) / stddev(current_season_query)
type anomalyQueryParams struct {
    // CurrentPeriodQuery is the query range params for period user is looking at or eval window
    // Example: (now-5m, now), (now-30m, now), (now-1h, now)
    // The results obtained from this query are used to compare with predicted values
    // and to detect anomalies
    CurrentPeriodQuery qbtypes.QueryRangeRequest
    // PastPeriodQuery is the query range params for past period of seasonality
    // Example: For weekly seasonality, (now-1w-5m, now-1w)
    //        : For daily seasonality, (now-1d-5m, now-1d)
    //        : For hourly seasonality, (now-1h-5m, now-1h)
    PastPeriodQuery qbtypes.QueryRangeRequest
    // CurrentSeasonQuery is the query range params for current period (seasonal)
    // Example: For weekly seasonality, this is the query range params for the (now-1w-5m, now)
    //        : For daily seasonality, this is the query range params for the (now-1d-5m, now)
    //        : For hourly seasonality, this is the query range params for the (now-1h-5m, now)
    CurrentSeasonQuery qbtypes.QueryRangeRequest
    // PastSeasonQuery is the query range params for past seasonal period to the current season
    // Example: For weekly seasonality, this is the query range params for the (now-2w-5m, now-1w)
    //        : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
    //        : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
    PastSeasonQuery qbtypes.QueryRangeRequest
    // Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
    // Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
    //        : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
    //        : For hourly seasonality, this is the query range params for the (now-3h-5m, now-2h)
    Past2SeasonQuery qbtypes.QueryRangeRequest
    // Past3SeasonQuery is the query range params for past 3 seasonal period to the current season
    // Example: For weekly seasonality, this is the query range params for the (now-4w-5m, now-3w)
    //        : For daily seasonality, this is the query range params for the (now-4d-5m, now-3d)
    //        : For hourly seasonality, this is the query range params for the (now-4h-5m, now-3h)
    Past3SeasonQuery qbtypes.QueryRangeRequest
}

func prepareAnomalyQueryParams(req qbtypes.QueryRangeRequest, seasonality Seasonality) *anomalyQueryParams {
    start := req.Start
    end := req.End

    currentPeriodQuery := qbtypes.QueryRangeRequest{
        Start:          start,
        End:            end,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var pastPeriodStart, pastPeriodEnd uint64

    switch seasonality {
    // for one week period, we fetch the data from the past week with 5 min offset
    case SeasonalityWeekly:
        pastPeriodStart = start - oneWeekOffset - fiveMinOffset
        pastPeriodEnd = end - oneWeekOffset
    // for one day period, we fetch the data from the past day with 5 min offset
    case SeasonalityDaily:
        pastPeriodStart = start - oneDayOffset - fiveMinOffset
        pastPeriodEnd = end - oneDayOffset
    // for one hour period, we fetch the data from the past hour with 5 min offset
    case SeasonalityHourly:
        pastPeriodStart = start - oneHourOffset - fiveMinOffset
        pastPeriodEnd = end - oneHourOffset
    }

    pastPeriodQuery := qbtypes.QueryRangeRequest{
        Start:          pastPeriodStart,
        End:            pastPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    // seasonality growth trend
    var currentGrowthPeriodStart, currentGrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        currentGrowthPeriodStart = start - oneWeekOffset
        currentGrowthPeriodEnd = start
    case SeasonalityDaily:
        currentGrowthPeriodStart = start - oneDayOffset
        currentGrowthPeriodEnd = start
    case SeasonalityHourly:
        currentGrowthPeriodStart = start - oneHourOffset
        currentGrowthPeriodEnd = start
    }

    currentGrowthQuery := qbtypes.QueryRangeRequest{
        Start:          currentGrowthPeriodStart,
        End:            currentGrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var pastGrowthPeriodStart, pastGrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        pastGrowthPeriodStart = start - 2*oneWeekOffset
        pastGrowthPeriodEnd = start - 1*oneWeekOffset
    case SeasonalityDaily:
        pastGrowthPeriodStart = start - 2*oneDayOffset
        pastGrowthPeriodEnd = start - 1*oneDayOffset
    case SeasonalityHourly:
        pastGrowthPeriodStart = start - 2*oneHourOffset
        pastGrowthPeriodEnd = start - 1*oneHourOffset
    }

    pastGrowthQuery := qbtypes.QueryRangeRequest{
        Start:          pastGrowthPeriodStart,
        End:            pastGrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var past2GrowthPeriodStart, past2GrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        past2GrowthPeriodStart = start - 3*oneWeekOffset
        past2GrowthPeriodEnd = start - 2*oneWeekOffset
    case SeasonalityDaily:
        past2GrowthPeriodStart = start - 3*oneDayOffset
        past2GrowthPeriodEnd = start - 2*oneDayOffset
    case SeasonalityHourly:
        past2GrowthPeriodStart = start - 3*oneHourOffset
        past2GrowthPeriodEnd = start - 2*oneHourOffset
    }

    past2GrowthQuery := qbtypes.QueryRangeRequest{
        Start:          past2GrowthPeriodStart,
        End:            past2GrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var past3GrowthPeriodStart, past3GrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        past3GrowthPeriodStart = start - 4*oneWeekOffset
        past3GrowthPeriodEnd = start - 3*oneWeekOffset
    case SeasonalityDaily:
        past3GrowthPeriodStart = start - 4*oneDayOffset
        past3GrowthPeriodEnd = start - 3*oneDayOffset
    case SeasonalityHourly:
        past3GrowthPeriodStart = start - 4*oneHourOffset
        past3GrowthPeriodEnd = start - 3*oneHourOffset
    }

    past3GrowthQuery := qbtypes.QueryRangeRequest{
        Start:          past3GrowthPeriodStart,
        End:            past3GrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    return &anomalyQueryParams{
        CurrentPeriodQuery: currentPeriodQuery,
        PastPeriodQuery:    pastPeriodQuery,
        CurrentSeasonQuery: currentGrowthQuery,
        PastSeasonQuery:    pastGrowthQuery,
        Past2SeasonQuery:   past2GrowthQuery,
        Past3SeasonQuery:   past3GrowthQuery,
    }
}

type anomalyQueryResults struct {
    CurrentPeriodResults []*qbtypes.TimeSeriesData
    PastPeriodResults    []*qbtypes.TimeSeriesData
    CurrentSeasonResults []*qbtypes.TimeSeriesData
    PastSeasonResults    []*qbtypes.TimeSeriesData
    Past2SeasonResults   []*qbtypes.TimeSeriesData
    Past3SeasonResults   []*qbtypes.TimeSeriesData
}
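Restated in conventional notation, the arithmetic described in the `anomalyQueryParams` comment is (a restatement of the comment, not new behavior):

```latex
\hat{y} = \mathrm{avg}(\text{past period}) + \mathrm{avg}(\text{current season})
        - \mathrm{mean}(\text{past season},\ \text{past 2 seasons},\ \text{past 3 seasons})

\mathrm{score} = \frac{\lvert y - \hat{y} \rvert}{\mathrm{stddev}(\text{current season})}
```

In effect the score is a z-score of the observed value against the seasonal prediction; thresholding it (not shown in this excerpt) yields the anomaly flag.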
11
ee/anomaly/provider.go
Normal file
@@ -0,0 +1,11 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type Provider interface {
    GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error)
}
463
ee/anomaly/seasonal.go
Normal file
@@ -0,0 +1,463 @@
|
|||||||
|
package anomaly
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/querier"
|
||||||
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
|
|
||||||
|
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// TODO(srikanthccv): make this configurable?
|
||||||
|
movingAvgWindowSize = 7
|
||||||
|
)
|
||||||
|
|
||||||
|
// BaseProvider is an interface that includes common methods for all provider types
|
||||||
|
type BaseProvider interface {
|
||||||
|
GetBaseSeasonalProvider() *BaseSeasonalProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenericProviderOption is a generic type for provider options
|
||||||
|
type GenericProviderOption[T BaseProvider] func(T)
|
||||||
|
|
||||||
|
func WithQuerier[T BaseProvider](querier querier.Querier) GenericProviderOption[T] {
|
||||||
|
return func(p T) {
|
||||||
|
p.GetBaseSeasonalProvider().querier = querier
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithLogger[T BaseProvider](logger *slog.Logger) GenericProviderOption[T] {
|
||||||
|
return func(p T) {
|
||||||
|
p.GetBaseSeasonalProvider().logger = logger
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type BaseSeasonalProvider struct {
|
||||||
|
querier querier.Querier
|
||||||
|
logger *slog.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BaseSeasonalProvider) getQueryParams(req *AnomaliesRequest) *anomalyQueryParams {
|
||||||
|
if !req.Seasonality.IsValid() {
|
||||||
|
req.Seasonality = SeasonalityDaily
|
||||||
|
}
|
||||||
|
return prepareAnomalyQueryParams(req.Params, req.Seasonality)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BaseSeasonalProvider) toTSResults(ctx context.Context, resp *qbtypes.QueryRangeResponse) []*qbtypes.TimeSeriesData {
|
||||||
|
|
||||||
|
tsData := []*qbtypes.TimeSeriesData{}
|
||||||
|
|
||||||
|
if resp == nil {
|
||||||
|
p.logger.InfoContext(ctx, "nil response from query range")
|
||||||
|
return tsData
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, item := range resp.Data.Results {
|
||||||
|
if resultData, ok := item.(*qbtypes.TimeSeriesData); ok {
|
||||||
|
tsData = append(tsData, resultData)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return tsData
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID, params *anomalyQueryParams) (*anomalyQueryResults, error) {
	// TODO(srikanthccv): parallelize this?
	p.logger.InfoContext(ctx, "fetching results for current period", "anomaly_current_period_query", params.CurrentPeriodQuery)
	currentPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentPeriodQuery)
	if err != nil {
		return nil, err
	}

	p.logger.InfoContext(ctx, "fetching results for past period", "anomaly_past_period_query", params.PastPeriodQuery)
	pastPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.PastPeriodQuery)
	if err != nil {
		return nil, err
	}

	p.logger.InfoContext(ctx, "fetching results for current season", "anomaly_current_season_query", params.CurrentSeasonQuery)
	currentSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentSeasonQuery)
	if err != nil {
		return nil, err
	}

	p.logger.InfoContext(ctx, "fetching results for past season", "anomaly_past_season_query", params.PastSeasonQuery)
	pastSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.PastSeasonQuery)
	if err != nil {
		return nil, err
	}

	p.logger.InfoContext(ctx, "fetching results for past 2 season", "anomaly_past_2season_query", params.Past2SeasonQuery)
	past2SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past2SeasonQuery)
	if err != nil {
		return nil, err
	}

	p.logger.InfoContext(ctx, "fetching results for past 3 season", "anomaly_past_3season_query", params.Past3SeasonQuery)
	past3SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past3SeasonQuery)
	if err != nil {
		return nil, err
	}

	return &anomalyQueryResults{
		CurrentPeriodResults: p.toTSResults(ctx, currentPeriodResults),
		PastPeriodResults:    p.toTSResults(ctx, pastPeriodResults),
		CurrentSeasonResults: p.toTSResults(ctx, currentSeasonResults),
		PastSeasonResults:    p.toTSResults(ctx, pastSeasonResults),
		Past2SeasonResults:   p.toTSResults(ctx, past2SeasonResults),
		Past3SeasonResults:   p.toTSResults(ctx, past3SeasonResults),
	}, nil
}

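The TODO above asks whether these six sequential QueryRange calls could run concurrently. A minimal sketch of one way to do it, assuming golang.org/x/sync/errgroup is available and Go 1.22+ per-iteration loop variables (on older Go, rebind i and q inside the loop):

	g, gctx := errgroup.WithContext(ctx)
	queries := []*qbtypes.QueryRangeRequest{
		&params.CurrentPeriodQuery, &params.PastPeriodQuery,
		&params.CurrentSeasonQuery, &params.PastSeasonQuery,
		&params.Past2SeasonQuery, &params.Past3SeasonQuery,
	}
	responses := make([]*qbtypes.QueryRangeResponse, len(queries))
	for i, q := range queries {
		g.Go(func() error {
			resp, err := p.querier.QueryRange(gctx, orgID, q)
			responses[i] = resp // each goroutine writes only its own slot, so no extra locking is needed
			return err
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}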
// getMatchingSeries gets the matching series from the query result
// for the given series
func (p *BaseSeasonalProvider) getMatchingSeries(_ context.Context, queryResult *qbtypes.TimeSeriesData, series *qbtypes.TimeSeries) *qbtypes.TimeSeries {
	if queryResult == nil || len(queryResult.Aggregations) == 0 || len(queryResult.Aggregations[0].Series) == 0 {
		return nil
	}

	for _, curr := range queryResult.Aggregations[0].Series {
		currLabelsKey := qbtypes.GetUniqueSeriesKey(curr.Labels)
		seriesLabelsKey := qbtypes.GetUniqueSeriesKey(series.Labels)
		if currLabelsKey == seriesLabelsKey {
			return curr
		}
	}
	return nil
}

func (p *BaseSeasonalProvider) getAvg(series *qbtypes.TimeSeries) float64 {
	if series == nil || len(series.Values) == 0 {
		return 0
	}
	var sum float64
	for _, smpl := range series.Values {
		sum += smpl.Value
	}
	return sum / float64(len(series.Values))
}

func (p *BaseSeasonalProvider) getStdDev(series *qbtypes.TimeSeries) float64 {
	if series == nil || len(series.Values) == 0 {
		return 0
	}
	avg := p.getAvg(series)
	var sum float64
	for _, smpl := range series.Values {
		sum += math.Pow(smpl.Value-avg, 2)
	}
	return math.Sqrt(sum / float64(len(series.Values)))
}

// getMovingAvg gets the moving average for the given series
// for the given window size and start index
func (p *BaseSeasonalProvider) getMovingAvg(series *qbtypes.TimeSeries, movingAvgWindowSize, startIdx int) float64 {
	if series == nil || len(series.Values) == 0 {
		return 0
	}
	if startIdx >= len(series.Values)-movingAvgWindowSize {
		startIdx = int(math.Max(0, float64(len(series.Values)-movingAvgWindowSize)))
	}
	var sum float64
	points := series.Values[startIdx:]
	windowSize := int(math.Min(float64(movingAvgWindowSize), float64(len(points))))
	for i := 0; i < windowSize; i++ {
		sum += points[i].Value
	}
	avg := sum / float64(windowSize)
	return avg
}

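A worked instance of the index clamping above, with illustrative sizes: with len(series.Values) == 10 and movingAvgWindowSize == 7, any startIdx >= 3 is clamped to 3, so the average always covers the last full 7-point window; with only 5 points, startIdx is clamped to 0 and windowSize shrinks to min(7, 5) = 5, averaging whatever points remain.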
func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
	if len(floats) == 0 {
		return 0
	}
	var sum float64
	for _, f := range floats {
		sum += f
	}
	return sum / float64(len(floats))
}

func (p *BaseSeasonalProvider) getPredictedSeries(
	ctx context.Context,
	series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries,
) *qbtypes.TimeSeries {
	predictedSeries := &qbtypes.TimeSeries{
		Labels: series.Labels,
		Values: make([]*qbtypes.TimeSeriesValue, 0),
	}

	// for each point in the series, get the predicted value
	// the predicted value is the moving average (with window size = 7) of the previous period series
	// plus the average of the current season series
	// minus the mean of the past season series, past2 season series and past3 season series
	for idx, curr := range series.Values {
		movingAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
		avg := p.getAvg(currentSeasonSeries)
		mean := p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
		predictedValue := movingAvg + avg - mean

		if predictedValue < 0 {
			// this should not happen (except when the data has extreme outliers)
			// we will use the moving avg of the previous period series in this case
			p.logger.WarnContext(ctx, "predicted value is less than 0 for series", "anomaly_predicted_value", predictedValue, "anomaly_labels", series.Labels)
			predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
		}

		p.logger.DebugContext(ctx, "predicted value for series",
			"anomaly_moving_avg", movingAvg,
			"anomaly_avg", avg,
			"anomaly_mean", mean,
			"anomaly_labels", series.Labels,
			"anomaly_predicted_value", predictedValue,
			"anomaly_curr", curr.Value,
		)
		predictedSeries.Values = append(predictedSeries.Values, &qbtypes.TimeSeriesValue{
			Timestamp: curr.Timestamp,
			Value:     predictedValue,
		})
	}

	return predictedSeries
}

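A worked instance of the prediction rule documented above, with illustrative numbers: if movingAvg(prev) = 100, avg(currentSeason) = 120, and the three past-season averages are 110, 100 and 90 (mean 100), the prediction is 100 + 120 - 100 = 120, i.e. the recent level shifted by how far the current season runs above a typical past season.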
// getBounds gets the upper and lower bounds for the given series
// for the given z score threshold:
// moving avg of the predicted series + z score threshold * std dev of the series
// moving avg of the predicted series - z score threshold * std dev of the series
func (p *BaseSeasonalProvider) getBounds(
	series, predictedSeries *qbtypes.TimeSeries,
	zScoreThreshold float64,
) (*qbtypes.TimeSeries, *qbtypes.TimeSeries) {
	upperBoundSeries := &qbtypes.TimeSeries{
		Labels: series.Labels,
		Values: make([]*qbtypes.TimeSeriesValue, 0),
	}

	lowerBoundSeries := &qbtypes.TimeSeries{
		Labels: series.Labels,
		Values: make([]*qbtypes.TimeSeriesValue, 0),
	}

	for idx, curr := range series.Values {
		upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
		lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
		upperBoundSeries.Values = append(upperBoundSeries.Values, &qbtypes.TimeSeriesValue{
			Timestamp: curr.Timestamp,
			Value:     upperBound,
		})
		lowerBoundSeries.Values = append(lowerBoundSeries.Values, &qbtypes.TimeSeriesValue{
			Timestamp: curr.Timestamp,
			Value:     math.Max(lowerBound, 0),
		})
	}

	return upperBoundSeries, lowerBoundSeries
}

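A worked instance of the band arithmetic above, with illustrative numbers: with movingAvg(predicted) = 120, stdDev(series) = 10 and zScoreThreshold = 3, the band is 120 ± 30, so upper = 150 and lower = 90; a lower bound that comes out negative is clamped to 0 by the math.Max call.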
// getExpectedValue gets the expected value for the given series
// for the given index:
// moving avg of the previous period series + avg of the current season series
// - mean of the past, past2 and past3 season series averages
func (p *BaseSeasonalProvider) getExpectedValue(
	_, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries, idx int,
) float64 {
	prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
	currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
	pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
	past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
	past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
	return prevSeriesAvg + currentSeasonSeriesAvg - p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
}

// getScore gets the anomaly score for the given series
// for the given index:
// (value - expectedValue) / std dev of the current season series
func (p *BaseSeasonalProvider) getScore(
	series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries, value float64, idx int,
) float64 {
	expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
	if expectedValue < 0 {
		expectedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
	}
	return (value - expectedValue) / p.getStdDev(weekSeries)
}

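A worked instance of the score above, with illustrative numbers: value = 150, expected value = 120 and a current-season standard deviation of 10 give (150 - 120) / 10 = 3, i.e. the observation sits three standard deviations above expectation.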
// getAnomalyScores gets the anomaly score for each point in the given series:
// (value - expectedValue) / std dev of the current season series
func (p *BaseSeasonalProvider) getAnomalyScores(
	series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries,
) *qbtypes.TimeSeries {
	anomalyScoreSeries := &qbtypes.TimeSeries{
		Labels: series.Labels,
		Values: make([]*qbtypes.TimeSeriesValue, 0),
	}

	for idx, curr := range series.Values {
		anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
		anomalyScoreSeries.Values = append(anomalyScoreSeries.Values, &qbtypes.TimeSeriesValue{
			Timestamp: curr.Timestamp,
			Value:     anomalyScore,
		})
	}

	return anomalyScoreSeries
}

func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
	anomalyParams := p.getQueryParams(req)
	anomalyQueryResults, err := p.getResults(ctx, orgID, anomalyParams)
	if err != nil {
		return nil, err
	}

	currentPeriodResults := make(map[string]*qbtypes.TimeSeriesData)
	for _, result := range anomalyQueryResults.CurrentPeriodResults {
		currentPeriodResults[result.QueryName] = result
	}

	pastPeriodResults := make(map[string]*qbtypes.TimeSeriesData)
	for _, result := range anomalyQueryResults.PastPeriodResults {
		pastPeriodResults[result.QueryName] = result
	}

	currentSeasonResults := make(map[string]*qbtypes.TimeSeriesData)
	for _, result := range anomalyQueryResults.CurrentSeasonResults {
		currentSeasonResults[result.QueryName] = result
	}

	pastSeasonResults := make(map[string]*qbtypes.TimeSeriesData)
	for _, result := range anomalyQueryResults.PastSeasonResults {
		pastSeasonResults[result.QueryName] = result
	}

	past2SeasonResults := make(map[string]*qbtypes.TimeSeriesData)
	for _, result := range anomalyQueryResults.Past2SeasonResults {
		past2SeasonResults[result.QueryName] = result
	}

	past3SeasonResults := make(map[string]*qbtypes.TimeSeriesData)
	for _, result := range anomalyQueryResults.Past3SeasonResults {
		past3SeasonResults[result.QueryName] = result
	}

	for _, result := range currentPeriodResults {
		funcs := req.Params.FuncsForQuery(result.QueryName)

		var zScoreThreshold float64
		for _, f := range funcs {
			if f.Name == qbtypes.FunctionNameAnomaly {
				for _, arg := range f.Args {
					if arg.Name != "z_score_threshold" {
						continue
					}
					value, ok := arg.Value.(float64)
					if ok {
						zScoreThreshold = value
					} else {
						p.logger.InfoContext(ctx, "z_score_threshold not provided, defaulting")
						zScoreThreshold = 3
					}
					break
				}
			}
		}

		pastPeriodResult, ok := pastPeriodResults[result.QueryName]
		if !ok {
			continue
		}
		currentSeasonResult, ok := currentSeasonResults[result.QueryName]
		if !ok {
			continue
		}
		pastSeasonResult, ok := pastSeasonResults[result.QueryName]
		if !ok {
			continue
		}
		past2SeasonResult, ok := past2SeasonResults[result.QueryName]
		if !ok {
			continue
		}
		past3SeasonResult, ok := past3SeasonResults[result.QueryName]
		if !ok {
			continue
		}

		// no data
		if len(result.Aggregations) == 0 {
			continue
		}

		aggOfInterest := result.Aggregations[0]

		for _, series := range aggOfInterest.Series {
			stdDev := p.getStdDev(series)
			p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)

			pastPeriodSeries := p.getMatchingSeries(ctx, pastPeriodResult, series)
			currentSeasonSeries := p.getMatchingSeries(ctx, currentSeasonResult, series)
			pastSeasonSeries := p.getMatchingSeries(ctx, pastSeasonResult, series)
			past2SeasonSeries := p.getMatchingSeries(ctx, past2SeasonResult, series)
			past3SeasonSeries := p.getMatchingSeries(ctx, past3SeasonResult, series)

			prevSeriesAvg := p.getAvg(pastPeriodSeries)
			currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
			pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
			past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
			past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
			p.logger.InfoContext(ctx, "calculated mean for series",
				"anomaly_prev_series_avg", prevSeriesAvg,
				"anomaly_current_season_series_avg", currentSeasonSeriesAvg,
				"anomaly_past_season_series_avg", pastSeasonSeriesAvg,
				"anomaly_past_2season_series_avg", past2SeasonSeriesAvg,
				"anomaly_past_3season_series_avg", past3SeasonSeriesAvg,
				"anomaly_labels", series.Labels,
			)

			predictedSeries := p.getPredictedSeries(
				ctx,
				series,
				pastPeriodSeries,
				currentSeasonSeries,
				pastSeasonSeries,
				past2SeasonSeries,
				past3SeasonSeries,
			)
			aggOfInterest.PredictedSeries = append(aggOfInterest.PredictedSeries, predictedSeries)

			upperBoundSeries, lowerBoundSeries := p.getBounds(
				series,
				predictedSeries,
				zScoreThreshold,
			)
			aggOfInterest.UpperBoundSeries = append(aggOfInterest.UpperBoundSeries, upperBoundSeries)
			aggOfInterest.LowerBoundSeries = append(aggOfInterest.LowerBoundSeries, lowerBoundSeries)

			anomalyScoreSeries := p.getAnomalyScores(
				series,
				pastPeriodSeries,
				currentSeasonSeries,
				pastSeasonSeries,
				past2SeasonSeries,
				past3SeasonSeries,
			)
			aggOfInterest.AnomalyScores = append(aggOfInterest.AnomalyScores, anomalyScoreSeries)
		}
	}

	results := make([]*qbtypes.TimeSeriesData, 0, len(currentPeriodResults))
	for _, result := range currentPeriodResults {
		results = append(results, result)
	}

	return &AnomaliesResponse{
		Results: results,
	}, nil
}
34 ee/anomaly/weekly.go Normal file
@@ -0,0 +1,34 @@
package anomaly

import (
	"context"

	"github.com/SigNoz/signoz/pkg/valuer"
)

type WeeklyProvider struct {
	BaseSeasonalProvider
}

var _ BaseProvider = (*WeeklyProvider)(nil)

func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
	return &wp.BaseSeasonalProvider
}

func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
	wp := &WeeklyProvider{
		BaseSeasonalProvider: BaseSeasonalProvider{},
	}

	for _, opt := range opts {
		opt(wp)
	}

	return wp
}

func (p *WeeklyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
	req.Seasonality = SeasonalityWeekly
	return p.getAnomalies(ctx, orgID, req)
}
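For reference, wiring a provider together with the generic functional options above mirrors what createAnomalyProvider does later in this changeset; a minimal usage sketch, where q and logger are assumed to be an existing querier.Querier and *slog.Logger:

	provider := anomaly.NewWeeklyProvider(
		anomaly.WithQuerier[*anomaly.WeeklyProvider](q),
		anomaly.WithLogger[*anomaly.WeeklyProvider](logger),
	)
	resp, err := provider.GetAnomalies(ctx, orgID, &anomaly.AnomaliesRequest{Params: queryRangeRequest})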
@@ -1,4 +0,0 @@
.vscode
README.md
signoz.db
bin
@@ -59,7 +59,7 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
 		LicensingAPI: httplicensing.NewLicensingAPI(signoz.Licensing),
 		FieldsAPI:    fields.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.TelemetryStore),
 		Signoz:       signoz,
-		QuerierAPI:   querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier),
+		QuerierAPI:   querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
 	})

 	if err != nil {
@@ -110,6 +110,11 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
 	// v4
 	router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)

+	// v5
+	router.HandleFunc("/api/v5/query_range", am.ViewAccess(ah.queryRangeV5)).Methods(http.MethodPost)
+
+	router.HandleFunc("/api/v5/substitute_vars", am.ViewAccess(ah.QuerierAPI.ReplaceVariables)).Methods(http.MethodPost)
+
 	// Gateway
 	router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP))
@@ -13,11 +13,11 @@ import (
 	"github.com/SigNoz/signoz/ee/query-service/constants"
 	"github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/http/render"
+	"github.com/SigNoz/signoz/pkg/modules/user"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
-	"github.com/google/uuid"
 	"github.com/gorilla/mux"
 	"go.uber.org/zap"
 )
@@ -192,14 +192,14 @@ func (ah *APIHandler) getOrCreateCloudIntegrationUser(
 		))
 	}

-	password, err := types.NewFactorPassword(uuid.NewString())
+	password := types.MustGenerateFactorPassword(newUser.ID.StringValue())

-	integrationUser, err := ah.Signoz.Modules.User.CreateUserWithPassword(ctx, newUser, password)
+	err = ah.Signoz.Modules.User.CreateUser(ctx, newUser, user.WithFactorPassword(password))
 	if err != nil {
 		return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
 	}

-	return integrationUser, nil
+	return newUser, nil
 }

 func getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
@@ -2,11 +2,16 @@ package api

 import (
 	"bytes"
+	"context"
+	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
+	"runtime/debug"

+	anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"
 	"github.com/SigNoz/signoz/ee/query-service/anomaly"
+	"github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/http/render"
 	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
 	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
@@ -15,6 +20,8 @@ import (
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"go.uber.org/zap"
+
+	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
 )

 func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
@@ -136,3 +143,139 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
 		aH.QueryRangeV4(w, r)
 	}
 }
+
+func extractSeasonality(anomalyQuery *qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) anomalyV2.Seasonality {
+	for _, fn := range anomalyQuery.Functions {
+		if fn.Name == qbtypes.FunctionNameAnomaly {
+			for _, arg := range fn.Args {
+				if arg.Name == "seasonality" {
+					if seasonalityStr, ok := arg.Value.(string); ok {
+						switch seasonalityStr {
+						case "weekly":
+							return anomalyV2.SeasonalityWeekly
+						case "hourly":
+							return anomalyV2.SeasonalityHourly
+						}
+					}
+				}
+			}
+		}
+	}
+	return anomalyV2.SeasonalityDaily // default
+}
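As an illustration, a request fragment of the shape extractSeasonality walks might look like the following JSON; the field names and the "anomaly" function name are assumptions based on the qbtypes accessors used above, and unrecognized or missing values fall back to daily:

	"functions": [
		{
			"name": "anomaly",
			"args": [
				{ "name": "seasonality", "value": "weekly" }
			]
		}
	]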
+func createAnomalyProvider(aH *APIHandler, seasonality anomalyV2.Seasonality) anomalyV2.Provider {
+	switch seasonality {
+	case anomalyV2.SeasonalityWeekly:
+		return anomalyV2.NewWeeklyProvider(
+			anomalyV2.WithQuerier[*anomalyV2.WeeklyProvider](aH.Signoz.Querier),
+			anomalyV2.WithLogger[*anomalyV2.WeeklyProvider](aH.Signoz.Instrumentation.Logger()),
+		)
+	case anomalyV2.SeasonalityHourly:
+		return anomalyV2.NewHourlyProvider(
+			anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](aH.Signoz.Querier),
+			anomalyV2.WithLogger[*anomalyV2.HourlyProvider](aH.Signoz.Instrumentation.Logger()),
+		)
+	default:
+		return anomalyV2.NewDailyProvider(
+			anomalyV2.WithQuerier[*anomalyV2.DailyProvider](aH.Signoz.Querier),
+			anomalyV2.WithLogger[*anomalyV2.DailyProvider](aH.Signoz.Instrumentation.Logger()),
+		)
+	}
+}
+
+func (aH *APIHandler) handleAnomalyQuery(ctx context.Context, orgID valuer.UUID, anomalyQuery *qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation], queryRangeRequest qbtypes.QueryRangeRequest) (*anomalyV2.AnomaliesResponse, error) {
+	seasonality := extractSeasonality(anomalyQuery)
+	provider := createAnomalyProvider(aH, seasonality)
+
+	return provider.GetAnomalies(ctx, orgID, &anomalyV2.AnomaliesRequest{Params: queryRangeRequest})
+}
+
+func (aH *APIHandler) queryRangeV5(rw http.ResponseWriter, req *http.Request) {
+
+	bodyBytes, err := io.ReadAll(req.Body)
+	if err != nil {
+		render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to read request body: %v", err))
+		return
+	}
+	req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+
+	ctx := req.Context()
+
+	claims, err := authtypes.ClaimsFromContext(ctx)
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	var queryRangeRequest qbtypes.QueryRangeRequest
+	if err := json.NewDecoder(req.Body).Decode(&queryRangeRequest); err != nil {
+		render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to decode request body: %v", err))
+		return
+	}
+
+	defer func() {
+		if r := recover(); r != nil {
+			stackTrace := string(debug.Stack())
+
+			queryJSON, _ := json.Marshal(queryRangeRequest)
+
+			aH.Signoz.Instrumentation.Logger().ErrorContext(ctx, "panic in QueryRange",
+				"error", r,
+				"user", claims.UserID,
+				"payload", string(queryJSON),
+				"stacktrace", stackTrace,
+			)
+
+			render.Error(rw, errors.NewInternalf(
+				errors.CodeInternal,
+				"Something went wrong on our end. It's not you, it's us. Our team is notified about it. Reach out to support if issue persists.",
+			))
+		}
+	}()
+
+	if err := queryRangeRequest.Validate(); err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	if anomalyQuery, ok := queryRangeRequest.IsAnomalyRequest(); ok {
+		anomalies, err := aH.handleAnomalyQuery(ctx, orgID, anomalyQuery, queryRangeRequest)
+		if err != nil {
+			render.Error(rw, errors.NewInternalf(errors.CodeInternal, "failed to get anomalies: %v", err))
+			return
+		}
+
+		results := []any{}
+		for _, item := range anomalies.Results {
+			results = append(results, item)
+		}
+
+		finalResp := &qbtypes.QueryRangeResponse{
+			Type: queryRangeRequest.RequestType,
+			Data: struct {
+				Results []any `json:"results"`
+			}{
+				Results: results,
+			},
+			Meta: struct {
+				RowsScanned  uint64 `json:"rowsScanned"`
+				BytesScanned uint64 `json:"bytesScanned"`
+				DurationMS   uint64 `json:"durationMs"`
+			}{},
+		}
+
+		render.Success(rw, http.StatusOK, finalResp)
+		return
+	} else {
+		// regular query range request, let the querier handle it
+		req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+		aH.QuerierAPI.QueryRange(rw, req)
+	}
+}
@@ -3,6 +3,7 @@ package app
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	_ "net/http/pprof" // http profiler
@@ -18,6 +19,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/http/middleware"
 	"github.com/SigNoz/signoz/pkg/modules/organization"
 	"github.com/SigNoz/signoz/pkg/prometheus"
+	"github.com/SigNoz/signoz/pkg/querier"
 	"github.com/SigNoz/signoz/pkg/signoz"
 	"github.com/SigNoz/signoz/pkg/sqlstore"
 	"github.com/SigNoz/signoz/pkg/telemetrystore"
@@ -42,19 +44,6 @@ import (
 	"go.uber.org/zap"
 )

-type ServerOptions struct {
-	Config                     signoz.Config
-	SigNoz                     *signoz.SigNoz
-	HTTPHostPort               string
-	PrivateHostPort            string
-	PreferSpanMetrics          bool
-	FluxInterval               string
-	FluxIntervalForTraceDetail string
-	Cluster                    string
-	GatewayUrl                 string
-	Jwt                        *authtypes.JWT
-}
-
 // Server runs HTTP, Mux and a grpc server
 type Server struct {
 	config signoz.Config
@@ -67,11 +56,6 @@ type Server struct {
 	httpServer   *http.Server
 	httpHostPort string

-	// private http
-	privateConn     net.Listener
-	privateHTTP     *http.Server
-	privateHostPort string
-
 	opampServer *opamp.Server

 	// Usage manager
@@ -104,6 +88,8 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
 		signoz.TelemetryStore,
 		signoz.Prometheus,
 		signoz.Modules.OrgGetter,
+		signoz.Querier,
+		signoz.Instrumentation.Logger(),
 	)

 	if err != nil {
@@ -179,7 +165,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
 		jwt:                jwt,
 		ruleManager:        rm,
 		httpHostPort:       baseconst.HTTPHostPort,
-		privateHostPort:    baseconst.PrivateHostPort,
 		unavailableChannel: make(chan healthcheck.Status),
 		usageManager:       usageManager,
 	}
@@ -192,28 +177,10 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)

 	s.httpServer = httpServer

-	privateServer, err := s.createPrivateServer(apiHandler)
-	if err != nil {
-		return nil, err
-	}
-
-	s.privateHTTP = privateServer
-
 	s.opampServer = opamp.InitializeServer(
 		&opAmpModel.AllAgents, agentConfMgr, signoz.Instrumentation,
 	)

-	orgs, err := apiHandler.Signoz.Modules.OrgGetter.ListByOwnedKeyRange(context.Background())
-	if err != nil {
-		return nil, err
-	}
-	for _, org := range orgs {
-		errorList := reader.PreloadMetricsMetadata(context.Background(), org.ID)
-		for _, er := range errorList {
-			zap.L().Error("failed to preload metrics metadata", zap.Error(er))
-		}
-	}
-
 	return s, nil
 }

@@ -222,36 +189,6 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
 	return s.unavailableChannel
 }

-func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
-	r := baseapp.NewRouter()
-
-	r.Use(middleware.NewAuth(s.jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
-	r.Use(middleware.NewAPIKey(s.signoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.signoz.Instrumentation.Logger(), s.signoz.Sharder).Wrap)
-	r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
-		s.config.APIServer.Timeout.ExcludedRoutes,
-		s.config.APIServer.Timeout.Default,
-		s.config.APIServer.Timeout.Max,
-	).Wrap)
-	r.Use(middleware.NewLogging(s.signoz.Instrumentation.Logger(), s.config.APIServer.Logging.ExcludedRoutes).Wrap)
-
-	apiHandler.RegisterPrivateRoutes(r)
-
-	c := cors.New(cors.Options{
-		//todo(amol): find out a way to add exact domain or
-		// ip here for alert manager
-		AllowedOrigins: []string{"*"},
-		AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
-		AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY", "X-SIGNOZ-QUERY-ID", "Sec-WebSocket-Protocol"},
-	})
-
-	handler := c.Handler(r)
-	handler = handlers.CompressHandler(handler)
-
-	return &http.Server{
-		Handler: handler,
-	}, nil
-}
-
 func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
 	r := baseapp.NewRouter()
 	am := middleware.NewAuthZ(s.signoz.Instrumentation.Logger())
@@ -264,6 +201,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
 		s.config.APIServer.Timeout.Max,
 	).Wrap)
 	r.Use(middleware.NewLogging(s.signoz.Instrumentation.Logger(), s.config.APIServer.Logging.ExcludedRoutes).Wrap)
+	r.Use(middleware.NewComment().Wrap)

 	apiHandler.RegisterRoutes(r, am)
 	apiHandler.RegisterLogsRoutes(r, am)
@@ -316,19 +254,6 @@ func (s *Server) initListeners() error {

 	zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.httpHostPort))

-	// listen on private port to support internal services
-	privateHostPort := s.privateHostPort
-
-	if privateHostPort == "" {
-		return fmt.Errorf("baseconst.PrivateHostPort is required")
-	}
-
-	s.privateConn, err = net.Listen("tcp", privateHostPort)
-	if err != nil {
-		return err
-	}
-	zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.privateHostPort))
-
 	return nil
 }

@@ -367,26 +292,6 @@ func (s *Server) Start(ctx context.Context) error {
 		}
 	}()

-	var privatePort int
-	if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
-		privatePort = port
-	}
-
-	go func() {
-		zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.privateHostPort))
-
-		switch err := s.privateHTTP.Serve(s.privateConn); err {
-		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
-			// normal exit, nothing to do
-			zap.L().Info("private http server closed")
-		default:
-			zap.L().Error("Could not start private HTTP server", zap.Error(err))
-		}
-
-		s.unavailableChannel <- healthcheck.Unavailable
-	}()
-
 	go func() {
 		zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
 		err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
@@ -406,12 +311,6 @@ func (s *Server) Stop(ctx context.Context) error {
 		}
 	}

-	if s.privateHTTP != nil {
-		if err := s.privateHTTP.Shutdown(ctx); err != nil {
-			return err
-		}
-	}
-
 	s.opampServer.Stop()

 	if s.ruleManager != nil {
@@ -432,6 +331,8 @@ func makeRulesManager(
 	telemetryStore telemetrystore.TelemetryStore,
 	prometheus prometheus.Prometheus,
 	orgGetter organization.Getter,
+	querier querier.Querier,
+	logger *slog.Logger,
 ) (*baserules.Manager, error) {
 	// create manager opts
 	managerOpts := &baserules.ManagerOptions{
@@ -440,6 +341,8 @@ func makeRulesManager(
 		Context: context.Background(),
 		Logger:  zap.L(),
 		Reader:  ch,
+		Querier: querier,
+		SLogger: logger,
 		Cache:     cache,
 		EvalDelay: baseconst.GetEvalDelay(),
 		PrepareTaskFunc: rules.PrepareTaskFunc,
@@ -40,7 +40,7 @@ var IsDotMetricsEnabled = false
 var IsPreferSpanMetrics = false

 func init() {
-	if GetOrDefaultEnv(DotMetricsEnabled, "false") == "true" {
+	if GetOrDefaultEnv(DotMetricsEnabled, "true") == "true" {
 		IsDotMetricsEnabled = true
 	}
@@ -1,179 +0,0 @@
package main

import (
	"context"
	"flag"
	"os"
	"time"

	"github.com/SigNoz/signoz/ee/licensing"
	"github.com/SigNoz/signoz/ee/licensing/httplicensing"
	"github.com/SigNoz/signoz/ee/query-service/app"
	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
	"github.com/SigNoz/signoz/ee/zeus"
	"github.com/SigNoz/signoz/ee/zeus/httpzeus"
	"github.com/SigNoz/signoz/pkg/analytics"
	"github.com/SigNoz/signoz/pkg/config"
	"github.com/SigNoz/signoz/pkg/config/envprovider"
	"github.com/SigNoz/signoz/pkg/config/fileprovider"
	"github.com/SigNoz/signoz/pkg/factory"
	pkglicensing "github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/version"
	pkgzeus "github.com/SigNoz/signoz/pkg/zeus"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Deprecated: Please use the logger from pkg/instrumentation.
func initZapLog() *zap.Logger {
	config := zap.NewProductionConfig()
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	logger, _ := config.Build()
	return logger
}

func main() {
	var promConfigPath, skipTopLvlOpsPath string

	// disables rule execution but allows change to the rule definition
	var disableRules bool

	// the url used to build link in the alert messages in slack and other systems
	var ruleRepoURL string
	var cluster string

	var useLogsNewSchema bool
	var useTraceNewSchema bool
	var cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string
	var preferSpanMetrics bool

	var maxIdleConns int
	var maxOpenConns int
	var dialTimeout time.Duration
	var gatewayUrl string
	var useLicensesV3 bool

	// Deprecated
	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
	// Deprecated
	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
	// Deprecated
	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
	// Deprecated
	flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
	// Deprecated
	flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
	flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
	// Deprecated
	flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
	// Deprecated
	flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
	// Deprecated
	flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
	// Deprecated
	flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
	// Deprecated
	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
	flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
	// Deprecated
	flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
	flag.Parse()

	loggerMgr := initZapLog()
	zap.ReplaceGlobals(loggerMgr)
	defer loggerMgr.Sync() // flushes buffer, if any
	ctx := context.Background()

	config, err := signoz.NewConfig(ctx, config.ResolverConfig{
		Uris: []string{"env:"},
		ProviderFactories: []config.ProviderFactory{
			envprovider.NewFactory(),
			fileprovider.NewFactory(),
		},
	}, signoz.DeprecatedFlags{
		MaxIdleConns:               maxIdleConns,
		MaxOpenConns:               maxOpenConns,
		DialTimeout:                dialTimeout,
		Config:                     promConfigPath,
		FluxInterval:               fluxInterval,
		FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
		Cluster:                    cluster,
		GatewayUrl:                 gatewayUrl,
	})
	if err != nil {
		zap.L().Fatal("Failed to create config", zap.Error(err))
	}

	version.Info.PrettyPrint(config.Version)

	sqlStoreFactories := signoz.NewSQLStoreProviderFactories()
	if err := sqlStoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
		zap.L().Fatal("Failed to add postgressqlstore factory", zap.Error(err))
	}

	jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")

	if len(jwtSecret) == 0 {
		zap.L().Warn("No JWT secret key is specified.")
	} else {
		zap.L().Info("JWT secret key set successfully.")
	}

	jwt := authtypes.NewJWT(jwtSecret, 30*time.Minute, 30*24*time.Hour)

	signoz, err := signoz.New(
		context.Background(),
		config,
		jwt,
		zeus.Config(),
		httpzeus.NewProviderFactory(),
		licensing.Config(24*time.Hour, 3),
		func(sqlstore sqlstore.SQLStore, zeus pkgzeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[pkglicensing.Licensing, pkglicensing.Config] {
			return httplicensing.NewProviderFactory(sqlstore, zeus, orgGetter, analytics)
		},
		signoz.NewEmailingProviderFactories(),
		signoz.NewCacheProviderFactories(),
		signoz.NewWebProviderFactories(),
		sqlStoreFactories,
		signoz.NewTelemetryStoreProviderFactories(),
	)
	if err != nil {
		zap.L().Fatal("Failed to create signoz", zap.Error(err))
	}

	server, err := app.NewServer(config, signoz, jwt)
	if err != nil {
		zap.L().Fatal("Failed to create server", zap.Error(err))
	}

	if err := server.Start(ctx); err != nil {
		zap.L().Fatal("Could not start server", zap.Error(err))
	}

	signoz.Start(ctx)

	if err := signoz.Wait(ctx); err != nil {
		zap.L().Fatal("Failed to start signoz", zap.Error(err))
	}

	err = server.Stop(ctx)
	if err != nil {
		zap.L().Fatal("Failed to stop server", zap.Error(err))
	}

	err = signoz.Stop(ctx)
	if err != nil {
		zap.L().Fatal("Failed to stop signoz", zap.Error(err))
	}
}
@@ -1,7 +1,7 @@
 package model

 import (
-	"fmt"
+	"errors"

 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
 )
@@ -57,7 +57,7 @@ func Unauthorized(err error) *ApiError {
 func BadRequestStr(s string) *ApiError {
 	return &ApiError{
 		Typ: basemodel.ErrorBadData,
-		Err: fmt.Errorf(s),
+		Err: errors.New(s),
 	}
 }

@@ -73,7 +73,7 @@ func InternalError(err error) *ApiError {
 func InternalErrorStr(s string) *ApiError {
 	return &ApiError{
 		Typ: basemodel.ErrorInternal,
-		Err: fmt.Errorf(s),
+		Err: errors.New(s),
 	}
 }
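The fmt.Errorf(s) to errors.New(s) swap above matters whenever s contains formatting verbs: fmt.Errorf re-parses the string as a format, while errors.New stores it verbatim. A small illustration:

	errors.New("progress: 100%").Error() // "progress: 100%"
	fmt.Errorf("progress: 100%").Error() // "progress: 100%!(NOVERB)"

It also silences go vet's printf check about non-constant format strings.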
@@ -4,17 +4,17 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"log/slog"
 	"math"
 	"strings"
 	"sync"
 	"time"

-	"go.uber.org/zap"
-
 	"github.com/SigNoz/signoz/ee/query-service/anomaly"
 	"github.com/SigNoz/signoz/pkg/cache"
 	"github.com/SigNoz/signoz/pkg/query-service/common"
 	"github.com/SigNoz/signoz/pkg/query-service/model"
+	"github.com/SigNoz/signoz/pkg/transition"
 	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
@@ -30,7 +30,11 @@ import (

 	baserules "github.com/SigNoz/signoz/pkg/query-service/rules"

-	yaml "gopkg.in/yaml.v2"
+	querierV5 "github.com/SigNoz/signoz/pkg/querier"
+
+	anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"
+
+	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
 )

 const (
@@ -47,7 +51,14 @@ type AnomalyRule struct {
 	// querierV2 is used for alerts created after the introduction of new metrics query builder
 	querierV2 interfaces.Querier

-	provider anomaly.Provider
+	// querierV5 is used for alerts migrated after the introduction of new query builder
+	querierV5 querierV5.Querier
+
+	provider   anomaly.Provider
+	providerV2 anomalyV2.Provider
+
+	version string
+	logger  *slog.Logger

 	seasonality anomaly.Seasonality
 }
@@ -57,11 +68,15 @@ func NewAnomalyRule(
 	orgID valuer.UUID,
 	p *ruletypes.PostableRule,
 	reader interfaces.Reader,
+	querierV5 querierV5.Querier,
+	logger *slog.Logger,
 	cache cache.Cache,
 	opts ...baserules.RuleOption,
 ) (*AnomalyRule, error) {

-	zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))
+	logger.Info("creating new AnomalyRule", "rule_id", id)
+
+	opts = append(opts, baserules.WithLogger(logger))

 	if p.RuleCondition.CompareOp == ruletypes.ValueIsBelow {
 		target := -1 * *p.RuleCondition.Target
@@ -88,7 +103,7 @@ func NewAnomalyRule(
 		t.seasonality = anomaly.SeasonalityDaily
 	}

-	zap.L().Info("using seasonality", zap.String("seasonality", t.seasonality.String()))
+	logger.Info("using seasonality", "seasonality", t.seasonality.String())

 	querierOptsV2 := querierV2.QuerierOptions{
 		Reader: reader,
@@ -117,6 +132,27 @@ func NewAnomalyRule(
 			anomaly.WithReader[*anomaly.WeeklyProvider](reader),
 		)
 	}

+	if t.seasonality == anomaly.SeasonalityHourly {
+		t.providerV2 = anomalyV2.NewHourlyProvider(
+			anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](querierV5),
+			anomalyV2.WithLogger[*anomalyV2.HourlyProvider](logger),
+		)
+	} else if t.seasonality == anomaly.SeasonalityDaily {
+		t.providerV2 = anomalyV2.NewDailyProvider(
+			anomalyV2.WithQuerier[*anomalyV2.DailyProvider](querierV5),
+			anomalyV2.WithLogger[*anomalyV2.DailyProvider](logger),
+		)
+	} else if t.seasonality == anomaly.SeasonalityWeekly {
+		t.providerV2 = anomalyV2.NewWeeklyProvider(
+			anomalyV2.WithQuerier[*anomalyV2.WeeklyProvider](querierV5),
+			anomalyV2.WithLogger[*anomalyV2.WeeklyProvider](logger),
+		)
+	}
+
+	t.querierV5 = querierV5
+	t.version = p.Version
+	t.logger = logger
 	return &t, nil
 }

@@ -124,20 +160,15 @@ func (r *AnomalyRule) Type() ruletypes.RuleType {
 	return RuleTypeAnomaly
 }

-func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) {
+func (r *AnomalyRule) prepareQueryRange(ctx context.Context, ts time.Time) (*v3.QueryRangeParamsV3, error) {

-	zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.EvalWindow().Milliseconds()), zap.Int64("evalDelay", r.EvalDelay().Milliseconds()))
+	r.logger.InfoContext(
+		ctx, "prepare query range request v4", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds(),
+	)

-	start := ts.Add(-time.Duration(r.EvalWindow())).UnixMilli()
-	end := ts.UnixMilli()
-	if r.EvalDelay() > 0 {
-		start = start - int64(r.EvalDelay().Milliseconds())
-		end = end - int64(r.EvalDelay().Milliseconds())
-	}
-	// round to minute otherwise we could potentially miss data
-	start = start - (start % (60 * 1000))
-	end = end - (end % (60 * 1000))
+	st, en := r.Timestamps(ts)
+	start := st.UnixMilli()
+	end := en.UnixMilli()

 	compositeQuery := r.Condition().CompositeQuery

@@ -156,13 +187,34 @@ func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, e
 	}, nil
 }

+func (r *AnomalyRule) prepareQueryRangeV5(ctx context.Context, ts time.Time) (*qbtypes.QueryRangeRequest, error) {
+
+	r.logger.InfoContext(ctx, "prepare query range request v5", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds())
+
+	startTs, endTs := r.Timestamps(ts)
+	start, end := startTs.UnixMilli(), endTs.UnixMilli()
+
+	req := &qbtypes.QueryRangeRequest{
+		Start:       uint64(start),
+		End:         uint64(end),
+		RequestType: qbtypes.RequestTypeTimeSeries,
+		CompositeQuery: qbtypes.CompositeQuery{
+			Queries: make([]qbtypes.QueryEnvelope, 0),
+		},
+		NoCache: true,
+	}
+	req.CompositeQuery.Queries = make([]qbtypes.QueryEnvelope, len(r.Condition().CompositeQuery.Queries))
+	copy(req.CompositeQuery.Queries, r.Condition().CompositeQuery.Queries)
+	return req, nil
+}
+
 func (r *AnomalyRule) GetSelectedQuery() string {
 	return r.Condition().GetSelectedQueryName()
 }

 func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {

-	params, err := r.prepareQueryRange(ts)
+	params, err := r.prepareQueryRange(ctx, ts)
 	if err != nil {
 		return nil, err
 	}
@@ -190,13 +242,70 @@ func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, t
 	var resultVector ruletypes.Vector

 	scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
-	zap.L().Info("anomaly scores", zap.String("scores", string(scoresJSON)))
+	r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))

 	for _, series := range queryResult.AnomalyScores {
-		smpl, shouldAlert := r.ShouldAlert(*series)
-		if shouldAlert {
-			resultVector = append(resultVector, smpl)
+		if r.Condition() != nil && r.Condition().RequireMinPoints {
+			if len(series.Points) < r.Condition().RequiredNumPoints {
+				r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
+				continue
+			}
 		}
+		results, err := r.Threshold.ShouldAlert(*series)
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resultVector = append(resultVector, results...)
|
||||||
|
}
|
||||||
|
return resultVector, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
|
||||||
|
|
||||||
|
params, err := r.prepareQueryRangeV5(ctx, ts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
anomalies, err := r.providerV2.GetAnomalies(ctx, orgID, &anomalyV2.AnomaliesRequest{
|
||||||
|
Params: *params,
|
||||||
|
Seasonality: anomalyV2.Seasonality{String: valuer.NewString(r.seasonality.String())},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var qbResult *qbtypes.TimeSeriesData
|
||||||
|
for _, result := range anomalies.Results {
|
||||||
|
if result.QueryName == r.GetSelectedQuery() {
|
||||||
|
qbResult = result
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if qbResult == nil {
|
||||||
|
r.logger.WarnContext(ctx, "nil qb result", "ts", ts.UnixMilli())
|
||||||
|
}
|
||||||
|
|
||||||
|
queryResult := transition.ConvertV5TimeSeriesDataToV4Result(qbResult)
|
||||||
|
|
||||||
|
var resultVector ruletypes.Vector
|
||||||
|
|
||||||
|
scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
|
||||||
|
r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))
|
||||||
|
|
||||||
|
for _, series := range queryResult.AnomalyScores {
|
||||||
|
if r.Condition().RequireMinPoints {
|
||||||
|
if len(series.Points) < r.Condition().RequiredNumPoints {
|
||||||
|
r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
results, err := r.Threshold.ShouldAlert(*series)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resultVector = append(resultVector, results...)
|
||||||
}
|
}
|
||||||
return resultVector, nil
|
return resultVector, nil
|
||||||
}
|
}
|
||||||
@@ -206,8 +315,17 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
|
|||||||
prevState := r.State()
|
prevState := r.State()
|
||||||
|
|
||||||
valueFormatter := formatter.FromUnit(r.Unit())
|
valueFormatter := formatter.FromUnit(r.Unit())
|
||||||
res, err := r.buildAndRunQuery(ctx, r.OrgID(), ts)
|
|
||||||
|
|
||||||
|
var res ruletypes.Vector
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if r.version == "v5" {
|
||||||
|
r.logger.InfoContext(ctx, "running v5 query")
|
||||||
|
res, err = r.buildAndRunQueryV5(ctx, r.OrgID(), ts)
|
||||||
|
} else {
|
||||||
|
r.logger.InfoContext(ctx, "running v4 query")
|
||||||
|
res, err = r.buildAndRunQuery(ctx, r.OrgID(), ts)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -226,7 +344,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
|
|||||||
|
|
||||||
value := valueFormatter.Format(smpl.V, r.Unit())
|
value := valueFormatter.Format(smpl.V, r.Unit())
|
||||||
threshold := valueFormatter.Format(r.TargetVal(), r.Unit())
|
threshold := valueFormatter.Format(r.TargetVal(), r.Unit())
|
||||||
zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))
|
r.logger.DebugContext(ctx, "Alert template data for rule", "rule_name", r.Name(), "formatter", valueFormatter.Name(), "value", value, "threshold", threshold)
|
||||||
|
|
||||||
tmplData := ruletypes.AlertTemplateData(l, value, threshold)
|
tmplData := ruletypes.AlertTemplateData(l, value, threshold)
|
||||||
// Inject some convenience variables that are easier to remember for users
|
// Inject some convenience variables that are easier to remember for users
|
||||||
@@ -247,7 +365,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
|
|||||||
result, err := tmpl.Expand()
|
result, err := tmpl.Expand()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
result = fmt.Sprintf("<error expanding template: %s>", err)
|
result = fmt.Sprintf("<error expanding template: %s>", err)
|
||||||
zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
|
r.logger.ErrorContext(ctx, "Expanding alert template failed", "error", err, "data", tmplData, "rule_name", r.Name())
|
||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
@@ -276,7 +394,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
|
|||||||
resultFPs[h] = struct{}{}
|
resultFPs[h] = struct{}{}
|
||||||
|
|
||||||
if _, ok := alerts[h]; ok {
|
if _, ok := alerts[h]; ok {
|
||||||
zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
|
r.logger.ErrorContext(ctx, "the alert query returns duplicate records", "rule_id", r.ID(), "alert", alerts[h])
|
||||||
err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
|
err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -294,7 +412,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
zap.L().Info("number of alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))
|
r.logger.InfoContext(ctx, "number of alerts found", "rule_name", r.Name(), "alerts_count", len(alerts))
|
||||||
|
|
||||||
// alerts[h] is ready, add or update active list now
|
// alerts[h] is ready, add or update active list now
|
||||||
for h, a := range alerts {
|
for h, a := range alerts {
|
||||||
@@ -317,7 +435,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
|
|||||||
for fp, a := range r.Active {
|
for fp, a := range r.Active {
|
||||||
labelsJSON, err := json.Marshal(a.QueryResultLables)
|
labelsJSON, err := json.Marshal(a.QueryResultLables)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels))
|
r.logger.ErrorContext(ctx, "error marshaling labels", "error", err, "labels", a.Labels)
|
||||||
}
|
}
|
||||||
if _, ok := resultFPs[fp]; !ok {
|
if _, ok := resultFPs[fp]; !ok {
|
||||||
// If the alert was previously firing, keep it around for a given
|
// If the alert was previously firing, keep it around for a given
|
||||||
@@ -387,7 +505,7 @@ func (r *AnomalyRule) String() string {
|
|||||||
PreferredChannels: r.PreferredChannels(),
|
PreferredChannels: r.PreferredChannels(),
|
||||||
}
|
}
|
||||||
|
|
||||||
byt, err := yaml.Marshal(ar)
|
byt, err := json.Marshal(ar)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
|
return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
|
||||||
}
|
}
|
||||||
|
|||||||
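Side note (not part of the diff): the RequireMinPoints guard above is the behavioural core of this change: a series with too few points is now skipped instead of being pushed through the threshold check. A minimal standalone sketch of the same guard, using hypothetical Series/Condition types rather than the SigNoz ones:

```go
package main

import "fmt"

type Point struct{ Value float64 }

type Series struct{ Points []Point }

type Condition struct {
	RequireMinPoints  bool
	RequiredNumPoints int
}

// evaluate keeps only series that carry enough points to be judged
// reliably, mirroring the RequireMinPoints branch in buildAndRunQuery.
func evaluate(cond Condition, series []Series) []Series {
	kept := make([]Series, 0, len(series))
	for _, s := range series {
		if cond.RequireMinPoints && len(s.Points) < cond.RequiredNumPoints {
			continue // not enough data points: skip rather than alert
		}
		kept = append(kept, s)
	}
	return kept
}

func main() {
	cond := Condition{RequireMinPoints: true, RequiredNumPoints: 3}
	series := []Series{
		{Points: []Point{{1}, {2}}},      // skipped: only 2 points
		{Points: []Point{{1}, {2}, {3}}}, // kept
	}
	fmt.Println(len(evaluate(cond, series))) // prints 1
}
```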
@@ -3,8 +3,10 @@ package rules
 import (
 	"context"
 	"fmt"
 	"time"
 
+	"github.com/SigNoz/signoz/pkg/errors"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
 	baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
 	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
@@ -20,6 +22,10 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 	var task baserules.Task
 
 	ruleId := baserules.RuleIdFromTaskName(opts.TaskName)
+	evaluation, err := opts.Rule.Evaluation.GetEvaluation()
+	if err != nil {
+		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "evaluation is invalid: %v", err)
+	}
 	if opts.Rule.RuleType == ruletypes.RuleTypeThreshold {
 		// create a threshold rule
 		tr, err := baserules.NewThresholdRule(
@@ -27,6 +33,8 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 			opts.OrgID,
 			opts.Rule,
 			opts.Reader,
+			opts.Querier,
+			opts.SLogger,
 			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
 			baserules.WithSQLStore(opts.SQLStore),
 		)
@@ -38,7 +46,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 		rules = append(rules, tr)
 
 		// create ch rule task for evalution
-		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)
+		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(evaluation.GetFrequency()), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)
 
 	} else if opts.Rule.RuleType == ruletypes.RuleTypeProm {
 
@@ -47,7 +55,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 			ruleId,
 			opts.OrgID,
 			opts.Rule,
-			opts.Logger,
+			opts.SLogger,
 			opts.Reader,
 			opts.ManagerOpts.Prometheus,
 			baserules.WithSQLStore(opts.SQLStore),
@@ -60,7 +68,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 		rules = append(rules, pr)
 
 		// create promql rule task for evalution
-		task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)
+		task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(evaluation.GetFrequency()), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)
 
 	} else if opts.Rule.RuleType == ruletypes.RuleTypeAnomaly {
 		// create anomaly rule
@@ -69,6 +77,8 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 			opts.OrgID,
 			opts.Rule,
 			opts.Reader,
+			opts.Querier,
+			opts.SLogger,
 			opts.Cache,
 			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
 			baserules.WithSQLStore(opts.SQLStore),
@@ -80,7 +90,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
 		rules = append(rules, ar)
 
 		// create anomaly rule task for evalution
-		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)
+		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(evaluation.GetFrequency()), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)
 
 	} else {
 		return nil, fmt.Errorf("unsupported rule type %s. Supported types: %s, %s", opts.Rule.RuleType, ruletypes.RuleTypeProm, ruletypes.RuleTypeThreshold)
@@ -126,6 +136,8 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
 			opts.OrgID,
 			parsedRule,
 			opts.Reader,
+			opts.Querier,
+			opts.SLogger,
 			baserules.WithSendAlways(),
 			baserules.WithSendUnmatched(),
 			baserules.WithSQLStore(opts.SQLStore),
@@ -143,7 +155,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
 			alertname,
 			opts.OrgID,
 			parsedRule,
-			opts.Logger,
+			opts.SLogger,
 			opts.Reader,
 			opts.ManagerOpts.Prometheus,
 			baserules.WithSendAlways(),
@@ -162,6 +174,8 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
 			opts.OrgID,
 			parsedRule,
 			opts.Reader,
+			opts.Querier,
+			opts.SLogger,
 			opts.Cache,
 			baserules.WithSendAlways(),
 			baserules.WithSendUnmatched(),
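Side note (not part of the diff): task frequency now flows through a validated Evaluation object instead of being read straight off the rule, so an invalid evaluation config fails fast before any task is built. A rough standalone sketch of that shape, with illustrative stand-in types rather than the actual ruletypes API:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

type Evaluation struct{ Frequency time.Duration }

func (e *Evaluation) GetFrequency() time.Duration { return e.Frequency }

type Rule struct{ Evaluation *Evaluation }

// GetEvaluation validates the evaluation config up front, so a broken
// frequency is rejected instead of producing a misconfigured task.
func (r *Rule) GetEvaluation() (*Evaluation, error) {
	if r.Evaluation == nil || r.Evaluation.Frequency <= 0 {
		return nil, errors.New("evaluation is invalid")
	}
	return r.Evaluation, nil
}

func main() {
	r := Rule{Evaluation: &Evaluation{Frequency: time.Minute}}
	ev, err := r.GetEvaluation()
	if err != nil {
		fmt.Println("reject rule:", err)
		return
	}
	fmt.Println("schedule task every", ev.GetFrequency())
}
```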
ee/sqlschema/postgressqlschema/formatter.go (new file, 36 lines)
@@ -0,0 +1,36 @@
+package postgressqlschema
+
+import (
+	"strings"
+
+	"github.com/SigNoz/signoz/pkg/sqlschema"
+)
+
+type Formatter struct {
+	sqlschema.Formatter
+}
+
+func (formatter Formatter) SQLDataTypeOf(dataType sqlschema.DataType) string {
+	if dataType == sqlschema.DataTypeTimestamp {
+		return "TIMESTAMPTZ"
+	}
+
+	return strings.ToUpper(dataType.String())
+}
+
+func (formatter Formatter) DataTypeOf(dataType string) sqlschema.DataType {
+	switch strings.ToUpper(dataType) {
+	case "TIMESTAMPTZ", "TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE", "TIMESTAMP WITH TIME ZONE":
+		return sqlschema.DataTypeTimestamp
+	case "INT8":
+		return sqlschema.DataTypeBigInt
+	case "INT2", "INT4", "SMALLINT", "INTEGER":
+		return sqlschema.DataTypeInteger
+	case "BOOL", "BOOLEAN":
+		return sqlschema.DataTypeBoolean
+	case "VARCHAR", "CHARACTER VARYING", "CHARACTER":
+		return sqlschema.DataTypeText
+	}
+
+	return formatter.Formatter.DataTypeOf(dataType)
+}
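Side note (not part of the diff): the formatter's job is to collapse Postgres type-name synonyms onto one logical data type. The same switch, sketched standalone with plain strings in place of sqlschema.DataType values:

```go
package main

import (
	"fmt"
	"strings"
)

// dataTypeOf mirrors the DataTypeOf switch above: normalize the
// Postgres udt_name and collapse synonyms onto one logical type.
func dataTypeOf(udt string) string {
	switch strings.ToUpper(udt) {
	case "TIMESTAMPTZ", "TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE", "TIMESTAMP WITH TIME ZONE":
		return "timestamp"
	case "INT8":
		return "bigint"
	case "INT2", "INT4", "SMALLINT", "INTEGER":
		return "integer"
	case "BOOL", "BOOLEAN":
		return "boolean"
	case "VARCHAR", "CHARACTER VARYING", "CHARACTER":
		return "text"
	}
	return strings.ToLower(udt) // fall back to a generic mapping
}

func main() {
	for _, udt := range []string{"timestamptz", "int8", "bool", "varchar"} {
		fmt.Println(udt, "->", dataTypeOf(udt))
	}
}
```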
ee/sqlschema/postgressqlschema/provider.go (new file, 285 lines)
@@ -0,0 +1,285 @@
+package postgressqlschema
+
+import (
+	"context"
+
+	"github.com/SigNoz/signoz/pkg/factory"
+	"github.com/SigNoz/signoz/pkg/sqlschema"
+	"github.com/SigNoz/signoz/pkg/sqlstore"
+	"github.com/uptrace/bun"
+)
+
+type provider struct {
+	settings factory.ScopedProviderSettings
+	fmter    sqlschema.SQLFormatter
+	sqlstore sqlstore.SQLStore
+	operator sqlschema.SQLOperator
+}
+
+func NewFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config] {
+	return factory.NewProviderFactory(factory.MustNewName("postgres"), func(ctx context.Context, providerSettings factory.ProviderSettings, config sqlschema.Config) (sqlschema.SQLSchema, error) {
+		return New(ctx, providerSettings, config, sqlstore)
+	})
+}
+
+func New(ctx context.Context, providerSettings factory.ProviderSettings, config sqlschema.Config, sqlstore sqlstore.SQLStore) (sqlschema.SQLSchema, error) {
+	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/sqlschema/postgressqlschema")
+	fmter := Formatter{Formatter: sqlschema.NewFormatter(sqlstore.BunDB().Dialect())}
+
+	return &provider{
+		sqlstore: sqlstore,
+		fmter:    fmter,
+		settings: settings,
+		operator: sqlschema.NewOperator(fmter, sqlschema.OperatorSupport{
+			DropConstraint:          true,
+			ColumnIfNotExistsExists: true,
+			AlterColumnSetNotNull:   true,
+		}),
+	}, nil
+}
+
+func (provider *provider) Formatter() sqlschema.SQLFormatter {
+	return provider.fmter
+}
+
+func (provider *provider) Operator() sqlschema.SQLOperator {
+	return provider.operator
+}
+
+func (provider *provider) GetTable(ctx context.Context, tableName sqlschema.TableName) (*sqlschema.Table, []*sqlschema.UniqueConstraint, error) {
+	rows, err := provider.
+		sqlstore.
+		BunDB().
+		QueryContext(ctx, `
+SELECT
+	c.column_name,
+	c.is_nullable = 'YES',
+	c.udt_name,
+	c.column_default
+FROM
+	information_schema.columns AS c
+WHERE
+	c.table_name = ?`, string(tableName))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	defer func() {
+		if err := rows.Close(); err != nil {
+			provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
+		}
+	}()
+
+	columns := make([]*sqlschema.Column, 0)
+	for rows.Next() {
+		var (
+			name        string
+			sqlDataType string
+			nullable    bool
+			defaultVal  *string
+		)
+		if err := rows.Scan(&name, &nullable, &sqlDataType, &defaultVal); err != nil {
+			return nil, nil, err
+		}
+
+		columnDefault := ""
+		if defaultVal != nil {
+			columnDefault = *defaultVal
+		}
+
+		columns = append(columns, &sqlschema.Column{
+			Name:     sqlschema.ColumnName(name),
+			Nullable: nullable,
+			DataType: provider.fmter.DataTypeOf(sqlDataType),
+			Default:  columnDefault,
+		})
+	}
+
+	constraintsRows, err := provider.
+		sqlstore.
+		BunDB().
+		QueryContext(ctx, `
+SELECT
+	c.column_name,
+	constraint_name,
+	constraint_type
+FROM
+	information_schema.table_constraints tc
+	JOIN information_schema.constraint_column_usage AS ccu USING (constraint_schema, constraint_catalog, table_name, constraint_name)
+	JOIN information_schema.columns AS c ON c.table_schema = tc.constraint_schema AND tc.table_name = c.table_name AND ccu.column_name = c.column_name
+WHERE
+	c.table_name = ?`, string(tableName))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	defer func() {
+		if err := constraintsRows.Close(); err != nil {
+			provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
+		}
+	}()
+
+	var primaryKeyConstraint *sqlschema.PrimaryKeyConstraint
+	uniqueConstraintsMap := make(map[string]*sqlschema.UniqueConstraint)
+	for constraintsRows.Next() {
+		var (
+			name           string
+			constraintName string
+			constraintType string
+		)
+
+		if err := constraintsRows.Scan(&name, &constraintName, &constraintType); err != nil {
+			return nil, nil, err
+		}
+
+		if constraintType == "PRIMARY KEY" {
+			if primaryKeyConstraint == nil {
+				primaryKeyConstraint = (&sqlschema.PrimaryKeyConstraint{
+					ColumnNames: []sqlschema.ColumnName{sqlschema.ColumnName(name)},
+				}).Named(constraintName).(*sqlschema.PrimaryKeyConstraint)
+			} else {
+				primaryKeyConstraint.ColumnNames = append(primaryKeyConstraint.ColumnNames, sqlschema.ColumnName(name))
+			}
+		}
+
+		if constraintType == "UNIQUE" {
+			if _, ok := uniqueConstraintsMap[constraintName]; !ok {
+				uniqueConstraintsMap[constraintName] = (&sqlschema.UniqueConstraint{
+					ColumnNames: []sqlschema.ColumnName{sqlschema.ColumnName(name)},
+				}).Named(constraintName).(*sqlschema.UniqueConstraint)
+			} else {
+				uniqueConstraintsMap[constraintName].ColumnNames = append(uniqueConstraintsMap[constraintName].ColumnNames, sqlschema.ColumnName(name))
+			}
+		}
+	}
+
+	foreignKeyConstraintsRows, err := provider.
+		sqlstore.
+		BunDB().
+		QueryContext(ctx, `
+SELECT
+	tc.constraint_name,
+	kcu.table_name AS referencing_table,
+	kcu.column_name AS referencing_column,
+	ccu.table_name AS referenced_table,
+	ccu.column_name AS referenced_column
+FROM
+	information_schema.key_column_usage kcu
+	JOIN information_schema.table_constraints tc ON kcu.constraint_name = tc.constraint_name AND kcu.table_schema = tc.table_schema
+	JOIN information_schema.constraint_column_usage ccu ON ccu.constraint_name = tc.constraint_name AND ccu.table_schema = tc.table_schema
+WHERE
+	tc.constraint_type = ?
+	AND kcu.table_name = ?`, "FOREIGN KEY", string(tableName))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	defer func() {
+		if err := foreignKeyConstraintsRows.Close(); err != nil {
+			provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
+		}
+	}()
+
+	foreignKeyConstraints := make([]*sqlschema.ForeignKeyConstraint, 0)
+	for foreignKeyConstraintsRows.Next() {
+		var (
+			constraintName    string
+			referencingTable  string
+			referencingColumn string
+			referencedTable   string
+			referencedColumn  string
+		)
+
+		if err := foreignKeyConstraintsRows.Scan(&constraintName, &referencingTable, &referencingColumn, &referencedTable, &referencedColumn); err != nil {
+			return nil, nil, err
+		}
+
+		foreignKeyConstraints = append(foreignKeyConstraints, (&sqlschema.ForeignKeyConstraint{
+			ReferencingColumnName: sqlschema.ColumnName(referencingColumn),
+			ReferencedTableName:   sqlschema.TableName(referencedTable),
+			ReferencedColumnName:  sqlschema.ColumnName(referencedColumn),
+		}).Named(constraintName).(*sqlschema.ForeignKeyConstraint))
+	}
+
+	uniqueConstraints := make([]*sqlschema.UniqueConstraint, 0)
+	for _, uniqueConstraint := range uniqueConstraintsMap {
+		uniqueConstraints = append(uniqueConstraints, uniqueConstraint)
+	}
+
+	return &sqlschema.Table{
+		Name:                  tableName,
+		Columns:               columns,
+		PrimaryKeyConstraint:  primaryKeyConstraint,
+		ForeignKeyConstraints: foreignKeyConstraints,
+	}, uniqueConstraints, nil
+}
+
+func (provider *provider) GetIndices(ctx context.Context, name sqlschema.TableName) ([]sqlschema.Index, error) {
+	rows, err := provider.
+		sqlstore.
+		BunDB().
+		QueryContext(ctx, `
+SELECT
+	ct.relname AS table_name,
+	ci.relname AS index_name,
+	i.indisunique AS unique,
+	i.indisprimary AS primary,
+	a.attname AS column_name
+FROM
+	pg_index i
+	LEFT JOIN pg_class ct ON ct.oid = i.indrelid
+	LEFT JOIN pg_class ci ON ci.oid = i.indexrelid
+	LEFT JOIN pg_attribute a ON a.attrelid = ct.oid
+	LEFT JOIN pg_constraint con ON con.conindid = i.indexrelid
+WHERE
+	a.attnum = ANY(i.indkey)
+	AND con.oid IS NULL
+	AND ct.relkind = 'r'
+	AND ct.relname = ?`, string(name))
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err := rows.Close(); err != nil {
+			provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
+		}
+	}()
+
+	uniqueIndicesMap := make(map[string]*sqlschema.UniqueIndex)
+	for rows.Next() {
+		var (
+			tableName  string
+			indexName  string
+			unique     bool
+			primary    bool
+			columnName string
+		)
+
+		if err := rows.Scan(&tableName, &indexName, &unique, &primary, &columnName); err != nil {
+			return nil, err
+		}
+
+		if unique {
+			if _, ok := uniqueIndicesMap[indexName]; !ok {
+				uniqueIndicesMap[indexName] = &sqlschema.UniqueIndex{
+					TableName:   name,
+					ColumnNames: []sqlschema.ColumnName{sqlschema.ColumnName(columnName)},
+				}
+			} else {
+				uniqueIndicesMap[indexName].ColumnNames = append(uniqueIndicesMap[indexName].ColumnNames, sqlschema.ColumnName(columnName))
+			}
+		}
+	}
+
+	indices := make([]sqlschema.Index, 0)
+	for _, index := range uniqueIndicesMap {
+		indices = append(indices, index)
+	}
+
+	return indices, nil
+}
+
+func (provider *provider) ToggleFKEnforcement(_ context.Context, _ bun.IDB, _ bool) error {
+	return nil
+}
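Side note (not part of the diff): a hypothetical caller of the new provider, assuming GetTable is part of the sqlschema.SQLSchema interface that the provider implements; the "users" table name is illustrative only:

```go
package schemadump

import (
	"context"
	"fmt"

	"github.com/SigNoz/signoz/pkg/sqlschema"
)

// DescribeTable prints the columns and unique constraints that the
// provider reconstructs from information_schema and pg_index.
func DescribeTable(ctx context.Context, schema sqlschema.SQLSchema) error {
	table, uniques, err := schema.GetTable(ctx, sqlschema.TableName("users"))
	if err != nil {
		return err
	}
	for _, col := range table.Columns {
		fmt.Printf("column %s type=%v nullable=%v default=%q\n", col.Name, col.DataType, col.Nullable, col.Default)
	}
	fmt.Printf("%d unique constraint(s)\n", len(uniques))
	return nil
}
```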
@@ -1,4 +1,5 @@
 node_modules
 build
 *.typegen.ts
 i18-generate-hash.js
+src/parser/TraceOperatorParser/**
@@ -1,4 +1,5 @@
 module.exports = {
+	ignorePatterns: ['src/parser/*.ts'],
 	env: {
 		browser: true,
 		es2021: true,
frontend/.gitignore (vendored, 27 lines added)
@@ -2,3 +2,30 @@
 # Sentry Config File
 .env.sentry-build-plugin
 .qodo
+
+# Playwright
+node_modules/
+/test-results/
+/playwright-report/
+/blob-report/
+/playwright/.cache/
+/playwright/test-results/
+/playwright/blob-report/
+/playwright/playwright-report/
+
+e2e/test-plan/alerts/
+e2e/test-plan/dashboards/
+e2e/test-plan/exceptions/
+e2e/test-plan/external-apis/
+e2e/test-plan/help-support/
+e2e/test-plan/infrastructure/
+e2e/test-plan/logs/
+e2e/test-plan/messaging-queues/
+e2e/test-plan/metrics/
+e2e/test-plan/navigation/
+e2e/test-plan/onboarding/
+e2e/test-plan/saved-views/
+e2e/test-plan/service-map/
+e2e/test-plan/services/
+e2e/test-plan/traces/
+e2e/test-plan/user-preferences/
@@ -8,3 +8,8 @@ public/
 
 # Ignore all JSON files:
 **/*.json
+
+# Ignore all files in parser folder:
+src/parser/**
+
+src/TraceOperator/parser/**
frontend/e2e/test-plan/README.md (new file, 29 lines)
@@ -0,0 +1,29 @@
+# SigNoz E2E Test Plan
+
+This directory contains the structured test plan for the SigNoz application. Each subfolder corresponds to a main module or feature area, and contains scenario files for all user journeys, edge cases, and cross-module flows. These documents serve as the basis for generating Playwright MCP-driven E2E tests.
+
+## Structure
+
+- Each main module (e.g., logs, traces, dashboards, alerts, settings, etc.) has its own folder or markdown file.
+- Each file contains detailed scenario templates, including preconditions, step-by-step actions, and expected outcomes.
+- Use these documents to write, review, and update test cases as the application evolves.
+
+## Folders & Files
+
+- `logs/` — Logs module scenarios
+- `traces/` — Traces module scenarios
+- `metrics/` — Metrics module scenarios
+- `dashboards/` — Dashboards module scenarios
+- `alerts/` — Alerts module scenarios
+- `services/` — Services module scenarios
+- `settings/` — Settings and all sub-settings scenarios
+- `onboarding/` — Onboarding and signup flows
+- `navigation/` — Navigation, sidebar, and cross-module flows
+- `exceptions/` — Exception and error handling scenarios
+- `external-apis/` — External API monitoring scenarios
+- `messaging-queues/` — Messaging queue scenarios
+- `infrastructure/` — Infrastructure monitoring scenarios
+- `help-support/` — Help & support scenarios
+- `user-preferences/` — User preferences and personalization scenarios
+- `service-map/` — Service map scenarios
+- `saved-views/` — Saved views scenarios
frontend/e2e/test-plan/settings/README.md (new file, 16 lines)
@@ -0,0 +1,16 @@
+# Settings Module Test Plan
+
+This folder contains E2E test scenarios for the Settings module and all sub-settings.
+
+## Scenario Categories
+
+- General settings (org/workspace, branding, version info)
+- Billing settings
+- Members & SSO
+- Custom domain
+- Integrations
+- Notification channels
+- API keys
+- Ingestion
+- Account settings (profile, password, preferences)
+- Keyboard shortcuts
frontend/e2e/test-plan/settings/account-settings.md (new file, 43 lines)
@@ -0,0 +1,43 @@
+# Account Settings E2E Scenarios (Updated)
+
+## 1. Update Name
+
+- **Precondition:** User is logged in
+- **Steps:**
+  1. Click 'Update name' button
+  2. Edit name field in the modal/dialog
+  3. Save changes
+- **Expected:** Name is updated in the UI
+
+## 2. Update Email
+
+- **Note:** The email field is not editable in the current UI.
+
+## 3. Reset Password
+
+- **Precondition:** User is logged in
+- **Steps:**
+  1. Click 'Reset password' button
+  2. Complete reset flow (modal/dialog or external flow)
+- **Expected:** Password is reset
+
+## 4. Toggle 'Adapt to my timezone'
+
+- **Precondition:** User is logged in
+- **Steps:**
+  1. Toggle 'Adapt to my timezone' switch
+- **Expected:** Timezone adapts accordingly (UI feedback/confirmation should be checked)
+
+## 5. Toggle Theme (Dark/Light)
+
+- **Precondition:** User is logged in
+- **Steps:**
+  1. Toggle theme radio buttons ('Dark', 'Light Beta')
+- **Expected:** Theme changes
+
+## 6. Toggle Sidebar Always Open
+
+- **Precondition:** User is logged in
+- **Steps:**
+  1. Toggle 'Keep the primary sidebar always open' switch
+- **Expected:** Sidebar remains open/closed as per toggle
frontend/e2e/test-plan/settings/api-keys.md (new file, 26 lines)
@@ -0,0 +1,26 @@
+# API Keys E2E Scenarios (Updated)
+
+## 1. Create a New API Key
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. Click 'New Key' button
+  2. Enter details in the modal/dialog
+  3. Click 'Save'
+- **Expected:** API key is created and listed in the table
+
+## 2. Revoke an API Key
+
+- **Precondition:** API key exists
+- **Steps:**
+  1. In the table, locate the API key row
+  2. Click the revoke/delete button (icon button in the Action column)
+  3. Confirm if prompted
+- **Expected:** API key is revoked/removed from the table
+
+## 3. View API Key Usage
+
+- **Precondition:** API key exists
+- **Steps:**
+  1. View the 'Last used' and 'Expired' columns in the table
+- **Expected:** Usage data is displayed for each API key
frontend/e2e/test-plan/settings/billing.md (new file, 17 lines)
@@ -0,0 +1,17 @@
+# Billing Settings E2E Scenarios (Updated)
+
+## 1. View Billing Information
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. Navigate to Billing Settings
+  2. Wait for the billing chart/data to finish loading
+- **Expected:**
+  - Billing heading and subheading are displayed
+  - Usage/cost table is visible with columns: Unit, Data Ingested, Price per Unit, Cost (Billing period to date)
+  - "Download CSV" and "Manage Billing" buttons are present and enabled after loading
+  - Test clicking "Download CSV" and "Manage Billing" for expected behavior (e.g., file download, navigation, or modal)
+
+> Note: If these features are expected to trigger specific flows, document the observed behavior for each button.
frontend/e2e/test-plan/settings/custom-domain.md (new file, 18 lines)
@@ -0,0 +1,18 @@
+# Custom Domain E2E Scenarios (Updated)
+
+## 1. Add or Update Custom Domain
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. Click 'Customize team’s URL' button
+  2. In the 'Customize your team’s URL' dialog, enter the preferred subdomain
+  3. Click 'Apply Changes'
+- **Expected:** Domain is set/updated for the team (UI feedback/confirmation should be checked)
+
+## 2. Verify Domain Ownership
+
+- **Note:** No explicit 'Verify' button or flow is present in the current UI. If verification is required, it may be handled automatically or via support.
+
+## 3. Remove a Custom Domain
+
+- **Note:** No explicit 'Remove' button or flow is present in the current UI. The only available action is to update the subdomain.
frontend/e2e/test-plan/settings/general.md (new file, 31 lines)
@@ -0,0 +1,31 @@
+# General Settings E2E Scenarios
+
+## 1. View General Settings
+
+- **Precondition:** User is logged in
+- **Steps:**
+  1. Navigate to General Settings
+- **Expected:** General settings are displayed
+
+## 2. Update Organization/Workspace Name
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. Edit organization/workspace name
+  2. Save changes
+- **Expected:** Name is updated and visible
+
+## 3. Update Logo or Branding
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. Upload new logo/branding
+  2. Save changes
+- **Expected:** Branding is updated
+
+## 4. View Version/Build Info
+
+- **Precondition:** User is logged in
+- **Steps:**
+  1. View version/build info section
+- **Expected:** Version/build info is displayed
frontend/e2e/test-plan/settings/ingestion.md (new file, 20 lines)
@@ -0,0 +1,20 @@
+# Ingestion E2E Scenarios (Updated)
+
+## 1. View Ingestion Sources
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. Navigate to the Integrations page
+- **Expected:** List of available data sources/integrations is displayed
+
+## 2. Configure Ingestion Sources
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. Click 'Configure' for a data source/integration
+  2. Complete the configuration flow (modal or page, as available)
+- **Expected:** Source is configured (UI feedback/confirmation should be checked)
+
+## 3. Disable/Enable Ingestion
+
+- **Note:** No visible enable/disable toggle for ingestion sources in the current UI. Ingestion is managed via the Integrations configuration flows.
frontend/e2e/test-plan/settings/integrations.md (new file, 51 lines)
@@ -0,0 +1,51 @@
+# Integrations E2E Scenarios (Updated)
+
+## 1. View List of Available Integrations
+
+- **Precondition:** User is logged in
+- **Steps:**
+  1. Navigate to Integrations
+- **Expected:** List of integrations is displayed, each with a name, description, and 'Configure' button
+
+## 2. Search Integrations by Name/Type
+
+- **Precondition:** Integrations exist
+- **Steps:**
+  1. Enter search/filter criteria in the 'Search for an integration...' box
+- **Expected:** Only matching integrations are shown
+
+## 3. Connect a New Integration
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. Click 'Configure' for an integration
+  2. Complete the configuration flow (modal or page, as available)
+- **Expected:** Integration is connected/configured (UI feedback/confirmation should be checked)
+
+## 4. Disconnect an Integration
+
+- **Note:** No visible 'Disconnect' button in the main list. This may be available in the configuration flow for a connected integration.
+
+## 5. Configure Integration Settings
+
+- **Note:** Configuration is handled in the flow after clicking 'Configure' for an integration.
+
+## 6. Test Integration Connection
+
+- **Note:** No visible 'Test Connection' button in the main list. This may be available in the configuration flow.
+
+## 7. View Integration Status/Logs
+
+- **Note:** No visible status/logs section in the main list. This may be available in the configuration flow.
+
+## 8. Filter Integrations by Category
+
+- **Note:** No explicit category filter in the current UI, only a search box.
+
+## 9. View Integration Documentation/Help
+
+- **Note:** No visible 'Help/Docs' button in the main list. This may be available in the configuration flow.
+
+## 10. Update Integration Configuration
+
+- **Note:** Configuration is handled in the flow after clicking 'Configure' for an integration.
frontend/e2e/test-plan/settings/keyboard-shortcuts.md (new file, 19 lines)
@@ -0,0 +1,19 @@
+# Keyboard Shortcuts E2E Scenarios (Updated)
+
+## 1. View Keyboard Shortcuts
+
+- **Precondition:** User is logged in
+- **Steps:**
+  1. Navigate to Keyboard Shortcuts
+- **Expected:** Shortcuts are displayed in categorized tables (Global, Logs Explorer, Query Builder, Dashboard)
+
+## 2. Customize Keyboard Shortcuts (if supported)
+
+- **Note:** Customization is not available in the current UI. Shortcuts are view-only.
+
+## 3. Use Keyboard Shortcuts for Navigation/Actions
+
+- **Precondition:** User is logged in
+- **Steps:**
+  1. Use shortcut for navigation/action (e.g., shift+s for Services, cmd+enter for running query)
+- **Expected:** Navigation/action is performed as per shortcut
frontend/e2e/test-plan/settings/members-sso.md (new file, 49 lines)
@@ -0,0 +1,49 @@
+# Members & SSO E2E Scenarios (Updated)
+
+## 1. Invite a New Member
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. Click 'Invite Members' button
+  2. In the 'Invite team members' dialog, enter email address, name (optional), and select role
+  3. (Optional) Click 'Add another team member' to invite more
+  4. Click 'Invite team members' to send invite(s)
+- **Expected:** Pending invite appears in the 'Pending Invites' table
+
+## 2. Remove a Member
+
+- **Precondition:** User is admin, member exists
+- **Steps:**
+  1. In the 'Members' table, locate the member row
+  2. Click 'Delete' in the Action column
+  3. Confirm removal if prompted
+- **Expected:** Member is removed from the table
+
+## 3. Update Member Roles
+
+- **Precondition:** User is admin, member exists
+- **Steps:**
+  1. In the 'Members' table, locate the member row
+  2. Click 'Edit' in the Action column
+  3. Change role in the edit dialog/modal
+  4. Save changes
+- **Expected:** Member role is updated in the table
+
+## 4. Configure SSO
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. In the 'Authenticated Domains' section, locate the domain row
+  2. Click 'Configure SSO' or 'Edit Google Auth' as available
+  3. Complete SSO provider configuration in the modal/dialog
+  4. Save settings
+- **Expected:** SSO is configured for the domain
+
+## 5. Login via SSO
+
+- **Precondition:** SSO is configured
+- **Steps:**
+  1. Log out from the app
+  2. On the login page, click 'Login with SSO'
+  3. Complete SSO login flow
+- **Expected:** User is logged in via SSO
frontend/e2e/test-plan/settings/notification-channels.md (new file, 39 lines)
@@ -0,0 +1,39 @@
+# Notification Channels E2E Scenarios (Updated)
+
+## 1. Add a New Notification Channel
+
+- **Precondition:** User is admin
+- **Steps:**
+  1. Click 'New Alert Channel' button
+  2. In the 'New Notification Channel' form, fill in required fields (Name, Type, Webhook URL, etc.)
+  3. (Optional) Toggle 'Send resolved alerts'
+  4. (Optional) Click 'Test' to send a test notification
+  5. Click 'Save' to add the channel
+- **Expected:** Channel is added and listed in the table
+
+## 2. Test Notification Channel
+
+- **Precondition:** Channel is being created or edited
+- **Steps:**
+  1. In the 'New Notification Channel' or 'Edit Notification Channel' form, click 'Test'
+- **Expected:** Test notification is sent (UI feedback/confirmation should be checked)
+
+## 3. Remove a Notification Channel
+
+- **Precondition:** Channel is added
+- **Steps:**
+  1. In the table, locate the channel row
+  2. Click 'Delete' in the Action column
+  3. Confirm removal if prompted
+- **Expected:** Channel is removed from the table
+
+## 4. Update Notification Channel Settings
+
+- **Precondition:** Channel is added
+- **Steps:**
+  1. In the table, locate the channel row
+  2. Click 'Edit' in the Action column
+  3. In the 'Edit Notification Channel' form, update fields as needed
+  4. (Optional) Click 'Test' to send a test notification
+  5. Click 'Save' to update the channel
+- **Expected:** Settings are updated
199
frontend/e2e/test-plan/validation-report.md
Normal file
@@ -0,0 +1,199 @@
|
|||||||
|
# SigNoz Test Plan Validation Report
|
||||||
|
|
||||||
|
This report documents the validation of the E2E test plan against the current live application using Playwright MCP. Each module is reviewed for coverage, gaps, and required updates.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Home Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- Widgets for logs, traces, metrics, dashboards, alerts, services, saved views, onboarding checklist
|
||||||
|
- Quick access buttons: Explore Logs, Create dashboard, Create an alert
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenarios for checklist interactions (e.g., “I’ll do this later”, progress tracking)
|
||||||
|
- Add scenarios for Saved Views and cross-module links
|
||||||
|
- Add scenario for onboarding checklist completion
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Logs Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- Explorer, Pipelines, Views tabs
|
||||||
|
- Filtering by service, environment, severity, host, k8s, etc.
|
||||||
|
- Search, save view, create alert, add to dashboard, export, view mode switching
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenario for quick filter customization
|
||||||
|
- Add scenario for “Old Explorer” button
|
||||||
|
- Add scenario for frequency chart toggle
|
||||||
|
- Add scenario for “Stage & Run Query” workflow
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Traces Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- Tabs: Explorer, Funnels, Views
|
||||||
|
- Filtering by name, error status, duration, environment, function, service, RPC, status code, HTTP, trace ID, etc.
|
||||||
|
- Search, save view, create alert, add to dashboard, export, view mode switching (List, Traces, Time Series, Table)
|
||||||
|
- Pagination, quick filter customization, group by, aggregation
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenario for quick filter customization
|
||||||
|
- Add scenario for “Stage & Run Query” workflow
|
||||||
|
- Add scenario for all view modes (List, Traces, Time Series, Table)
|
||||||
|
- Add scenario for group by/aggregation
|
||||||
|
- Add scenario for trace detail navigation (clicking on trace row)
|
||||||
|
- Add scenario for Funnels tab (create/edit/delete funnel)
|
||||||
|
- Add scenario for Views tab (manage saved views)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Metrics Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- Tabs: Summary, Explorer, Views
|
||||||
|
- Filtering by metric, type, unit, etc.
|
||||||
|
- Search, save view, add to dashboard, export, view mode switching (chart, table, proportion view)
|
||||||
|
- Pagination, group by, aggregation, custom queries
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenario for Proportion View in Summary
|
||||||
|
- Add scenario for all view modes (chart, table, proportion)
|
||||||
|
- Add scenario for group by/aggregation
|
||||||
|
- Add scenario for custom queries in Explorer
|
||||||
|
- Add scenario for Views tab (manage saved views)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Dashboards Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- List, search, and filter dashboards
|
||||||
|
- Create new dashboard (button and template link)
|
||||||
|
- Edit, delete, and view dashboard details
|
||||||
|
- Add/edit/delete widgets (implied by dashboard detail)
|
||||||
|
- Pagination through dashboards
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenario for browsing dashboard templates (external link)
|
||||||
|
- Add scenario for requesting new template
|
||||||
|
- Add scenario for dashboard owner and creation info
|
||||||
|
- Add scenario for dashboard tags and filtering by tags
|
||||||
|
- Add scenario for dashboard sharing (if available)
|
||||||
|
- Add scenario for dashboard image/preview
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Messaging Queues Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- Overview tab: queue metrics, filters (Service Name, Span Name, Msg System, Destination, Kind)
|
||||||
|
- Search across all columns
|
||||||
|
- Pagination of queue data
|
||||||
|
- Sync and Share buttons
|
||||||
|
- Tabs for Kafka and Celery
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenario for Kafka tab (detailed metrics, actions)
|
||||||
|
- Add scenario for Celery tab (detailed metrics, actions)
|
||||||
|
- Add scenario for filter combinations and edge cases
|
||||||
|
- Add scenario for sharing queue data
|
||||||
|
- Add scenario for time range selection
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## External APIs Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- Accessed via side navigation under MORE
|
||||||
|
- Explorer tab: domain, endpoints, last used, rate, error %, avg. latency
|
||||||
|
- Filters: Deployment Environment, Service Name, Rpc Method, Show IP addresses
|
||||||
|
- Table pagination
|
||||||
|
- Share and Stage & Run Query buttons
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenario for customizing quick filters
|
||||||
|
- Add scenario for running and staging queries
|
||||||
|
- Add scenario for sharing API data
|
||||||
|
- Add scenario for edge cases in filters and table data
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Alerts Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- Alert Rules tab: list, search, create (New Alert), edit, delete, enable/disable, severity, labels, actions
|
||||||
|
- Triggered Alerts tab (visible in tablist)
|
||||||
|
- Configuration tab (visible in tablist)
|
||||||
|
- Table pagination
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenario for triggered alerts (view, acknowledge, resolve)
|
||||||
|
- Add scenario for alert configuration (settings, integrations)
|
||||||
|
- Add scenario for edge cases in alert creation and management
|
||||||
|
- Add scenario for searching and filtering alerts
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Integrations Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- Integrations tab: list, search, configure (e.g., AWS), request new integration
|
||||||
|
- One-click setup for AWS monitoring
|
||||||
|
- Request more integrations (form)
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenario for configuring integrations (step-by-step)
|
||||||
|
- Add scenario for searching and filtering integrations
|
||||||
|
- Add scenario for requesting new integrations
|
||||||
|
- Add scenario for edge cases (e.g., failed configuration)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Exceptions Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- All Exceptions: list, search, filter (Deployment Environment, Service Name, Host Name, K8s Cluster/Deployment/Namespace, Net Peer Name)
|
||||||
|
- Table: Exception Type, Error Message, Count, Last Seen, First Seen, Application
|
||||||
|
- Pagination
|
||||||
|
- Exception detail links
|
||||||
|
- Share and Stage & Run Query buttons
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenario for exception detail view
|
||||||
|
- Add scenario for advanced filtering and edge cases
|
||||||
|
- Add scenario for sharing and running queries
|
||||||
|
- Add scenario for error grouping and navigation
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Service Map Module
|
||||||
|
|
||||||
|
- **Coverage:**
|
||||||
|
- Service Map visualization (main graph)
|
||||||
|
- Filters: environment, resource attributes
|
||||||
|
- Time range selection
|
||||||
|
- Sync and Share buttons
|
||||||
|
- **Gaps/Updates:**
|
||||||
|
- Add scenario for interacting with the map (zoom, pan, select service)
|
||||||
|
- Add scenario for filtering and edge cases
|
||||||
|
- Add scenario for sharing the map
|
||||||
|
- Add scenario for time range and environment combinations
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Billing Module

- **Coverage:**
  - Billing overview: cost monitoring, invoices, CSV download (disabled), manage billing (disabled)
  - Teams Cloud section
  - Billing table: Unit, Data Ingested, Price per Unit, Cost (Billing period to date)
- **Gaps/Updates:**
  - Add scenario for invoice download and management (when enabled)
  - Add scenario for cost monitoring and edge cases
  - Add scenario for billing table data validation; see the sketch below
  - Add scenario for permissions and access control
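A minimal sketch of the table-validation scenario; navigation and column labels mirror the billing settings spec elsewhere in this change, while the row selector is an assumption.

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../utils/login.util';

test('Billing - validate usage table has data', async ({ page }) => {
  await ensureLoggedIn(page);

  // Navigation mirrors the billing settings spec in this change.
  await page.getByTestId('settings-nav-item').click();
  await page.getByRole('menuitem', { name: 'Account Settings' }).click();
  await page.getByTestId('billing').click();
  await page.getByText('loading').first().waitFor({ state: 'hidden' });

  // Column headers are static and safe to assert.
  await expect(page.getByText('Data Ingested')).toBeVisible();
  await expect(page.getByText('Price per Unit')).toBeVisible();

  // At least one usage row should render; the row selector is an assumption.
  expect(await page.locator('table tbody tr').count()).toBeGreaterThan(0);
});
```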

---

## Usage Explorer Module

- **Status:**
  - Not accessible in the current environment. Removing from test plan flows.

---

## [Next modules will be filled as validation proceeds]

New file (42 lines):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Account Settings - View and Assert Static Controls', async ({ page }) => {
  await ensureLoggedIn(page);

  // 1. Open the sidebar settings menu using data-testid
  await page.getByTestId('settings-nav-item').click();

  // 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
  await page.getByRole('menuitem', { name: 'Account Settings' }).click();

  // Assert the main tabpanel/heading (confirmed by DOM)
  await expect(page.getByTestId('settings-page-title')).toBeVisible();

  // Assert General section and controls (confirmed by DOM)
  await expect(
    page.getByLabel('My Settings').getByText('General'),
  ).toBeVisible();
  await expect(page.getByText('Manage your account settings.')).toBeVisible();
  await expect(page.getByRole('button', { name: 'Update name' })).toBeVisible();
  await expect(
    page.getByRole('button', { name: 'Reset password' }),
  ).toBeVisible();

  // Assert User Preferences section and controls (confirmed by DOM)
  await expect(page.getByText('User Preferences')).toBeVisible();
  await expect(
    page.getByText('Tailor the SigNoz console to work according to your needs.'),
  ).toBeVisible();
  await expect(page.getByText('Select your theme')).toBeVisible();

  const themeSelector = page.getByTestId('theme-selector');

  await expect(themeSelector.getByText('Dark')).toBeVisible();
  await expect(themeSelector.getByText('Light')).toBeVisible();
  await expect(themeSelector.getByText('System')).toBeVisible();

  await expect(page.getByTestId('timezone-adaptation-switch')).toBeVisible();
  await expect(page.getByTestId('side-nav-pinned-switch')).toBeVisible();
});
```

New file (42 lines):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('API Keys Settings - View and Interact', async ({ page }) => {
  await ensureLoggedIn(page);

  // 1. Open the sidebar settings menu using data-testid
  await page.getByTestId('settings-nav-item').click();

  // 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
  await page.getByRole('menuitem', { name: 'Account Settings' }).click();

  // Assert the main tabpanel/heading (confirmed by DOM)
  await expect(page.getByTestId('settings-page-title')).toBeVisible();

  // Focus on the settings page sidenav
  await page.getByTestId('settings-page-sidenav').focus();

  // Click API Keys tab in the settings sidebar (by data-testid)
  await page.getByTestId('api-keys').click();

  // Assert heading and subheading
  await expect(page.getByRole('heading', { name: 'API Keys' })).toBeVisible();
  await expect(
    page.getByText('Create and manage API keys for the SigNoz API'),
  ).toBeVisible();

  // Assert presence of New Key button
  const newKeyBtn = page.getByRole('button', { name: 'New Key' });
  await expect(newKeyBtn).toBeVisible();

  // Assert table columns
  await expect(page.getByText('Last used').first()).toBeVisible();
  await expect(page.getByText('Expired').first()).toBeVisible();

  // Assert at least one API key row with action buttons
  // Select the first action cell's first button (icon button)
  const firstActionCell = page.locator('table tr').nth(1).locator('td').last();
  const deleteBtn = firstActionCell.locator('button').first();
  await expect(deleteBtn).toBeVisible();
});
```

frontend/e2e/tests/settings/billing/billing-settings.spec.ts (new file, 71 lines):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

// E2E: Billing Settings - View Billing Information and Button Actions

test('View Billing Information and Button Actions', async ({
  page,
  context,
}) => {
  // Ensure user is logged in
  await ensureLoggedIn(page);

  // 1. Open the sidebar settings menu using data-testid
  await page.getByTestId('settings-nav-item').click();

  // 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
  await page.getByRole('menuitem', { name: 'Account Settings' }).click();

  // Assert the main tabpanel/heading (confirmed by DOM)
  await expect(page.getByTestId('settings-page-title')).toBeVisible();

  // Focus on the settings page sidenav
  await page.getByTestId('settings-page-sidenav').focus();

  // Click Billing tab in the settings sidebar (by data-testid)
  await page.getByTestId('billing').click();

  // Wait for billing chart/data to finish loading
  await page.getByText('loading').first().waitFor({ state: 'hidden' });

  // Assert visibility of subheading (unique)
  await expect(
    page.getByText(
      'Manage your billing information, invoices, and monitor costs.',
    ),
  ).toBeVisible();
  // Assert visibility of Teams Cloud heading
  await expect(page.getByRole('heading', { name: 'Teams Cloud' })).toBeVisible();

  // Assert presence of summary and detailed tables
  await expect(page.getByText('TOTAL SPENT')).toBeVisible();
  await expect(page.getByText('Data Ingested')).toBeVisible();
  await expect(page.getByText('Price per Unit')).toBeVisible();
  await expect(page.getByText('Cost (Billing period to date)')).toBeVisible();

  // Assert presence of alert and note
  await expect(
    page.getByText('Your current billing period is from', { exact: false }),
  ).toBeVisible();
  await expect(
    page.getByText('Billing metrics are updated once every 24 hours.'),
  ).toBeVisible();

  // Test Download CSV button
  const [download] = await Promise.all([
    page.waitForEvent('download'),
    page.getByRole('button', { name: 'cloud-download Download CSV' }).click(),
  ]);
  // Optionally, check download file name
  expect(download.suggestedFilename()).toContain('billing_usage');

  // Test Manage Billing button (opens Stripe in new tab)
  const [newPage] = await Promise.all([
    context.waitForEvent('page'),
    page.getByTestId('header-billing-button').click(),
  ]);
  await newPage.waitForLoadState();
  expect(newPage.url()).toContain('stripe.com');
  await newPage.close();
});
```

New file (52 lines):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Custom Domain Settings - View and Interact', async ({ page }) => {
  await ensureLoggedIn(page);

  // 1. Open the sidebar settings menu using data-testid
  await page.getByTestId('settings-nav-item').click();

  // 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
  await page.getByRole('menuitem', { name: 'Account Settings' }).click();

  // Assert the main tabpanel/heading (confirmed by DOM)
  await expect(page.getByTestId('settings-page-title')).toBeVisible();

  // Focus on the settings page sidenav
  await page.getByTestId('settings-page-sidenav').focus();

  // Click Custom Domain tab in the settings sidebar (by data-testid)
  await page.getByTestId('custom-domain').click();

  // Wait for custom domain chart/data to finish loading
  await page.getByText('loading').first().waitFor({ state: 'hidden' });

  // Assert heading and subheading
  await expect(
    page.getByRole('heading', { name: 'Custom Domain Settings' }),
  ).toBeVisible();
  await expect(
    page.getByText('Personalize your workspace domain effortlessly.'),
  ).toBeVisible();

  // Assert presence of Customize team’s URL button
  const customizeBtn = page.getByRole('button', {
    name: 'Customize team’s URL',
  });
  await expect(customizeBtn).toBeVisible();
  await customizeBtn.click();

  // Assert modal/dialog fields and buttons
  await expect(
    page.getByRole('dialog', { name: 'Customize your team’s URL' }),
  ).toBeVisible();
  await expect(page.getByLabel('Team’s URL subdomain')).toBeVisible();
  await expect(
    page.getByRole('button', { name: 'Apply Changes' }),
  ).toBeVisible();
  await expect(page.getByRole('button', { name: 'Close' })).toBeVisible();
  // Close the modal
  await page.getByRole('button', { name: 'Close' }).click();
});
```

frontend/e2e/tests/settings/general/general-settings.spec.ts (new file, 32 lines):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('View General Settings', async ({ page }) => {
  await ensureLoggedIn(page);

  // 1. Open the sidebar settings menu using data-testid
  await page.getByTestId('settings-nav-item').click();

  // 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
  await page.getByRole('menuitem', { name: 'Account Settings' }).click();

  // Assert the main tabpanel/heading (confirmed by DOM)
  await expect(page.getByTestId('settings-page-title')).toBeVisible();

  // Focus on the settings page sidenav
  await page.getByTestId('settings-page-sidenav').focus();

  // Click General tab in the settings sidebar (by data-testid)
  await page.getByTestId('general').click();

  // Wait for General tab to be visible
  await page.getByRole('tabpanel', { name: 'General' }).waitFor();

  // Assert visibility of definitive/static elements
  await expect(page.getByRole('heading', { name: 'Metrics' })).toBeVisible();
  await expect(page.getByRole('heading', { name: 'Traces' })).toBeVisible();
  await expect(page.getByRole('heading', { name: 'Logs' })).toBeVisible();
  await expect(page.getByText('Please')).toBeVisible();
  await expect(page.getByRole('link', { name: 'email us' })).toBeVisible();
});
```

New file (48 lines):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Ingestion Settings - View and Interact', async ({ page }) => {
  await ensureLoggedIn(page);

  // 1. Open the sidebar settings menu using data-testid
  await page.getByTestId('settings-nav-item').click();

  // 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
  await page.getByRole('menuitem', { name: 'Account Settings' }).click();

  // Assert the main tabpanel/heading (confirmed by DOM)
  await expect(page.getByTestId('settings-page-title')).toBeVisible();

  // Focus on the settings page sidenav
  await page.getByTestId('settings-page-sidenav').focus();

  // Click Ingestion tab in the settings sidebar (by data-testid)
  await page.getByTestId('ingestion').click();

  // Assert heading and subheading (Integrations page)
  await expect(
    page.getByRole('heading', { name: 'Integrations' }),
  ).toBeVisible();
  await expect(
    page.getByText('Manage Integrations for this workspace'),
  ).toBeVisible();

  // Assert presence of search box
  await expect(
    page.getByPlaceholder('Search for an integration...'),
  ).toBeVisible();

  // Assert at least one data source with Configure button
  const configureBtn = page.getByRole('button', { name: 'Configure' }).first();
  await expect(configureBtn).toBeVisible();

  // Assert Request more integrations section
  await expect(
    page.getByText(
      "Can't find what you’re looking for? Request more integrations",
    ),
  ).toBeVisible();
  await expect(page.getByPlaceholder('Enter integration name...')).toBeVisible();
  await expect(page.getByRole('button', { name: 'Submit' })).toBeVisible();
});
```

New file (48 lines):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Integrations Settings - View and Interact', async ({ page }) => {
  await ensureLoggedIn(page);

  // 1. Open the sidebar settings menu using data-testid
  await page.getByTestId('settings-nav-item').click();

  // 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
  await page.getByRole('menuitem', { name: 'Account Settings' }).click();

  // Assert the main tabpanel/heading (confirmed by DOM)
  await expect(page.getByTestId('settings-page-title')).toBeVisible();

  // Focus on the settings page sidenav
  await page.getByTestId('settings-page-sidenav').focus();

  // Click Integrations tab in the settings sidebar (by data-testid)
  await page.getByTestId('integrations').click();

  // Assert heading and subheading
  await expect(
    page.getByRole('heading', { name: 'Integrations' }),
  ).toBeVisible();
  await expect(
    page.getByText('Manage Integrations for this workspace'),
  ).toBeVisible();

  // Assert presence of search box
  await expect(
    page.getByPlaceholder('Search for an integration...'),
  ).toBeVisible();

  // Assert at least one integration with Configure button
  const configureBtn = page.getByRole('button', { name: 'Configure' }).first();
  await expect(configureBtn).toBeVisible();

  // Assert Request more integrations section
  await expect(
    page.getByText(
      "Can't find what you’re looking for? Request more integrations",
    ),
  ).toBeVisible();
  await expect(page.getByPlaceholder('Enter integration name...')).toBeVisible();
  await expect(page.getByRole('button', { name: 'Submit' })).toBeVisible();
});
```

New file (56 lines):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Members & SSO Settings - View and Interact', async ({ page }) => {
  await ensureLoggedIn(page);

  // 1. Open the sidebar settings menu using data-testid
  await page.getByTestId('settings-nav-item').click();

  // 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
  await page.getByRole('menuitem', { name: 'Account Settings' }).click();

  // Assert the main tabpanel/heading (confirmed by DOM)
  await expect(page.getByTestId('settings-page-title')).toBeVisible();

  // Focus on the settings page sidenav
  await page.getByTestId('settings-page-sidenav').focus();

  // Click Members & SSO tab in the settings sidebar (by data-testid)
  await page.getByTestId('members-sso').click();

  // Assert headings and tables
  await expect(
    page.getByRole('heading', { name: /Members \(\d+\)/ }),
  ).toBeVisible();
  await expect(
    page.getByRole('heading', { name: /Pending Invites \(\d+\)/ }),
  ).toBeVisible();
  await expect(
    page.getByRole('heading', { name: 'Authenticated Domains' }),
  ).toBeVisible();

  // Assert Invite Members button is visible and clickable
  const inviteBtn = page.getByRole('button', { name: /Invite Members/ });
  await expect(inviteBtn).toBeVisible();
  await inviteBtn.click();
  // Assert Invite Members modal/dialog appears (modal title is unique)
  await expect(page.getByText('Invite team members').first()).toBeVisible();
  // Close the modal (use unique 'Close' button)
  await page.getByRole('button', { name: 'Close' }).click();

  // Assert Edit and Delete buttons are present for at least one member
  const editBtn = page.getByRole('button', { name: /Edit/ }).first();
  const deleteBtn = page.getByRole('button', { name: /Delete/ }).first();
  await expect(editBtn).toBeVisible();
  await expect(deleteBtn).toBeVisible();

  // Assert Add Domains button is visible
  await expect(page.getByRole('button', { name: /Add Domains/ })).toBeVisible();
  // Assert Configure SSO or Edit Google Auth button is visible for at least one domain
  const ssoBtn = page
    .getByRole('button', { name: /Configure SSO|Edit Google Auth/ })
    .first();
  await expect(ssoBtn).toBeVisible();
});
```

New file (57 lines):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Notification Channels Settings - View and Interact', async ({ page }) => {
  await ensureLoggedIn(page);

  // 1. Open the sidebar settings menu using data-testid
  await page.getByTestId('settings-nav-item').click();

  // 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
  await page.getByRole('menuitem', { name: 'Account Settings' }).click();

  // Assert the main tabpanel/heading (confirmed by DOM)
  await expect(page.getByTestId('settings-page-title')).toBeVisible();

  // Focus on the settings page sidenav
  await page.getByTestId('settings-page-sidenav').focus();

  // Click Notification Channels tab in the settings sidebar (by data-testid)
  await page.getByTestId('notification-channels').click();

  // Wait for loading to finish
  await page.getByText('loading').first().waitFor({ state: 'hidden' });

  // Assert presence of New Alert Channel button
  const newChannelBtn = page.getByRole('button', { name: /New Alert Channel/ });
  await expect(newChannelBtn).toBeVisible();

  // Assert table columns
  await expect(page.getByText('Name')).toBeVisible();
  await expect(page.getByText('Type')).toBeVisible();
  await expect(page.getByText('Action')).toBeVisible();

  // Click New Alert Channel and assert modal fields/buttons
  await newChannelBtn.click();
  await expect(
    page.getByRole('heading', { name: 'New Notification Channel' }),
  ).toBeVisible();
  await expect(page.getByLabel('Name')).toBeVisible();
  await expect(page.getByLabel('Type')).toBeVisible();
  await expect(page.getByLabel('Webhook URL')).toBeVisible();
  await expect(
    page.getByRole('switch', { name: 'Send resolved alerts' }),
  ).toBeVisible();
  await expect(page.getByRole('button', { name: 'Save' })).toBeVisible();
  await expect(page.getByRole('button', { name: 'Test' })).toBeVisible();
  await expect(page.getByRole('button', { name: 'Back' })).toBeVisible();
  // Close modal
  await page.getByRole('button', { name: 'Back' }).click();

  // Assert Edit and Delete buttons for at least one channel
  const editBtn = page.getByRole('button', { name: 'Edit' }).first();
  const deleteBtn = page.getByRole('button', { name: 'Delete' }).first();
  await expect(editBtn).toBeVisible();
  await expect(deleteBtn).toBeVisible();
});
```

frontend/e2e/utils/login.util.ts (new file, 35 lines):

```ts
import { Page } from '@playwright/test';

// Read credentials from environment variables
const username = process.env.LOGIN_USERNAME;
const password = process.env.LOGIN_PASSWORD;
const baseURL = process.env.BASE_URL;

/**
 * Ensures the user is logged in. If not, performs the login steps.
 * Follows the MCP process step-by-step.
 */
export async function ensureLoggedIn(page: Page): Promise<void> {
  // If already on the home page, skip logging in
  if (page.url().includes('/home')) {
    return;
  }

  if (!username || !password) {
    throw new Error(
      'LOGIN_USERNAME and LOGIN_PASSWORD environment variables must be set.',
    );
  }

  await page.goto(`${baseURL}/login`);
  await page.getByTestId('email').click();
  await page.getByTestId('email').fill(username);
  await page.getByTestId('initiate_login').click();
  await page.getByTestId('password').click();
  await page.getByTestId('password').fill(password);
  await page.getByRole('button', { name: 'Login' }).click();

  await page
    .getByText('Hello there, Welcome to your')
    .waitFor({ state: 'visible' });
}
```

Modified file (Jest config):

```diff
@@ -16,6 +16,7 @@ const config: Config.InitialOptions = {
 	'ts-jest': {
 		useESM: true,
 		isolatedModules: true,
+		tsconfig: '<rootDir>/tsconfig.jest.json',
 	},
 },
 testMatch: ['<rootDir>/src/**/*?(*.)(test).(ts|js)?(x)'],
@@ -25,7 +26,7 @@
 	'^.+\\.(js|jsx)$': 'babel-jest',
 },
 transformIgnorePatterns: [
-	'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color|api)/)',
+	'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|@signozhq/table|@signozhq/calendar|@signozhq/input|@signozhq/popover|@signozhq/button|@signozhq/sonner|@signozhq/*|date-fns|d3-interpolate|d3-color|api|@codemirror|@lezer|@marijn)/)',
 ],
 setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
 testPathIgnorePatterns: ['/node_modules/', '/public/'],
```

Modified file (package.json):

```diff
@@ -28,6 +28,8 @@
 	"dependencies": {
 		"@ant-design/colors": "6.0.0",
 		"@ant-design/icons": "4.8.0",
+		"@codemirror/autocomplete": "6.18.6",
+		"@codemirror/lang-javascript": "6.2.3",
 		"@dnd-kit/core": "6.1.0",
 		"@dnd-kit/modifiers": "7.0.0",
 		"@dnd-kit/sortable": "8.0.0",
@@ -36,13 +38,27 @@
 		"@mdx-js/loader": "2.3.0",
 		"@mdx-js/react": "2.3.0",
 		"@monaco-editor/react": "^4.3.1",
+		"@playwright/test": "1.54.1",
 		"@radix-ui/react-tabs": "1.0.4",
 		"@radix-ui/react-tooltip": "1.0.7",
 		"@sentry/react": "8.41.0",
 		"@sentry/webpack-plugin": "2.22.6",
+		"@signozhq/badge": "0.0.2",
+		"@signozhq/button": "0.0.2",
+		"@signozhq/calendar": "0.0.0",
+		"@signozhq/callout": "0.0.2",
 		"@signozhq/design-tokens": "1.1.4",
+		"@signozhq/input": "0.0.2",
+		"@signozhq/popover": "0.0.0",
+		"@signozhq/resizable": "0.0.0",
+		"@signozhq/sonner": "0.1.0",
+		"@signozhq/table": "0.3.7",
+		"@signozhq/tooltip": "0.0.2",
 		"@tanstack/react-table": "8.20.6",
 		"@tanstack/react-virtual": "3.11.2",
+		"@uiw/codemirror-theme-copilot": "4.23.11",
+		"@uiw/codemirror-theme-github": "4.24.1",
+		"@uiw/react-codemirror": "4.23.10",
 		"@uiw/react-md-editor": "3.23.5",
 		"@visx/group": "3.3.0",
 		"@visx/hierarchy": "3.12.0",
@@ -52,6 +68,7 @@
 		"ansi-to-html": "0.7.2",
 		"antd": "5.11.0",
 		"antd-table-saveas-excel": "2.2.1",
+		"antlr4": "4.13.2",
 		"axios": "1.8.2",
 		"babel-eslint": "^10.1.0",
 		"babel-jest": "^29.6.4",
@@ -85,6 +102,7 @@
 		"i18next-http-backend": "^1.3.2",
 		"jest": "^27.5.1",
 		"js-base64": "^3.7.2",
+		"kbar": "0.1.0-beta.48",
 		"less": "^4.1.2",
 		"less-loader": "^10.2.0",
 		"lodash-es": "^4.17.21",
@@ -213,7 +231,9 @@
 		"eslint-plugin-simple-import-sort": "^7.0.0",
 		"eslint-plugin-sonarjs": "^0.12.0",
 		"husky": "^7.0.4",
-		"image-webpack-loader": "8.1.0",
+		"image-minimizer-webpack-plugin": "^4.0.0",
+		"imagemin": "^8.0.1",
+		"imagemin-svgo": "^10.0.1",
 		"is-ci": "^3.0.1",
 		"jest-styled-components": "^7.0.8",
 		"lint-staged": "^12.5.0",
@@ -230,9 +250,10 @@
 		"redux-mock-store": "1.5.4",
 		"sass": "1.66.1",
 		"sass-loader": "13.3.2",
+		"sharp": "^0.33.4",
 		"ts-jest": "^27.1.5",
 		"ts-node": "^10.2.1",
-		"typescript-plugin-css-modules": "5.0.1",
+		"typescript-plugin-css-modules": "5.2.0",
 		"webpack-bundle-analyzer": "^4.5.0",
 		"webpack-cli": "^5.1.4"
 	},
@@ -254,6 +275,9 @@
 		"cross-spawn": "7.0.5",
 		"cookie": "^0.7.1",
 		"serialize-javascript": "6.0.2",
-		"prismjs": "1.30.0"
+		"prismjs": "1.30.0",
+		"got": "11.8.5",
+		"form-data": "4.0.4",
+		"brace-expansion": "^2.0.2"
 	}
 }
```

frontend/playwright.config.ts (new file, 95 lines):

```ts
import { defineConfig, devices } from '@playwright/test';
import dotenv from 'dotenv';
import path from 'path';

// Read from ".env" file.
dotenv.config({ path: path.resolve(__dirname, '.env') });

/**
 * Read environment variables from file.
 * https://github.com/motdotla/dotenv
 */
// import dotenv from 'dotenv';
// import path from 'path';
// dotenv.config({ path: path.resolve(__dirname, '.env') });

/**
 * See https://playwright.dev/docs/test-configuration.
 */
export default defineConfig({
  testDir: './e2e/tests',
  /* Run tests in files in parallel */
  fullyParallel: true,
  /* Fail the build on CI if you accidentally left test.only in the source code. */
  forbidOnly: !!process.env.CI,
  /* Retry on CI only */
  retries: process.env.CI ? 2 : 0,
  /* Run tests in parallel even in CI - optimized for GitHub Actions free tier */
  workers: process.env.CI ? 2 : undefined,
  /* Reporter to use. See https://playwright.dev/docs/test-reporters */
  reporter: 'html',
  /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
  use: {
    /* Base URL to use in actions like `await page.goto('/')`. */
    baseURL:
      process.env.SIGNOZ_E2E_BASE_URL || 'https://app.us.staging.signoz.cloud',

    /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
    trace: 'on-first-retry',
    colorScheme: 'dark',
    locale: 'en-US',
    viewport: { width: 1280, height: 720 },
  },

  /* Configure projects for major browsers */
  projects: [
    {
      name: 'chromium',
      use: {
        launchOptions: { args: ['--start-maximized'] },
        viewport: null,
        colorScheme: 'dark',
        locale: 'en-US',
        baseURL: 'https://app.us.staging.signoz.cloud',
        trace: 'on-first-retry',
      },
    },

    {
      name: 'firefox',
      use: { ...devices['Desktop Firefox'] },
    },

    {
      name: 'webkit',
      use: { ...devices['Desktop Safari'] },
    },

    /* Test against mobile viewports. */
    // {
    //   name: 'Mobile Chrome',
    //   use: { ...devices['Pixel 5'] },
    // },
    // {
    //   name: 'Mobile Safari',
    //   use: { ...devices['iPhone 12'] },
    // },

    /* Test against branded browsers. */
    // {
    //   name: 'Microsoft Edge',
    //   use: { ...devices['Desktop Edge'], channel: 'msedge' },
    // },
    // {
    //   name: 'Google Chrome',
    //   use: { ...devices['Desktop Chrome'], channel: 'chrome' },
    // },
  ],

  /* Run your local dev server before starting the tests */
  // webServer: {
  //   command: 'npm run start',
  //   url: 'http://localhost:3000',
  //   reuseExistingServer: !process.env.CI,
  // },
});
```

frontend/prompts/generate-e2e-test.md (new file, 16 lines):

RULE: All test code for this repo must be generated by following the step-by-step Playwright MCP process described below.

- You are a Playwright test generator.
- You are given a scenario and you need to generate a Playwright test for it.
- Use the login util if not already logged in.
- DO NOT generate test code based on the scenario alone.
- DO run the steps one by one using the tools provided by the Playwright MCP.
- Only after all steps are completed, emit a Playwright TypeScript test that uses @playwright/test, based on the message history.
- Gather correct selectors before writing the test.
- DO NOT validate dynamic content in the tests; only validate correctness using metadata.
- Always inspect the DOM at each navigation or interaction step to determine the correct selector for the next action. Do not assume selectors; confirm via inspection before proceeding.
- Assert visibility of definitive/static elements in the UI (such as labels, headings, or section titles) rather than dynamic values or content that may change between runs.
- Save the generated test file in the tests directory.
- Execute the test file and iterate until the test passes.

frontend/public/Logos/argocd.svg (new file, 6.1 KiB; shown as a rendered image, source not captured)

frontend/public/Logos/azure-mysql.svg (new file, 1 line):
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="2.59" y1="10.16" x2="15.41" y2="10.16" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#005ba1"/><stop offset=".07" stop-color="#0060a9"/><stop offset=".36" stop-color="#0071c8"/><stop offset=".52" stop-color="#0078d4"/><stop offset=".64" stop-color="#0074cd"/><stop offset=".82" stop-color="#006abb"/><stop offset="1" stop-color="#005ba1"/></linearGradient></defs><path d="M9 5.14c-3.54 0-6.41-1-6.41-2.32v12.36c0 1.27 2.82 2.3 6.32 2.32H9c3.54 0 6.41-1 6.41-2.32V2.82c0 1.29-2.87 2.32-6.41 2.32z" fill="url(#a)"/><path d="M15.41 2.82c0 1.29-2.87 2.32-6.41 2.32s-6.41-1-6.41-2.32S5.46.5 9 .5s6.41 1 6.41 2.32" fill="#e8e8e8"/><path d="M13.92 2.63c0 .82-2.21 1.48-4.92 1.48s-4.92-.66-4.92-1.48S6.29 1.16 9 1.16s4.92.66 4.92 1.47" fill="#50e6ff"/><path d="M9 3a11.55 11.55 0 00-3.89.57A11.42 11.42 0 009 4.11a11.15 11.15 0 003.89-.58A11.84 11.84 0 009 3z" fill="#198ab3"/><path d="M12.64 9v1.63h-1a.39.39 0 01-.29-.14V9H10v1.78a.92.92 0 001 .89h1.49l.26-.13s-.11.41-.26.43h-2.38v1h2.66A1.21 1.21 0 0014 11.7V9zM9.53 9v-.49a.7.7 0 00-.48-.77 1.74 1.74 0 00-.5-.08.94.94 0 00-.91.58l-.78 1.9-1-1.9A.93.93 0 005 7.66a1.44 1.44 0 00-.51.09c-.35.11-.43.34-.43.73v3.31h1.17V9.56l.63 1.57a1.08 1.08 0 001 .66c.44 0 .62-.26.8-.66l.67-1.51v2.15h1.18V9z" fill="#f2f2f2"/></svg>

frontend/public/Logos/cloudflare.svg (new file, 1 line):
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" width="64" height="64"><path d="M8.16 23h21.177v-5.86l-4.023-2.307-.694-.3-16.46.113z" fill="#fff"/><path d="M22.012 22.222c.197-.675.122-1.294-.206-1.754-.3-.422-.807-.666-1.416-.694l-11.545-.15c-.075 0-.14-.038-.178-.094s-.047-.13-.028-.206c.038-.113.15-.197.272-.206l11.648-.15c1.38-.066 2.88-1.182 3.404-2.55l.666-1.735a.38.38 0 0 0 .02-.225c-.75-3.395-3.78-5.927-7.4-5.927-3.34 0-6.17 2.157-7.184 5.15-.657-.488-1.5-.75-2.392-.666-1.604.16-2.9 1.444-3.048 3.048a3.58 3.58 0 0 0 .084 1.191A4.84 4.84 0 0 0 0 22.1c0 .234.02.47.047.703.02.113.113.197.225.197H21.58a.29.29 0 0 0 .272-.206l.16-.572z" fill="#f38020"/><path d="M25.688 14.803l-.32.01c-.075 0-.14.056-.17.13l-.45 1.566c-.197.675-.122 1.294.206 1.754.3.422.807.666 1.416.694l2.457.15c.075 0 .14.038.178.094s.047.14.028.206c-.038.113-.15.197-.272.206l-2.56.15c-1.388.066-2.88 1.182-3.404 2.55l-.188.478c-.038.094.028.188.13.188h8.797a.23.23 0 0 0 .225-.169A6.41 6.41 0 0 0 32 21.106a6.32 6.32 0 0 0-6.312-6.302" fill="#faae40"/></svg>

frontend/public/Logos/dynamodb.svg (new file, 18 lines, 6.2 KiB; shown as a rendered image, source not captured)

frontend/public/Logos/elk.svg (new file, 2 lines):
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 256 256" xmlns="http://www.w3.org/2000/svg" preserveAspectRatio="xMinYMin meet"><path d="M255.96 134.393c0-21.521-13.373-40.117-33.223-47.43a75.239 75.239 0 0 0 1.253-13.791c0-39.909-32.386-72.295-72.295-72.295-23.193 0-44.923 11.074-58.505 30.088-6.686-5.224-14.835-7.94-23.402-7.94-21.104 0-38.446 17.133-38.446 38.446 0 4.597.836 9.194 2.298 13.373C13.582 81.739 0 100.962 0 122.274c0 21.522 13.373 40.327 33.431 47.64-.835 4.388-1.253 8.985-1.253 13.79 0 39.7 32.386 72.087 72.086 72.087 23.402 0 44.924-11.283 58.505-30.088 6.686 5.223 15.044 8.149 23.611 8.149 21.104 0 38.446-17.134 38.446-38.446 0-4.597-.836-9.194-2.298-13.373 19.64-7.104 33.431-26.327 33.431-47.64z" fill="#FFF"/><path d="M100.085 110.364l57.043 26.119 57.669-50.565a64.312 64.312 0 0 0 1.253-12.746c0-35.52-28.834-64.355-64.355-64.355-21.313 0-41.162 10.447-53.072 27.998l-9.612 49.73 11.074 23.82z" fill="#F4BD19"/><path d="M40.953 170.75c-.835 4.179-1.253 8.567-1.253 12.955 0 35.52 29.043 64.564 64.564 64.564 21.522 0 41.372-10.656 53.49-28.208l9.403-49.729-12.746-24.238-57.251-26.118-56.207 50.774z" fill="#3CBEB1"/><path d="M40.536 71.918l39.073 9.194 8.775-44.506c-5.432-4.179-11.91-6.268-18.805-6.268-16.925 0-30.924 13.79-30.924 30.924 0 3.552.627 7.313 1.88 10.656z" fill="#E9478C"/><path d="M37.192 81.32c-17.551 5.642-29.67 22.567-29.67 40.954 0 17.97 11.074 34.059 27.79 40.327l54.953-49.73-10.03-21.52-43.043-10.03z" fill="#2C458F"/><path d="M167.784 219.852c5.432 4.18 11.91 6.478 18.596 6.478 16.925 0 30.924-13.79 30.924-30.924 0-3.761-.627-7.314-1.88-10.657l-39.073-9.193-8.567 44.296z" fill="#95C63D"/><path d="M175.724 165.317l43.043 10.03c17.551-5.85 29.67-22.566 29.67-40.954 0-17.97-11.074-33.849-27.79-40.326l-56.415 49.311 11.492 21.94z" fill="#176655"/></svg>

frontend/public/Logos/external-api-monitoring.svg (new file, 19 lines):
```xml
<?xml version="1.0" encoding="utf-8"?>

<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg version="1.1" baseProfile="tiny" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"
  width="800px" height="800px" viewBox="0 0 24 24" overflow="visible" xml:space="preserve">
<g>
  <rect y="0" fill="none" width="24" height="24"/>
  <g transform="translate(1.000000, 8.000000)">
    <path fill-rule="evenodd" fill="#5C85DE" d="M2-1.9c-1.1,0-2.3,1.1-2.3,2.2V10H2V5.5h2.2V10h2.2V0.3c0-1.1-1.1-2.2-2.3-2.2H2
      L2-1.9z M2,3.2v-3h2.2v3H2L2,3.2z"/>
    <path fill-rule="evenodd" fill="#5C85DE" d="M10.3-2C9.1-2,8-0.9,8,0.2V10l2.2,0V5.5h2.2c1.1,0,2.3-1.1,2.3-2.2l0-3
      c0-1.1-1.1-2.2-2.3-2.2H10.3L10.3-2z M10.2,3.2v-3h2.2v3H10.2L10.2,3.2z"/>
    <polygon fill-rule="evenodd" fill="#5C85DE" points="18.5,0.3 18.5,7.8 16.2,7.8 16.2,10 23,10 23,7.8 20.8,7.8 20.8,0.3 23,0.3
      23,-1.9 16.2,-1.9 16.2,0.3 "/>
    <polygon fill-rule="evenodd" fill="#3367D6" points="2,5.5 2,3.2 3.5,3.2 "/>
    <polygon fill-rule="evenodd" fill="#3367D6" points="10.2,5.5 10.2,3.2 11.5,3.2 "/>
    <polygon fill-rule="evenodd" fill="#3367D6" points="18.5,1.8 18.5,1.8 18.5,0.3 20.8,0.3 "/>
  </g>
</g>
</svg>
```

frontend/public/Logos/github-actions.svg (new file, 1 line):
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 128 128"><path fill="#2088ff" d="M26.666 0C11.97 0 0 11.97 0 26.666c0 12.87 9.181 23.651 21.334 26.13v37.87c0 11.77 9.68 21.334 21.332 21.334h.195c1.302 9.023 9.1 16 18.473 16C71.612 128 80 119.612 80 109.334s-8.388-18.668-18.666-18.668c-9.372 0-17.17 6.977-18.473 16h-.195c-8.737 0-16-7.152-16-16V63.779a18.514 18.514 0 0 0 13.24 5.555h2.955c1.303 9.023 9.1 16 18.473 16 9.372 0 17.169-6.977 18.47-16h11.057c1.303 9.023 9.1 16 18.473 16 10.278 0 18.666-8.39 18.666-18.668C128 56.388 119.612 48 109.334 48c-9.373 0-17.171 6.977-18.473 16H79.805c-1.301-9.023-9.098-16-18.471-16s-17.171 6.977-18.473 16h-2.955c-6.433 0-11.793-4.589-12.988-10.672 14.58-.136 26.416-12.05 26.416-26.662C53.334 11.97 41.362 0 26.666 0zm0 5.334A21.292 21.292 0 0 1 48 26.666 21.294 21.294 0 0 1 26.666 48 21.292 21.292 0 0 1 5.334 26.666 21.29 21.29 0 0 1 26.666 5.334zm-5.215 7.541C18.67 12.889 16 15.123 16 18.166v17.043c0 4.043 4.709 6.663 8.145 4.533l13.634-8.455c3.257-2.02 3.274-7.002.032-9.045l-13.635-8.59a5.024 5.024 0 0 0-2.725-.777zm-.117 5.291 13.635 8.588-13.635 8.455V18.166zm40 35.168a13.29 13.29 0 0 1 13.332 13.332A13.293 13.293 0 0 1 61.334 80 13.294 13.294 0 0 1 48 66.666a13.293 13.293 0 0 1 13.334-13.332zm48 0a13.29 13.29 0 0 1 13.332 13.332A13.293 13.293 0 0 1 109.334 80 13.294 13.294 0 0 1 96 66.666a13.293 13.293 0 0 1 13.334-13.332zm-42.568 6.951a2.667 2.667 0 0 0-1.887.78l-6.3 6.294-2.093-2.084a2.667 2.667 0 0 0-3.771.006 2.667 2.667 0 0 0 .008 3.772l3.974 3.96a2.667 2.667 0 0 0 3.766-.001l8.185-8.174a2.667 2.667 0 0 0 .002-3.772 2.667 2.667 0 0 0-1.884-.78zm48 0a2.667 2.667 0 0 0-1.887.78l-6.3 6.294-2.093-2.084a2.667 2.667 0 0 0-3.771.006 2.667 2.667 0 0 0 .008 3.772l3.974 3.96a2.667 2.667 0 0 0 3.766-.001l8.185-8.174a2.667 2.667 0 0 0 .002-3.772 2.667 2.667 0 0 0-1.884-.78zM61.334 96a13.293 13.293 0 0 1 13.332 13.334 13.29 13.29 0 0 1-13.332 13.332A13.293 13.293 0 0 1 48 109.334 13.294 13.294 0 0 1 61.334 96zM56 105.334c-2.193 0-4 1.807-4 4 0 2.195 1.808 4 4 4s4-1.805 4-4c0-2.193-1.807-4-4-4zm10.666 0c-2.193 0-4 1.807-4 4 0 2.195 1.808 4 4 4s4-1.805 4-4c0-2.193-1.807-4-4-4zM56 108c.75 0 1.334.585 1.334 1.334 0 .753-.583 1.332-1.334 1.332-.75 0-1.334-.58-1.334-1.332 0-.75.585-1.334 1.334-1.334zm10.666 0c.75 0 1.334.585 1.334 1.334 0 .753-.583 1.332-1.334 1.332-.75 0-1.332-.58-1.332-1.332 0-.75.583-1.334 1.332-1.334z"/><path fill="#79b8ff" d="M109.334 90.666c-9.383 0-17.188 6.993-18.477 16.031a2.667 2.667 0 0 0-.265-.011l-2.7.09a2.667 2.667 0 0 0-2.578 2.751 2.667 2.667 0 0 0 2.752 2.578l2.7-.087a2.667 2.667 0 0 0 .097-.006C92.17 121.029 99.965 128 109.334 128c10.278 0 18.666-8.388 18.666-18.666s-8.388-18.668-18.666-18.668zm0 5.334a13.293 13.293 0 0 1 13.332 13.334 13.29 13.29 0 0 1-13.332 13.332A13.293 13.293 0 0 1 96 109.334 13.294 13.294 0 0 1 109.334 96z"/></svg>