Compare commits
1 commit
main ... fix/pipeli

| Author | SHA1 | Date |
|---|---|---|
|  | 99edf96910 |  |
@@ -1,6 +1,6 @@
 services:
   clickhouse:
-    image: clickhouse/clickhouse-server:25.5.6
+    image: clickhouse/clickhouse-server:24.1.2-alpine
     container_name: clickhouse
     volumes:
       - ${PWD}/fs/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
@@ -23,10 +23,8 @@ services:
       retries: 3
    depends_on:
      - zookeeper
-    environment:
-      - CLICKHOUSE_SKIP_USER_SETUP=1
  zookeeper:
-    image: signoz/zookeeper:3.7.1
+    image: bitnami/zookeeper:3.7.1
    container_name: zookeeper
    volumes:
      - ${PWD}/fs/tmp/zookeeper:/bitnami/zookeeper
@@ -42,7 +40,7 @@ services:
       timeout: 5s
      retries: 3
  schema-migrator-sync:
-    image: signoz/signoz-schema-migrator:v0.129.12
+    image: signoz/signoz-schema-migrator:v0.128.0
    container_name: schema-migrator-sync
    command:
      - sync
@@ -55,7 +53,7 @@ services:
       condition: service_healthy
    restart: on-failure
  schema-migrator-async:
-    image: signoz/signoz-schema-migrator:v0.129.12
+    image: signoz/signoz-schema-migrator:v0.128.0
    container_name: schema-migrator-async
    command:
      - async

@@ -1,29 +0,0 @@
-services:
-  signoz-otel-collector:
-    image: signoz/signoz-otel-collector:v0.129.6
-    container_name: signoz-otel-collector-dev
-    command:
-      - --config=/etc/otel-collector-config.yaml
-      - --feature-gates=-pkg.translator.prometheus.NormalizeName
-    volumes:
-      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
-    environment:
-      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
-      - LOW_CARDINAL_EXCEPTION_GROUPING=false
-    ports:
-      - "4317:4317" # OTLP gRPC receiver
-      - "4318:4318" # OTLP HTTP receiver
-      - "13133:13133" # health check extension
-    healthcheck:
-      test:
-        - CMD
-        - wget
-        - --spider
-        - -q
-        - localhost:13133
-      interval: 30s
-      timeout: 5s
-      retries: 3
-    restart: unless-stopped
-    extra_hosts:
-      - "host.docker.internal:host-gateway"

@@ -1,96 +0,0 @@
-receivers:
-  otlp:
-    protocols:
-      grpc:
-        endpoint: 0.0.0.0:4317
-      http:
-        endpoint: 0.0.0.0:4318
-  prometheus:
-    config:
-      global:
-        scrape_interval: 60s
-      scrape_configs:
-        - job_name: otel-collector
-          static_configs:
-            - targets:
-                - localhost:8888
-              labels:
-                job_name: otel-collector
-
-processors:
-  batch:
-    send_batch_size: 10000
-    send_batch_max_size: 11000
-    timeout: 10s
-  resourcedetection:
-    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
-    detectors: [env, system]
-    timeout: 2s
-  signozspanmetrics/delta:
-    metrics_exporter: signozclickhousemetrics
-    metrics_flush_interval: 60s
-    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
-    dimensions_cache_size: 100000
-    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
-    enable_exp_histogram: true
-    dimensions:
-      - name: service.namespace
-        default: default
-      - name: deployment.environment
-        default: default
-      # This is added to ensure the uniqueness of the timeseries
-      # Otherwise, identical timeseries produced by multiple replicas of
-      # collectors result in incorrect APM metrics
-      - name: signoz.collector.id
-      - name: service.version
-      - name: browser.platform
-      - name: browser.mobile
-      - name: k8s.cluster.name
-      - name: k8s.node.name
-      - name: k8s.namespace.name
-      - name: host.name
-      - name: host.type
-      - name: container.name
-
-extensions:
-  health_check:
-    endpoint: 0.0.0.0:13133
-  pprof:
-    endpoint: 0.0.0.0:1777
-
-exporters:
-  clickhousetraces:
-    datasource: tcp://host.docker.internal:9000/signoz_traces
-    low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
-    use_new_schema: true
-  signozclickhousemetrics:
-    dsn: tcp://host.docker.internal:9000/signoz_metrics
-  clickhouselogsexporter:
-    dsn: tcp://host.docker.internal:9000/signoz_logs
-    timeout: 10s
-    use_new_schema: true
-
-service:
-  telemetry:
-    logs:
-      encoding: json
-  extensions:
-    - health_check
-    - pprof
-  pipelines:
-    traces:
-      receivers: [otlp]
-      processors: [signozspanmetrics/delta, batch]
-      exporters: [clickhousetraces]
-    metrics:
-      receivers: [otlp]
-      processors: [batch]
-      exporters: [signozclickhousemetrics]
-    metrics/prometheus:
-      receivers: [prometheus]
-      processors: [batch]
-      exporters: [signozclickhousemetrics]
-    logs:
-      receivers: [otlp]
-      processors: [batch]
-      exporters: [clickhouselogsexporter]

53  .github/CODEOWNERS  vendored
@@ -2,50 +2,19 @@
 # Owners are automatically requested for review for PRs that changes code
 # that they own.

-/frontend/ @YounixM @aks07
-
-# Onboarding
-/frontend/src/container/OnboardingV2Container/onboarding-configs/onboarding-config-with-links.json @makeavish
-/frontend/src/container/OnboardingV2Container/AddDataSource/AddDataSource.tsx @makeavish
-
+/frontend/ @SigNoz/frontend @YounixM
 /frontend/src/container/MetricsApplication @srikanthccv
 /frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
 /deploy/ @SigNoz/devops
 .github @SigNoz/devops

-# Scaffold Owners
-/pkg/config/ @therealpandey
-/pkg/errors/ @therealpandey
-/pkg/factory/ @therealpandey
-/pkg/types/ @therealpandey
-/pkg/valuer/ @therealpandey
-/cmd/ @therealpandey
-.golangci.yml @therealpandey
-
 # Zeus Owners
+/pkg/config/ @grandwizard28
+/pkg/errors/ @grandwizard28
+/pkg/factory/ @grandwizard28
+/pkg/types/ @grandwizard28
+.golangci.yml @grandwizard28
-/pkg/zeus/ @vikrantgupta25
-/ee/zeus/ @vikrantgupta25
-/pkg/licensing/ @vikrantgupta25
-/ee/licensing/ @vikrantgupta25
-
-# SQL Owners
-/pkg/sqlmigration/ @vikrantgupta25
-/ee/sqlmigration/ @vikrantgupta25
-/pkg/sqlschema/ @vikrantgupta25
-/ee/sqlschema/ @vikrantgupta25
-
-# Analytics Owners
-/pkg/analytics/ @vikrantgupta25
-/pkg/statsreporter/ @vikrantgupta25
-
-# Querier Owners
-/pkg/querier/ @srikanthccv
-/pkg/variables/ @srikanthccv
-/pkg/types/querybuildertypes/ @srikanthccv
-/pkg/querybuilder/ @srikanthccv
-/pkg/telemetrylogs/ @srikanthccv
-/pkg/telemetrymetadata/ @srikanthccv
-/pkg/telemetrymetrics/ @srikanthccv
-/pkg/telemetrytraces/ @srikanthccv
-
-# AuthN / AuthZ Owners
-
-/pkg/authz/ @vikrantgupta25 @therealpandey
+/ee/zeus/ @vikrantgupta25
+/ee/licensing/ @vikrantgupta25
+/ee/sqlmigration/ @vikrantgupta25

13  .github/workflows/build-community.yaml  vendored
@@ -3,8 +3,8 @@ name: build-community
 on:
   push:
     tags:
-      - "v[0-9]+.[0-9]+.[0-9]+"
-      - "v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'

 defaults:
   run:
@@ -62,21 +62,22 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.24
+      GO_VERSION: 1.23
       GO_NAME: signoz-community
       GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
       GO_INPUT_ARTIFACT_PATH: frontend/build
-      GO_BUILD_CONTEXT: ./cmd/community
+      GO_BUILD_CONTEXT: ./pkg/query-service
       GO_BUILD_FLAGS: >-
         -tags timetzdata
-        -ldflags='-s -w
+        -ldflags='-linkmode external -extldflags \"-static\" -s -w
         -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
         -X github.com/SigNoz/signoz/pkg/version.variant=community
         -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
         -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
         -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
         -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
+      GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
-      DOCKER_DOCKERFILE_PATH: ./cmd/community/Dockerfile.multi-arch
+      DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
       DOCKER_PROVIDERS: dockerhub
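
Note: the -X linker flags in GO_BUILD_FLAGS above stamp version metadata into package-level string variables at link time instead of hardcoding it. A minimal sketch of the mechanism, assuming a hypothetical module path (this is not SigNoz's actual pkg/version):

// version.go — values are injected at link time, e.g.
//   go build -ldflags "-X example.com/app/version.version=v1.2.3 -X example.com/app/version.hash=9aef01c"
package version

var (
	version = "dev"     // overwritten by -X ...version.version=
	hash    = "unknown" // overwritten by -X ...version.hash=
)

// String reports the stamped build metadata, falling back to the defaults
// above when the binary was built without the -X flags.
func String() string { return version + " (" + hash + ")" }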

13  .github/workflows/build-enterprise.yaml  vendored
@@ -69,7 +69,6 @@ jobs:
       echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
       echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> frontend/.env
       echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> frontend/.env
-      echo 'PYLON_IDENTITY_SECRET="${{ secrets.PYLON_IDENTITY_SECRET }}"' >> frontend/.env
   - name: cache-dotenv
     uses: actions/cache@v4
     with:
@@ -85,7 +84,7 @@ jobs:
       JS_INPUT_ARTIFACT_CACHE_KEY: enterprise-dotenv-${{ github.sha }}
       JS_INPUT_ARTIFACT_PATH: frontend/.env
       JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
-      JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      JS_OUTPUT_ARTIFACT_PATH: frontend/build
       DOCKER_BUILD: false
       DOCKER_MANIFEST: false
   go-build:
@@ -94,13 +93,13 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.24
+      GO_VERSION: 1.23
       GO_INPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
       GO_INPUT_ARTIFACT_PATH: frontend/build
-      GO_BUILD_CONTEXT: ./cmd/enterprise
+      GO_BUILD_CONTEXT: ./ee/query-service
       GO_BUILD_FLAGS: >-
         -tags timetzdata
-        -ldflags='-s -w
+        -ldflags='-linkmode external -extldflags \"-static\" -s -w
         -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
         -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
         -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
@@ -108,9 +107,11 @@ jobs:
         -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
-        -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
-        -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
+        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
         -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
+      GO_CGO_ENABLED: 1
+      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
-      DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
+      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
       DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}

13  .github/workflows/build-staging.yaml  vendored
@@ -68,7 +68,6 @@ jobs:
       echo 'TUNNEL_DOMAIN="${{ secrets.NP_TUNNEL_DOMAIN }}"' >> frontend/.env
       echo 'PYLON_APP_ID="${{ secrets.NP_PYLON_APP_ID }}"' >> frontend/.env
       echo 'APPCUES_APP_ID="${{ secrets.NP_APPCUES_APP_ID }}"' >> frontend/.env
-      echo 'PYLON_IDENTITY_SECRET="${{ secrets.NP_PYLON_IDENTITY_SECRET }}"' >> frontend/.env
   - name: cache-dotenv
     uses: actions/cache@v4
     with:
@@ -93,13 +92,13 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.24
+      GO_VERSION: 1.23
       GO_INPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
       GO_INPUT_ARTIFACT_PATH: frontend/build
-      GO_BUILD_CONTEXT: ./cmd/enterprise
+      GO_BUILD_CONTEXT: ./ee/query-service
       GO_BUILD_FLAGS: >-
         -tags timetzdata
-        -ldflags='-s -w
+        -ldflags='-linkmode external -extldflags \"-static\" -s -w
         -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
         -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
         -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
@@ -107,10 +106,12 @@ jobs:
         -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
-        -X github.com/SigNoz/signoz/ee/zeus.url=https://api.staging.signoz.cloud
-        -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.staging.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
+        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1
         -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
+      GO_CGO_ENABLED: 1
+      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
-      DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
+      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
       DOCKER_PROVIDERS: gcp
   staging:
@@ -124,4 +125,4 @@ jobs:
       GITHUB_SILENT: true
       GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
       GITHUB_EVENT_NAME: releaser
-      GITHUB_EVENT_PAYLOAD: '{"deployment": "${{ needs.prepare.outputs.deployment }}", "signoz_version": "${{ needs.prepare.outputs.version }}"}'
+      GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"

26  .github/workflows/goci.yaml  vendored
@@ -18,7 +18,7 @@ jobs:
     with:
       PRIMUS_REF: main
       GO_TEST_CONTEXT: ./...
-      GO_VERSION: 1.24
+      GO_VERSION: 1.23
   fmt:
     if: |
       (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -27,7 +27,7 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.24
+      GO_VERSION: 1.23
   lint:
     if: |
       (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -36,7 +36,7 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.24
+      GO_VERSION: 1.23
   deps:
     if: |
       (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -45,7 +45,7 @@ jobs:
     secrets: inherit
     with:
       PRIMUS_REF: main
-      GO_VERSION: 1.24
+      GO_VERSION: 1.23
   build:
     if: |
       (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
@@ -57,7 +57,7 @@ jobs:
     - name: go-install
       uses: actions/setup-go@v5
       with:
-        go-version: "1.24"
+        go-version: "1.23"
     - name: qemu-install
       uses: docker/setup-qemu-action@v3
     - name: aarch64-install
@@ -73,19 +73,3 @@ jobs:
       shell: bash
       run: |
         make docker-build-enterprise
-  openapi:
-    if: |
-      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
-      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
-    runs-on: ubuntu-latest
-    steps:
-      - name: self-checkout
-        uses: actions/checkout@v4
-      - name: go-install
-        uses: actions/setup-go@v5
-        with:
-          go-version: "1.24"
-      - name: generate-openapi
-        run: |
-          go run cmd/enterprise/*.go generate openapi
-          git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in openapi spec. Run go run cmd/enterprise/*.go generate openapi locally and commit."; exit 1)

8  .github/workflows/gor-signoz-community.yaml  vendored
@@ -36,7 +36,7 @@ jobs:
       - ubuntu-latest
       - macos-latest
     env:
-      CONFIG_PATH: cmd/community/.goreleaser.yaml
+      CONFIG_PATH: pkg/query-service/.goreleaser.yaml
     runs-on: ${{ matrix.os }}
     steps:
       - name: checkout
@@ -58,7 +58,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.24"
+          go-version: "1.23"
       - name: cross-compilation-tools
         if: matrix.os == 'ubuntu-latest'
         run: |
@@ -100,7 +100,7 @@ jobs:
     needs: build
     env:
       DOCKER_CLI_EXPERIMENTAL: "enabled"
-      WORKDIR: cmd/community
+      WORKDIR: pkg/query-service
     steps:
       - name: checkout
         uses: actions/checkout@v4
@@ -122,7 +122,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.24"
+          go-version: "1.23"

       # copy the caches from build
       - name: get-sha

7  .github/workflows/gor-signoz.yaml  vendored
@@ -35,7 +35,6 @@ jobs:
       echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
       echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> .env
       echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> .env
-      echo 'PYLON_IDENTITY_SECRET="${{ secrets.PYLON_IDENTITY_SECRET }}"' >> .env
   - name: build-frontend
     run: make js-build
   - name: upload-frontend-artifact
@@ -51,7 +50,7 @@ jobs:
       - ubuntu-latest
       - macos-latest
     env:
-      CONFIG_PATH: cmd/enterprise/.goreleaser.yaml
+      CONFIG_PATH: ee/query-service/.goreleaser.yaml
     runs-on: ${{ matrix.os }}
     steps:
       - name: checkout
@@ -73,7 +72,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.24"
+          go-version: "1.23"
       - name: cross-compilation-tools
         if: matrix.os == 'ubuntu-latest'
         run: |
@@ -136,7 +135,7 @@ jobs:
       - name: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.24"
+          go-version: "1.23"

       # copy the caches from build
       - name: get-sha

25  .github/workflows/integrationci.yaml  vendored
@@ -15,19 +15,14 @@ jobs:
       matrix:
         src:
           - bootstrap
-          - passwordauthn
-          - callbackauthn
-          - cloudintegrations
-          - dashboard
-          - querier
-          - ttl
         sqlstore-provider:
           - postgres
           - sqlite
         clickhouse-version:
-          - 25.5.6
+          - 24.1.2-alpine
+          - 24.12-alpine
         schema-migrator-version:
-          - v0.129.7
+          - v0.128.0
         postgres-version:
           - 15
     if: |
@@ -46,20 +41,6 @@ jobs:
         python -m pip install poetry==2.1.2
         python -m poetry config virtualenvs.in-project true
         cd tests/integration && poetry install --no-root
-      - name: webdriver
-        run: |
-          wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
-          echo "deb http://dl.google.com/linux/chrome/deb/ stable main" | sudo tee -a /etc/apt/sources.list.d/google-chrome.list
-          sudo apt-get update -qqy
-          sudo apt-get -qqy install google-chrome-stable
-          CHROME_VERSION=$(google-chrome-stable --version)
-          CHROME_FULL_VERSION=${CHROME_VERSION%%.*}
-          CHROME_MAJOR_VERSION=${CHROME_FULL_VERSION//[!0-9]}
-          sudo rm /etc/apt/sources.list.d/google-chrome.list
-          export CHROMEDRIVER_VERSION=`curl -s https://googlechromelabs.github.io/chrome-for-testing/LATEST_RELEASE_${CHROME_MAJOR_VERSION%%.*}`
-          curl -L -O "https://storage.googleapis.com/chrome-for-testing-public/${CHROMEDRIVER_VERSION}/linux64/chromedriver-linux64.zip"
-          unzip chromedriver-linux64.zip && chmod +x chromedriver && sudo mv chromedriver /usr/local/bin
-          chromedriver -version
       - name: run
         run: |
          cd tests/integration && \

4  .github/workflows/prereleaser.yaml  vendored
@@ -1,6 +1,10 @@
 name: prereleaser

 on:
+  # schedule every wednesday 6:30 AM UTC (12:00 PM IST)
+  schedule:
+    - cron: '30 6 * * 3'
+
   # allow manual triggering of the workflow by a maintainer
   workflow_dispatch:
     inputs:

62  .github/workflows/run-e2e.yaml  vendored
@@ -1,62 +0,0 @@
-name: e2eci
-
-on:
-  workflow_dispatch:
-    inputs:
-      userRole:
-        description: "Role of the user (ADMIN, EDITOR, VIEWER)"
-        required: true
-        type: choice
-        options:
-          - ADMIN
-          - EDITOR
-          - VIEWER
-
-jobs:
-  test:
-    name: Run Playwright Tests
-    runs-on: ubuntu-latest
-    timeout-minutes: 60
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Setup Node.js
-        uses: actions/setup-node@v4
-        with:
-          node-version: lts/*
-
-      - name: Mask secrets and input
-        run: |
-          echo "::add-mask::${{ secrets.BASE_URL }}"
-          echo "::add-mask::${{ secrets.LOGIN_USERNAME }}"
-          echo "::add-mask::${{ secrets.LOGIN_PASSWORD }}"
-          echo "::add-mask::${{ github.event.inputs.userRole }}"
-
-      - name: Install dependencies
-        working-directory: frontend
-        run: |
-          npm install -g yarn
-          yarn
-
-      - name: Install Playwright Browsers
-        working-directory: frontend
-        run: yarn playwright install --with-deps
-
-      - name: Run Playwright Tests
-        working-directory: frontend
-        run: |
-          BASE_URL="${{ secrets.BASE_URL }}" \
-          LOGIN_USERNAME="${{ secrets.LOGIN_USERNAME }}" \
-          LOGIN_PASSWORD="${{ secrets.LOGIN_PASSWORD }}" \
-          USER_ROLE="${{ github.event.inputs.userRole }}" \
-          yarn playwright test
-
-      - name: Upload Playwright Report
-        uses: actions/upload-artifact@v4
-        if: always()
-        with:
-          name: playwright-report
-          path: frontend/playwright-report/
-          retention-days: 30

6  .gitignore  vendored
@@ -86,8 +86,6 @@ queries.active
 .devenv/**/tmp/**
 .qodo

-.dev
-
 ### Python ###
 # Byte-compiled / optimized / DLL files
 __pycache__/
@@ -230,6 +228,4 @@ poetry.toml
 # LSP config files
 pyrightconfig.json

-# cursor files
-frontend/.cursor/
 # End of https://www.toptal.com/developers/gitignore/api/python

@@ -1,63 +1,34 @@
 version: "2"
 linters:
-  default: none
+  default: standard
   enable:
     - bodyclose
-    - depguard
-    - errcheck
-    - forbidigo
-    - govet
-    - iface
     - ineffassign
     - misspell
     - nilnil
     - sloglint
+    - depguard
+    - iface
     - unparam
     - unused
-  settings:
-    depguard:
-      rules:
-        noerrors:
-          deny:
-            - pkg: errors
-              desc: Do not use errors package. Use github.com/SigNoz/signoz/pkg/errors instead.
-        nozap:
-          deny:
-            - pkg: go.uber.org/zap
-              desc: Do not use zap logger. Use slog instead.
-    forbidigo:
-      forbid:
-        - pattern: fmt.Errorf
-        - pattern: ^(fmt\.Print.*|print|println)$
-    iface:
-      enable:
-        - identical
-    sloglint:
-      no-mixed-args: true
-      kv-only: true
-      no-global: all
-      context: all
-      static-msg: true
-      key-naming-case: snake
-  exclusions:
-    generated: lax
-    presets:
-      - comments
-      - common-false-positives
-      - legacy
-      - std-error-handling
-    paths:
-      - pkg/query-service
-      - ee/query-service
-      - scripts/
-      - tmp/
-      - third_party$
-      - builtin$
-      - examples$
-formatters:
-  exclusions:
-    generated: lax
-    paths:
-      - third_party$
-      - builtin$
-      - examples$

+linters-settings:
+  sloglint:
+    no-mixed-args: true
+    kv-only: true
+    no-global: all
+    context: all
+    static-msg: true
+    msg-style: lowercased
+    key-naming-case: snake
+  depguard:
+    rules:
+      nozap:
+        deny:
+          - pkg: "go.uber.org/zap"
+            desc: "Do not use zap logger. Use slog instead."
+  iface:
+    enable:
+      - identical
+issues:
+  exclude-dirs:
+    - "pkg/query-service"
+    - "ee/query-service"
+    - "scripts/"
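
Note: the deleted settings block above encodes repo conventions — no zap, no stdlib errors, no fmt.Errorf or fmt.Print*, and structured key-value-only slog calls. A hedged Go sketch of code those rules would flag; the function and messages are illustrative only:

package demo

import (
	"context"
	"fmt"
	"log/slog"

	"go.uber.org/zap" // depguard (nozap): denied in favor of slog
)

func handler(ctx context.Context, logger *slog.Logger, err error) error {
	zap.L().Info("starting") // nozap: any zap usage is flagged
	fmt.Println("debug")     // forbidigo: matches ^(fmt\.Print.*|print|println)$
	slog.Info("Started")     // sloglint: no-global forbids the global logger

	// Compliant under the same config: context-aware call on an injected
	// logger, static lowercase message, snake_case key-value pairs.
	logger.InfoContext(ctx, "server started", "listen_addr", ":8080")

	return fmt.Errorf("handler failed: %w", err) // forbidigo: fmt.Errorf is forbidden
}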

62  ADVOCATE.md
@@ -1,62 +0,0 @@
-# SigNoz Community Advocate Program
-
-Our community is filled with passionate developers who love SigNoz and have been helping spread the word about observability across the world. The SigNoz Community Advocate Program is our way of recognizing these incredible community members and creating deeper collaboration opportunities.
-
-## What is the SigNoz Community Advocate Program?
-
-The SigNoz Community Advocate Program celebrates and supports community members who are already passionate about observability and helping fellow developers. If you're someone who loves discussing SigNoz, helping others with their implementations, or sharing knowledge about observability practices, this program is designed with you in mind.
-
-Our advocates are the heart of the SigNoz community, helping other developers succeed with observability and providing valuable insights that help us build better products.
-
-## What Do Advocates Do?
-
-1. **Community Support**
-
-   - Help fellow developers in our Slack community and GitHub Discussions
-   - Answer questions and share solutions
-   - Guide newcomers through SigNoz self-host implementations
-
-2. **Knowledge Sharing**
-
-   - Spread awareness about observability best practices on developer forums
-   - Create content like blog posts, social media posts, and videos
-   - Host local meetups and events in their regions
-
-3. **Product Collaboration**
-
-   - Provide insights on features, changes, and improvements the community needs
-   - Beta test new features and provide early feedback
-   - Help us understand real-world use cases and pain points
-
-## What's In It For You?
-
-**Recognition & Swag**
-
-- Official recognition as a SigNoz advocate
-- Welcome hamper upon joining
-- Exclusive swag box within your first 3 months
-- Feature on our website (with your permission)
-
-**Early Access**
-
-- First look at new features and updates
-- Direct line to the SigNoz team for feedback and suggestions
-- Opportunity to influence product roadmap
-
-**Community Impact**
-
-- Help shape the observability landscape
-- Build your reputation in the developer community
-- Connect with like-minded developers globally
-
-## How Does It Work?
-
-Currently, the SigNoz Community Advocate Program is **invite-only**. We're starting with a small group of passionate community members who have already been making a difference.
-
-We'll be working closely with our first advocates to shape the program details, benefits, and structure based on what works best for everyone involved.
-
-If you're interested in learning more about the program or want to get more involved in the SigNoz community, join our [Slack community](https://signoz-community.slack.com/) and let us know!
-
----
-
-*The SigNoz Community Advocate Program recognizes and celebrates the amazing community members who are already passionate about helping fellow developers succeed with observability.*

@@ -78,5 +78,3 @@ Need assistance? Join our Slack community:

 - Set up your [development environment](docs/contributing/development.md)
-- Deploy and observe [SigNoz in action with OpenTelemetry Demo Application](docs/otel-demo-docs.md)
-- Explore the [SigNoz Community Advocate Program](ADVOCATE.md), which recognises contributors who support the community, share their expertise, and help shape SigNoz's future.
 - Write [integration tests](docs/contributing/go/integration.md)

2  LICENSE
@@ -2,7 +2,7 @@ Copyright (c) 2020-present SigNoz Inc.

 Portions of this software are licensed as follows:

-* All content that resides under the "ee/" and the "cmd/enterprise/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
+* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
 * All third party components incorporated into the SigNoz Software are licensed under the original license provided by the owner of the applicable component.
 * Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.

43  Makefile
@@ -20,18 +20,18 @@ GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO = -X github.com/SigNoz/signoz/ee/zeus.depreca
 GO_BUILD_VERSION_LDFLAGS = -X github.com/SigNoz/signoz/pkg/version.version=$(VERSION) -X github.com/SigNoz/signoz/pkg/version.hash=$(COMMIT_SHORT_SHA) -X github.com/SigNoz/signoz/pkg/version.time=$(TIMESTAMP) -X github.com/SigNoz/signoz/pkg/version.branch=$(BRANCH_NAME)
 GO_BUILD_ARCHS_COMMUNITY = $(addprefix go-build-community-,$(ARCHS))
-GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/cmd/community
+GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/pkg/query-service
 GO_BUILD_LDFLAGS_COMMUNITY = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=community
 GO_BUILD_ARCHS_ENTERPRISE = $(addprefix go-build-enterprise-,$(ARCHS))
 GO_BUILD_ARCHS_ENTERPRISE_RACE = $(addprefix go-build-enterprise-race-,$(ARCHS))
-GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/cmd/enterprise
+GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/ee/query-service
 GO_BUILD_LDFLAGS_ENTERPRISE = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=enterprise $(GO_BUILD_LDFLAG_ZEUS_URL) $(GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO)

 DOCKER_BUILD_ARCHS_COMMUNITY = $(addprefix docker-build-community-,$(ARCHS))
-DOCKERFILE_COMMUNITY = $(SRC)/cmd/community/Dockerfile
+DOCKERFILE_COMMUNITY = $(SRC)/pkg/query-service/Dockerfile
 DOCKER_REGISTRY_COMMUNITY ?= docker.io/signoz/signoz-community
 DOCKER_BUILD_ARCHS_ENTERPRISE = $(addprefix docker-build-enterprise-,$(ARCHS))
-DOCKERFILE_ENTERPRISE = $(SRC)/cmd/enterprise/Dockerfile
+DOCKERFILE_ENTERPRISE = $(SRC)/ee/query-service/Dockerfile
 DOCKER_REGISTRY_ENTERPRISE ?= docker.io/signoz/signoz
 JS_BUILD_CONTEXT = $(SRC)/frontend
@@ -61,17 +61,6 @@ devenv-postgres: ## Run postgres in devenv
 	@cd .devenv/docker/postgres; \
 	docker compose -f compose.yaml up -d

-.PHONY: devenv-signoz-otel-collector
-devenv-signoz-otel-collector: ## Run signoz-otel-collector in devenv (requires clickhouse to be running)
-	@cd .devenv/docker/signoz-otel-collector; \
-	docker compose -f compose.yaml up -d
-
-.PHONY: devenv-up
-devenv-up: devenv-clickhouse devenv-signoz-otel-collector ## Start both clickhouse and signoz-otel-collector for local development
-	@echo "Development environment is ready!"
-	@echo "  - ClickHouse: http://localhost:8123"
-	@echo "  - Signoz OTel Collector: grpc://localhost:4317, http://localhost:4318"

 ##############################################################
 # go commands
 ##############################################################
@@ -84,9 +73,10 @@ go-run-enterprise: ## Runs the enterprise go backend server
 	SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
 	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
 	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
 	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER=cluster \
 	go run -race \
-	$(GO_BUILD_CONTEXT_ENTERPRISE)/*.go server
+	$(GO_BUILD_CONTEXT_ENTERPRISE)/main.go \
+	--config ./conf/prometheus.yml \
+	--cluster cluster

 .PHONY: go-test
 go-test: ## Runs go unit tests
@@ -101,9 +91,10 @@ go-run-community: ## Runs the community go backend server
 	SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
 	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
 	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
 	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER=cluster \
 	go run -race \
-	$(GO_BUILD_CONTEXT_COMMUNITY)/*.go server
+	$(GO_BUILD_CONTEXT_COMMUNITY)/main.go \
+	--config ./conf/prometheus.yml \
+	--cluster cluster

 .PHONY: go-build-community $(GO_BUILD_ARCHS_COMMUNITY)
 go-build-community: ## Builds the go backend server for community
@@ -112,9 +103,9 @@ $(GO_BUILD_ARCHS_COMMUNITY): go-build-community-%: $(TARGET_DIR)
 	@mkdir -p $(TARGET_DIR)/$(OS)-$*
 	@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)-community"
 	@if [ $* = "arm64" ]; then \
-		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
+		CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
 	else \
-		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
+		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
 	fi

@@ -125,9 +116,9 @@ $(GO_BUILD_ARCHS_ENTERPRISE): go-build-enterprise-%: $(TARGET_DIR)
 	@mkdir -p $(TARGET_DIR)/$(OS)-$*
 	@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
 	@if [ $* = "arm64" ]; then \
-		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
+		CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
 	else \
-		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
+		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
 	fi

 .PHONY: go-build-enterprise-race $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
@@ -137,9 +128,9 @@ $(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
 	@mkdir -p $(TARGET_DIR)/$(OS)-$*
 	@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
 	@if [ $* = "arm64" ]; then \
-		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
+		CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
 	else \
-		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
+		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
 	fi

@@ -206,4 +197,4 @@ py-lint: ## Run lint for integration tests

 .PHONY: py-test
 py-test: ## Runs integration tests
-	@cd tests/integration && poetry run pytest --basetemp=./tmp/ -vv --capture=no src/
+	@cd tests/integration && poetry run pytest --basetemp=./tmp/ -vv --capture=no src/

@@ -8,6 +8,7 @@
 <p align="center">All your logs, metrics, and traces in one place. Monitor your application, spot issues before they occur and troubleshoot downtime quickly with rich context. SigNoz is a cost-effective open-source alternative to Datadog and New Relic. Visit <a href="https://signoz.io" target="_blank">signoz.io</a> for the full documentation, tutorials, and guide.</p>

 <p align="center">
+  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/signoz.svg?label=Docker%20Downloads"> </a>
   <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
   <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
   <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -230,13 +231,11 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
 - [Shaheer Kochai](https://github.com/ahmadshaheer)
 - [Amlan Kumar Nandy](https://github.com/amlannandy)
 - [Sahil Khan](https://github.com/sawhil)
-- [Aditya Singh](https://github.com/aks07)
-- [Abhi Kumar](https://github.com/ahrefabhi)

 #### DevOps

 - [Prashant Shahi](https://github.com/prashant-shahi)
-- [Vibhu Pandey](https://github.com/therealpandey)
+- [Vibhu Pandey](https://github.com/grandwizard28)

 <br /><br />

@@ -1,19 +0,0 @@
-package main
-
-import (
-	"log/slog"
-
-	"github.com/SigNoz/signoz/cmd"
-	"github.com/SigNoz/signoz/pkg/instrumentation"
-)
-
-func main() {
-	// initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application.
-	logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})
-
-	// register a list of commands to the root command
-	registerServer(cmd.RootCmd, logger)
-	cmd.RegisterGenerate(cmd.RootCmd, logger)
-
-	cmd.Execute(logger)
-}
@@ -1,122 +0,0 @@
-package main
-
-import (
-	"context"
-	"log/slog"
-
-	"github.com/SigNoz/signoz/cmd"
-	"github.com/SigNoz/signoz/ee/authz/openfgaauthz"
-	"github.com/SigNoz/signoz/ee/authz/openfgaschema"
-	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
-	"github.com/SigNoz/signoz/pkg/analytics"
-	"github.com/SigNoz/signoz/pkg/authn"
-	"github.com/SigNoz/signoz/pkg/authz"
-	"github.com/SigNoz/signoz/pkg/factory"
-	"github.com/SigNoz/signoz/pkg/licensing"
-	"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
-	"github.com/SigNoz/signoz/pkg/modules/organization"
-	"github.com/SigNoz/signoz/pkg/query-service/app"
-	"github.com/SigNoz/signoz/pkg/signoz"
-	"github.com/SigNoz/signoz/pkg/sqlschema"
-	"github.com/SigNoz/signoz/pkg/sqlstore"
-	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
-	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/version"
-	"github.com/SigNoz/signoz/pkg/zeus"
-	"github.com/SigNoz/signoz/pkg/zeus/noopzeus"
-	"github.com/spf13/cobra"
-)
-
-func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
-	var flags signoz.DeprecatedFlags
-
-	serverCmd := &cobra.Command{
-		Use:                "server",
-		Short:              "Run the SigNoz server",
-		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
-		RunE: func(currCmd *cobra.Command, args []string) error {
-			config, err := cmd.NewSigNozConfig(currCmd.Context(), logger, flags)
-			if err != nil {
-				return err
-			}
-
-			return runServer(currCmd.Context(), config, logger)
-		},
-	}
-
-	flags.RegisterFlags(serverCmd)
-	parentCmd.AddCommand(serverCmd)
-}
-
-func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error {
-	// print the version
-	version.Info.PrettyPrint(config.Version)
-
-	// add enterprise sqlstore factories to the community sqlstore factories
-	sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
-	if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
-		logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
-		return err
-	}
-
-	signoz, err := signoz.New(
-		ctx,
-		config,
-		zeus.Config{},
-		noopzeus.NewProviderFactory(),
-		licensing.Config{},
-		func(_ sqlstore.SQLStore, _ zeus.Zeus, _ organization.Getter, _ analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
-			return nooplicensing.NewFactory()
-		},
-		signoz.NewEmailingProviderFactories(),
-		signoz.NewCacheProviderFactories(),
-		signoz.NewWebProviderFactories(),
-		func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
-			return signoz.NewSQLSchemaProviderFactories(sqlstore)
-		},
-		signoz.NewSQLStoreProviderFactories(),
-		signoz.NewTelemetryStoreProviderFactories(),
-		func(ctx context.Context, providerSettings factory.ProviderSettings, store authtypes.AuthNStore, licensing licensing.Licensing) (map[authtypes.AuthNProvider]authn.AuthN, error) {
-			return signoz.NewAuthNs(ctx, providerSettings, store, licensing)
-		},
-		func(ctx context.Context, sqlstore sqlstore.SQLStore) factory.ProviderFactory[authz.AuthZ, authz.Config] {
-			return openfgaauthz.NewProviderFactory(sqlstore, openfgaschema.NewSchema().Get(ctx))
-		},
-	)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to create signoz", "error", err)
-		return err
-	}
-
-	server, err := app.NewServer(config, signoz)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to create server", "error", err)
-		return err
-	}
-
-	if err := server.Start(ctx); err != nil {
-		logger.ErrorContext(ctx, "failed to start server", "error", err)
-		return err
-	}
-
-	signoz.Start(ctx)
-
-	if err := signoz.Wait(ctx); err != nil {
-		logger.ErrorContext(ctx, "failed to start signoz", "error", err)
-		return err
-	}
-
-	err = server.Stop(ctx)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to stop server", "error", err)
-		return err
-	}
-
-	err = signoz.Stop(ctx)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
-		return err
-	}
-
-	return nil
-}
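
Note: the deleted server wiring above is built around generic provider factories passed into signoz.New. A stripped-down sketch of that dependency-injection shape — the types here are invented for illustration and are not SigNoz's actual factory package:

package main

import "context"

// Factory constructs a provider T from a config C; it mirrors the shape of
// the factory.ProviderFactory[T, C] generics used in the wiring above.
type Factory[T any, C any] interface {
	New(ctx context.Context, cfg C) (T, error)
}

type funcFactory[T any, C any] func(context.Context, C) (T, error)

func (f funcFactory[T, C]) New(ctx context.Context, cfg C) (T, error) { return f(ctx, cfg) }

// NewFuncFactory adapts a plain constructor function into a Factory, the way
// the closures passed to signoz.New adapt concrete providers (noop licensing,
// openfga authz, and so on) to a common factory interface.
func NewFuncFactory[T any, C any](fn func(context.Context, C) (T, error)) Factory[T, C] {
	return funcFactory[T, C](fn)
}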

@@ -1,31 +0,0 @@
-package cmd
-
-import (
-	"context"
-	"log/slog"
-
-	"github.com/SigNoz/signoz/pkg/config"
-	"github.com/SigNoz/signoz/pkg/config/envprovider"
-	"github.com/SigNoz/signoz/pkg/config/fileprovider"
-	"github.com/SigNoz/signoz/pkg/signoz"
-)
-
-func NewSigNozConfig(ctx context.Context, logger *slog.Logger, flags signoz.DeprecatedFlags) (signoz.Config, error) {
-	config, err := signoz.NewConfig(
-		ctx,
-		logger,
-		config.ResolverConfig{
-			Uris: []string{"env:"},
-			ProviderFactories: []config.ProviderFactory{
-				envprovider.NewFactory(),
-				fileprovider.NewFactory(),
-			},
-		},
-		flags,
-	)
-	if err != nil {
-		return signoz.Config{}, err
-	}
-
-	return config, nil
-}
@@ -1,19 +0,0 @@
-package main
-
-import (
-	"log/slog"
-
-	"github.com/SigNoz/signoz/cmd"
-	"github.com/SigNoz/signoz/pkg/instrumentation"
-)
-
-func main() {
-	// initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application.
-	logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})
-
-	// register a list of commands to the root command
-	registerServer(cmd.RootCmd, logger)
-	cmd.RegisterGenerate(cmd.RootCmd, logger)
-
-	cmd.Execute(logger)
-}
@@ -1,151 +0,0 @@
-package main
-
-import (
-	"context"
-	"log/slog"
-	"time"
-
-	"github.com/SigNoz/signoz/cmd"
-	"github.com/SigNoz/signoz/ee/authn/callbackauthn/oidccallbackauthn"
-	"github.com/SigNoz/signoz/ee/authn/callbackauthn/samlcallbackauthn"
-	"github.com/SigNoz/signoz/ee/authz/openfgaauthz"
-	"github.com/SigNoz/signoz/ee/authz/openfgaschema"
-	enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
-	"github.com/SigNoz/signoz/ee/licensing/httplicensing"
-	enterpriseapp "github.com/SigNoz/signoz/ee/query-service/app"
-	"github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
-	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
-	enterprisezeus "github.com/SigNoz/signoz/ee/zeus"
-	"github.com/SigNoz/signoz/ee/zeus/httpzeus"
-	"github.com/SigNoz/signoz/pkg/analytics"
-	"github.com/SigNoz/signoz/pkg/authn"
-	"github.com/SigNoz/signoz/pkg/authz"
-	"github.com/SigNoz/signoz/pkg/factory"
-	"github.com/SigNoz/signoz/pkg/licensing"
-	"github.com/SigNoz/signoz/pkg/modules/organization"
-	"github.com/SigNoz/signoz/pkg/signoz"
-	"github.com/SigNoz/signoz/pkg/sqlschema"
-	"github.com/SigNoz/signoz/pkg/sqlstore"
-	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
-	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/version"
-	"github.com/SigNoz/signoz/pkg/zeus"
-	"github.com/spf13/cobra"
-)
-
-func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
-	var flags signoz.DeprecatedFlags
-
-	serverCmd := &cobra.Command{
-		Use:                "server",
-		Short:              "Run the SigNoz server",
-		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
-		RunE: func(currCmd *cobra.Command, args []string) error {
-			config, err := cmd.NewSigNozConfig(currCmd.Context(), logger, flags)
-			if err != nil {
-				return err
-			}
-
-			return runServer(currCmd.Context(), config, logger)
-		},
-	}
-
-	flags.RegisterFlags(serverCmd)
-	parentCmd.AddCommand(serverCmd)
-}
-
-func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error {
-	// print the version
-	version.Info.PrettyPrint(config.Version)
-
-	// add enterprise sqlstore factories to the community sqlstore factories
-	sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
-	if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory(), sqlstorehook.NewInstrumentationFactory())); err != nil {
-		logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
-		return err
-	}
-
-	signoz, err := signoz.New(
-		ctx,
-		config,
-		enterprisezeus.Config(),
-		httpzeus.NewProviderFactory(),
-		enterpriselicensing.Config(24*time.Hour, 3),
-		func(sqlstore sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
-			return httplicensing.NewProviderFactory(sqlstore, zeus, orgGetter, analytics)
-		},
-		signoz.NewEmailingProviderFactories(),
-		signoz.NewCacheProviderFactories(),
-		signoz.NewWebProviderFactories(),
-		func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
-			existingFactories := signoz.NewSQLSchemaProviderFactories(sqlstore)
-			if err := existingFactories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil {
-				panic(err)
-			}
-
-			return existingFactories
-		},
-		sqlstoreFactories,
-		signoz.NewTelemetryStoreProviderFactories(),
-		func(ctx context.Context, providerSettings factory.ProviderSettings, store authtypes.AuthNStore, licensing licensing.Licensing) (map[authtypes.AuthNProvider]authn.AuthN, error) {
-			samlCallbackAuthN, err := samlcallbackauthn.New(ctx, store, licensing)
-			if err != nil {
-				return nil, err
-			}
-
-			oidcCallbackAuthN, err := oidccallbackauthn.New(store, licensing, providerSettings)
-			if err != nil {
-				return nil, err
-			}
-
-			authNs, err := signoz.NewAuthNs(ctx, providerSettings, store, licensing)
-			if err != nil {
-				return nil, err
-			}
-
-			authNs[authtypes.AuthNProviderSAML] = samlCallbackAuthN
-			authNs[authtypes.AuthNProviderOIDC] = oidcCallbackAuthN
-
-			return authNs, nil
-		},
-		func(ctx context.Context, sqlstore sqlstore.SQLStore) factory.ProviderFactory[authz.AuthZ, authz.Config] {
-			return openfgaauthz.NewProviderFactory(sqlstore, openfgaschema.NewSchema().Get(ctx))
-		},
-	)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to create signoz", "error", err)
-		return err
-	}
-
-	server, err := enterpriseapp.NewServer(config, signoz)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to create server", "error", err)
-		return err
-	}
-
-	if err := server.Start(ctx); err != nil {
-		logger.ErrorContext(ctx, "failed to start server", "error", err)
-		return err
-	}
-
-	signoz.Start(ctx)
-
-	if err := signoz.Wait(ctx); err != nil {
-		logger.ErrorContext(ctx, "failed to start signoz", "error", err)
-		return err
-	}
-
-	err = server.Stop(ctx)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to stop server", "error", err)
-		return err
-	}
-
-	err = signoz.Stop(ctx)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
-		return err
-	}
-
-	return nil
-}
@@ -1,21 +0,0 @@
-package cmd
-
-import (
-	"log/slog"
-
-	"github.com/spf13/cobra"
-)
-
-func RegisterGenerate(parentCmd *cobra.Command, logger *slog.Logger) {
-	var generateCmd = &cobra.Command{
-		Use:               "generate",
-		Short:             "Generate artifacts",
-		SilenceUsage:      true,
-		SilenceErrors:     true,
-		CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
-	}
-
-	registerGenerateOpenAPI(generateCmd)
-
-	parentCmd.AddCommand(generateCmd)
-}
@@ -1,41 +0,0 @@
-package cmd
-
-import (
-	"context"
-	"log/slog"
-
-	"github.com/SigNoz/signoz/pkg/instrumentation"
-	"github.com/SigNoz/signoz/pkg/signoz"
-	"github.com/SigNoz/signoz/pkg/version"
-	"github.com/spf13/cobra"
-)
-
-func registerGenerateOpenAPI(parentCmd *cobra.Command) {
-	openapiCmd := &cobra.Command{
-		Use:   "openapi",
-		Short: "Generate OpenAPI schema for SigNoz",
-		RunE: func(currCmd *cobra.Command, args []string) error {
-			return runGenerateOpenAPI(currCmd.Context())
-		},
-	}
-
-	parentCmd.AddCommand(openapiCmd)
-}
-
-func runGenerateOpenAPI(ctx context.Context) error {
-	instrumentation, err := instrumentation.New(ctx, instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}}, version.Info, "signoz")
-	if err != nil {
-		return err
-	}
-
-	openapi, err := signoz.NewOpenAPI(ctx, instrumentation)
-	if err != nil {
-		return err
-	}
-
-	if err := openapi.CreateAndWrite("docs/api/openapi.yml"); err != nil {
-		return err
-	}
-
-	return nil
-}

33  cmd/root.go
@@ -1,33 +0,0 @@
-package cmd
-
-import (
-	"log/slog"
-	"os"
-
-	"github.com/SigNoz/signoz/pkg/version"
-	"github.com/spf13/cobra"
-	"go.uber.org/zap" //nolint:depguard
-)
-
-var RootCmd = &cobra.Command{
-	Use:               "signoz",
-	Short:             "OpenTelemetry-Native Logs, Metrics and Traces in a single pane",
-	Version:           version.Info.Version(),
-	SilenceUsage:      true,
-	SilenceErrors:     true,
-	CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
-}
-
-func Execute(logger *slog.Logger) {
-	zapLogger := newZapLogger()
-	zap.ReplaceGlobals(zapLogger)
-	defer func() {
-		_ = zapLogger.Sync()
-	}()
-
-	err := RootCmd.Execute()
-	if err != nil {
-		logger.ErrorContext(RootCmd.Context(), "error running command", "error", err)
-		os.Exit(1)
-	}
-}

15  cmd/zap.go
@@ -1,15 +0,0 @@
-package cmd
-
-import (
-	"go.uber.org/zap"         //nolint:depguard
-	"go.uber.org/zap/zapcore" //nolint:depguard
-)
-
-// Deprecated: Use `NewLogger` from `pkg/instrumentation` instead.
-func newZapLogger() *zap.Logger {
-	config := zap.NewProductionConfig()
-	config.EncoderConfig.TimeKey = "timestamp"
-	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
-	logger, _ := config.Build()
-	return logger
-}
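
Note: cmd/zap.go above is explicitly deprecated in favor of slog-based logging. A minimal slog equivalent as a sketch — the handler choice and options are assumptions, not the actual pkg/instrumentation implementation:

package main

import (
	"log/slog"
	"os"
)

func main() {
	// A JSON handler at Info level, standing in for the zap production
	// config built by the deprecated newZapLogger above.
	logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))
	logger.Info("server starting", "addr", ":8080")
}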

@@ -1,5 +1,5 @@
 ##################### SigNoz Configuration Example #####################
-#
+#
 # Do not modify this file
 #
@@ -47,10 +47,10 @@ cache:
   provider: memory
   # memory: Uses in-memory caching.
   memory:
-    # Max items for the in-memory cache (10x the entries)
-    num_counters: 100000
-    # Total cost in bytes allocated bounded cache
-    max_cost: 67108864
+    # Time-to-live for cache entries in memory. Specify the duration in ns
+    ttl: 60000000000
+    # The interval at which the cache will be cleaned up
+    cleanup_interval: 1m
   # redis: Uses Redis as the caching backend.
   redis:
     # The hostname or IP address of the Redis server.
@@ -58,7 +58,7 @@ cache:
     # The port on which the Redis server is running. Default is usually 6379.
     port: 6379
     # The password for authenticating with the Redis server, if required.
-    password:
+    password:
     # The Redis database number to use
     db: 0

@@ -71,10 +71,6 @@ sqlstore:
   sqlite:
     # The path to the SQLite database file.
     path: /var/lib/signoz/signoz.db
-    # Mode is the mode to use for the sqlite database.
-    mode: delete
-    # BusyTimeout is the timeout for the sqlite database to wait for a lock.
-    busy_timeout: 10s

 ##################### APIServer #####################
 apiserver:
@@ -125,8 +121,6 @@ telemetrystore:
       timeout_before_checking_execution_speed: 0
       max_bytes_to_read: 0
       max_result_rows: 0
-      ignore_data_skipping_indices: ""
-      secondary_indices_enable_bulk_filtering: false

 ##################### Prometheus #####################
 prometheus:
@@ -141,7 +135,10 @@ prometheus:
 ##################### Alertmanager #####################
 alertmanager:
   # Specifies the alertmanager provider to use.
-  provider: signoz
+  provider: legacy
+  legacy:
+    # The API URL (with prefix) of the legacy Alertmanager instance.
+    api_url: http://localhost:9093/api
   signoz:
     # The poll interval for periodically syncing the alertmanager with the config in the store.
     poll_interval: 1m
@@ -242,32 +239,8 @@ statsreporter:
   # Whether to collect identities and traits (emails).
   identities: true

 ##################### Gateway (License only) #####################
 gateway:
   # The URL of the gateway's api.
   url: http://localhost:8080

-##################### Tokenizer #####################
-tokenizer:
-  # Specifies the tokenizer provider to use.
-  provider: jwt
-  lifetime:
-    # The duration for which a user can be idle before being required to authenticate.
-    idle: 168h
-    # The duration for which a user can remain logged in before being asked to login.
-    max: 720h
-  rotation:
-    # The interval to rotate tokens in.
-    interval: 30m
-    # The duration for which the previous token pair remains valid after a token pair is rotated.
-    duration: 60s
-  jwt:
-    # The secret to sign the JWT tokens.
-    secret: secret
-  opaque:
-    gc:
-      # The interval to perform garbage collection.
-      interval: 1h
-    token:
-      # The maximum number of tokens a user can have. This limits the number of concurrent sessions a user can have.
-      max_per_user: 5
@@ -11,7 +11,7 @@ x-common: &common
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  image: clickhouse/clickhouse-server:25.5.6
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  deploy:
    labels:
@@ -37,11 +37,9 @@ x-clickhouse-defaults: &clickhouse-defaults
    nofile:
      soft: 262144
      hard: 262144
  environment:
    - CLICKHOUSE_SKIP_USER_SETUP=1
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: signoz/zookeeper:3.7.1
  image: bitnami/zookeeper:3.7.1
  user: root
  deploy:
    labels:
@@ -65,7 +63,7 @@ x-db-depend: &db-depend
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:25.5.6
    image: clickhouse/clickhouse-server:24.1.2-alpine
    command:
      - bash
      - -c
@@ -176,7 +174,7 @@ services:
    # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
    image: signoz/signoz:v0.104.0
    image: signoz/signoz:v0.89.0
    command:
      - --config=/root/config/prometheus.yml
    ports:
@@ -209,7 +207,7 @@ services:
      retries: 3
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:v0.129.12
    image: signoz/signoz-otel-collector:v0.128.0
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
@@ -233,7 +231,7 @@ services:
      - signoz
  schema-migrator:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:v0.129.12
    image: signoz/signoz-schema-migrator:v0.128.0
    deploy:
      restart_policy:
        condition: on-failure

@@ -11,7 +11,7 @@ x-common: &common
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  image: clickhouse/clickhouse-server:25.5.6
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  deploy:
    labels:
@@ -36,11 +36,9 @@ x-clickhouse-defaults: &clickhouse-defaults
    nofile:
      soft: 262144
      hard: 262144
  environment:
    - CLICKHOUSE_SKIP_USER_SETUP=1
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: signoz/zookeeper:3.7.1
  image: bitnami/zookeeper:3.7.1
  user: root
  deploy:
    labels:
@@ -62,7 +60,7 @@ x-db-depend: &db-depend
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:25.5.6
    image: clickhouse/clickhouse-server:24.1.2-alpine
    command:
      - bash
      - -c
@@ -117,7 +115,7 @@ services:
    # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
    image: signoz/signoz:v0.104.0
    image: signoz/signoz:v0.89.0
    command:
      - --config=/root/config/prometheus.yml
    ports:
@@ -150,7 +148,7 @@ services:
      retries: 3
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:v0.129.12
    image: signoz/signoz-otel-collector:v0.128.0
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
@@ -176,7 +174,7 @@ services:
      - signoz
  schema-migrator:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:v0.129.12
    image: signoz/signoz-schema-migrator:v0.128.0
    deploy:
      restart_policy:
        condition: on-failure

@@ -1,10 +1,3 @@
connectors:
  signozmeter:
    metrics_flush_interval: 1h
    dimensions:
      - name: service.name
      - name: deployment.environment
      - name: host.name
receivers:
  otlp:
    protocols:
@@ -28,10 +21,6 @@ processors:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  batch/meter:
    send_batch_max_size: 25000
    send_batch_size: 20000
    timeout: 1s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system]
@@ -77,11 +66,6 @@ exporters:
    dsn: tcp://clickhouse:9000/signoz_logs
    timeout: 10s
    use_new_schema: true
  signozclickhousemeter:
    dsn: tcp://clickhouse:9000/signoz_meter
    timeout: 45s
    sending_queue:
      enabled: false
service:
  telemetry:
    logs:
@@ -93,20 +77,16 @@ service:
    traces:
      receivers: [otlp]
      processors: [signozspanmetrics/delta, batch]
      exporters: [clickhousetraces, signozmeter]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [signozclickhousemetrics, signozmeter]
      exporters: [signozclickhousemetrics]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [signozclickhousemetrics, signozmeter]
      exporters: [signozclickhousemetrics]
    logs:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhouselogsexporter, signozmeter]
    metrics/meter:
      receivers: [signozmeter]
      processors: [batch/meter]
      exporters: [signozclickhousemeter]
      exporters: [clickhouselogsexporter]

@@ -10,7 +10,7 @@ x-common: &common
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  # adding a non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:25.5.6
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  labels:
    signoz.io/scrape: "true"
@@ -40,11 +40,9 @@ x-clickhouse-defaults: &clickhouse-defaults
    nofile:
      soft: 262144
      hard: 262144
  environment:
    - CLICKHOUSE_SKIP_USER_SETUP=1
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: signoz/zookeeper:3.7.1
  image: bitnami/zookeeper:3.7.1
  user: root
  labels:
    signoz.io/scrape: "true"
@@ -67,7 +65,7 @@ x-db-depend: &db-depend
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:25.5.6
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-init-clickhouse
    command:
      - bash
@@ -179,7 +177,7 @@ services:
    # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
    image: signoz/signoz:${VERSION:-v0.104.0}
    image: signoz/signoz:${VERSION:-v0.89.0}
    container_name: signoz
    command:
      - --config=/root/config/prometheus.yml
@@ -213,7 +211,7 @@ services:
  # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.12}
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.128.0}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
@@ -239,7 +237,7 @@ services:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.12}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.0}
    container_name: schema-migrator-sync
    command:
      - sync
@@ -250,7 +248,7 @@ services:
        condition: service_healthy
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.12}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.0}
    container_name: schema-migrator-async
    command:
      - async

@@ -9,7 +9,8 @@ x-common: &common
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  image: clickhouse/clickhouse-server:25.5.6
  # adding a non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  labels:
    signoz.io/scrape: "true"
@@ -35,11 +36,9 @@ x-clickhouse-defaults: &clickhouse-defaults
    nofile:
      soft: 262144
      hard: 262144
  environment:
    - CLICKHOUSE_SKIP_USER_SETUP=1
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: signoz/zookeeper:3.7.1
  image: bitnami/zookeeper:3.7.1
  user: root
  labels:
    signoz.io/scrape: "true"
@@ -62,7 +61,7 @@ x-db-depend: &db-depend
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:25.5.6
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-init-clickhouse
    command:
      - bash
@@ -111,7 +110,7 @@ services:
    # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
    image: signoz/signoz:${VERSION:-v0.104.0}
    image: signoz/signoz:${VERSION:-v0.89.0}
    container_name: signoz
    command:
      - --config=/root/config/prometheus.yml
@@ -144,7 +143,7 @@ services:
      retries: 3
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.12}
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.128.0}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
@@ -166,7 +165,7 @@ services:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.12}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.0}
    container_name: schema-migrator-sync
    command:
      - sync
@@ -178,7 +177,7 @@ services:
    restart: on-failure
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.12}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.0}
    container_name: schema-migrator-async
    command:
      - async

@@ -1,10 +1,3 @@
connectors:
  signozmeter:
    metrics_flush_interval: 1h
    dimensions:
      - name: service.name
      - name: deployment.environment
      - name: host.name
receivers:
  otlp:
    protocols:
@@ -28,10 +21,6 @@ processors:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  batch/meter:
    send_batch_max_size: 25000
    send_batch_size: 20000
    timeout: 1s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system]
@@ -77,11 +66,6 @@ exporters:
    dsn: tcp://clickhouse:9000/signoz_logs
    timeout: 10s
    use_new_schema: true
  signozclickhousemeter:
    dsn: tcp://clickhouse:9000/signoz_meter
    timeout: 45s
    sending_queue:
      enabled: false
service:
  telemetry:
    logs:
@@ -93,20 +77,16 @@ service:
    traces:
      receivers: [otlp]
      processors: [signozspanmetrics/delta, batch]
      exporters: [clickhousetraces, signozmeter]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [signozclickhousemetrics, signozmeter]
      exporters: [signozclickhousemetrics]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [signozclickhousemetrics, signozmeter]
      exporters: [signozclickhousemetrics]
    logs:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhouselogsexporter, signozmeter]
    metrics/meter:
      receivers: [signozmeter]
      processors: [batch/meter]
      exporters: [signozclickhousemeter]
      exporters: [clickhouselogsexporter]

docs/api/openapi.yml (2293 changes)
File diff suppressed because it is too large
@@ -13,6 +13,8 @@ Before diving in, make sure you have these tools installed:
  - Download from [go.dev/dl](https://go.dev/dl/)
  - Check [go.mod](../../go.mod#L3) for the minimum version

- **GCC** - Required for CGO dependencies
  - Download from [gcc.gnu.org](https://gcc.gnu.org/)

- **Node** - Powers our frontend
  - Download from [nodejs.org](https://nodejs.org)
@@ -42,35 +44,20 @@ Before diving in, make sure you have these tools installed:

SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one.

### 1. Setting up ClickHouse
### 1. Setting up Clickhouse

First, we need to get ClickHouse running:
First, we need to get Clickhouse running:

```bash
make devenv-clickhouse
```

This command:
- Starts ClickHouse in a single-shard, single-replica cluster
- Starts Clickhouse in a single-shard, single-replica cluster
- Sets up Zookeeper
- Runs the latest schema migrations

### 2. Setting up SigNoz OpenTelemetry Collector

Next, start the OpenTelemetry Collector to receive telemetry data:

```bash
make devenv-signoz-otel-collector
```

This command:
- Starts the SigNoz OpenTelemetry Collector
- Listens on port 4317 (gRPC) and 4318 (HTTP) for incoming telemetry data
- Forwards data to ClickHouse for storage

> 💡 **Quick Setup**: Use `make devenv-up` to start both ClickHouse and OTel Collector together

### 3. Starting the Backend
### 2. Starting the Backend

1. Run the backend server:
   ```bash
@@ -86,24 +73,19 @@ This command:

> 💡 **Tip**: The API server runs at `http://localhost:8080/` by default

### 4. Setting up the Frontend
### 3. Setting up the Frontend

1. Navigate to the frontend directory:
   ```bash
   cd frontend
   ```

2. Install dependencies:
1. Install dependencies:
   ```bash
   yarn install
   ```

3. Create a `.env` file in this directory:
2. Create a `.env` file in the `frontend` directory:
   ```env
   FRONTEND_API_ENDPOINT=http://localhost:8080
   ```

4. Start the development server:
3. Start the development server:
   ```bash
   yarn dev
   ```
@@ -111,25 +93,3 @@ This command:

> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code

Now you're all set to start developing! Happy coding! 🎉

## Verifying Your Setup
To verify everything is working correctly:

1. **Check ClickHouse**: `curl http://localhost:8123/ping` (should return "Ok.")
2. **Check OTel Collector**: `curl http://localhost:13133` (should return health status)
3. **Check Backend**: `curl http://localhost:8080/api/v1/health` (should return `{"status":"ok"}`)
4. **Check Frontend**: Open `http://localhost:3301` in your browser

## How to send test data?

You can now send telemetry data to your local SigNoz instance:

- **OTLP gRPC**: `localhost:4317`
- **OTLP HTTP**: `localhost:4318`

For example, using `curl` to send a test trace:
```bash
curl -X POST http://localhost:4318/v1/traces \
  -H "Content-Type: application/json" \
  -d '{"resourceSpans":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"test-service"}}]},"scopeSpans":[{"spans":[{"traceId":"12345678901234567890123456789012","spanId":"1234567890123456","name":"test-span","startTimeUnixNano":"1609459200000000000","endTimeUnixNano":"1609459201000000000"}]}]}]}'
```

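Similarly, a test log record can be sent to the OTLP HTTP endpoint. This is a minimal hand-rolled payload following the OTLP/JSON shape; the body text and timestamp are arbitrary:
```bash
curl -X POST http://localhost:4318/v1/logs \
  -H "Content-Type: application/json" \
  -d '{"resourceLogs":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"test-service"}}]},"scopeLogs":[{"logRecords":[{"timeUnixNano":"1609459200000000000","severityText":"INFO","body":{"stringValue":"test log via curl"}}]}]}]}'
```
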
@@ -1,213 +0,0 @@
# Integration Tests

SigNoz uses integration tests to verify that different components work together correctly in a real environment. These tests run against actual services (ClickHouse, PostgreSQL, etc.) to ensure end-to-end functionality.

## How to set up the integration test environment?

### Prerequisites

Before running integration tests, ensure you have the following installed:

- Python 3.13+
- Poetry (for dependency management)
- Docker (for containerized services)

### Initial Setup

1. Navigate to the integration tests directory:
   ```bash
   cd tests/integration
   ```

2. Install dependencies using Poetry:
   ```bash
   poetry install --no-root
   ```

### Starting the Test Environment

To spin up all the containers necessary for writing integration tests and keep them running:

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/bootstrap/setup.py::test_setup
```

This command will:
- Start all required services (ClickHouse, PostgreSQL, Zookeeper, etc.)
- Keep containers running due to the `--reuse` flag
- Verify that the setup is working correctly

### Stopping the Test Environment

When you're done writing integration tests, clean up the environment:

```bash
poetry run pytest --basetemp=./tmp/ -vv --teardown -s src/bootstrap/setup.py::test_teardown
```

This will destroy the running integration test setup and clean up resources.

## Understanding the Integration Test Framework

Python and pytest form the foundation of the integration testing framework. Testcontainers are used to spin up disposable integration environments. Wiremock is used to spin up **test doubles** of other services.

- **Why Python/pytest?** It's expressive, low-boilerplate, and has powerful fixture capabilities that make integration testing straightforward. Extensive libraries for HTTP requests, JSON handling, and data analysis (numpy) make it easier to test APIs and verify data.
- **Why testcontainers?** They let us spin up isolated dependencies that match our production environment without complex setup (see the sketch after the directory layout below).
- **Why wiremock?** Well maintained, documented and extensible.

```
.
├── conftest.py
├── fixtures
│   ├── __init__.py
│   ├── auth.py
│   ├── clickhouse.py
│   ├── fs.py
│   ├── http.py
│   ├── migrator.py
│   ├── network.py
│   ├── postgres.py
│   ├── signoz.py
│   ├── sql.py
│   ├── sqlite.py
│   ├── types.py
│   └── zookeeper.py
├── poetry.lock
├── pyproject.toml
└── src
    └── bootstrap
        ├── __init__.py
        ├── a_database.py
        ├── b_register.py
        └── c_license.py
```

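The fixtures under `fixtures/` wrap this machinery. As a rough sketch of what testcontainers buys you — illustrative only; the image tag, port, and readiness poll are assumptions, and the real `fixtures/clickhouse.py` also wires up clustering, Zookeeper, and schema migrations:

```python
import time

import pytest
import requests
from testcontainers.core.container import DockerContainer


@pytest.fixture(scope="session")
def clickhouse_url():
    # Start a disposable ClickHouse container and yield its HTTP endpoint.
    with DockerContainer("clickhouse/clickhouse-server:25.5.6").with_exposed_ports(8123) as container:
        url = f"http://{container.get_container_host_ip()}:{container.get_exposed_port(8123)}"
        # Crude readiness poll; the real fixtures use proper wait strategies.
        for _ in range(30):
            try:
                if requests.get(f"{url}/ping", timeout=1).ok:
                    break
            except requests.ConnectionError:
                time.sleep(1)
        yield url


def test_clickhouse_ping(clickhouse_url: str) -> None:
    # ClickHouse answers "Ok." on /ping when healthy.
    response = requests.get(f"{clickhouse_url}/ping", timeout=2)
    assert response.status_code == 200
```
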
Each test suite follows some important principles:

1. **Organization**: Test suites live under `src/` in self-contained packages. Fixtures (a pytest concept) live inside `fixtures/`.
2. **Execution Order**: Files are prefixed with `a_`, `b_`, `c_` to ensure sequential execution.
3. **Time Constraints**: Each suite should complete in under 10 minutes (setup takes ~4 mins).

### Test Suite Design

Test suites should target functional domains or subsystems within SigNoz. When designing a test suite, consider these principles:

- **Functional Cohesion**: Group tests around a specific capability or service boundary
- **Data Flow**: Follow the path of data through related components
- **Change Patterns**: Components frequently modified together should be tested together

The exact boundaries for modules are intentionally flexible, allowing teams to define logical groupings based on their specific context and knowledge of the system.

E.g., the **bootstrap** integration test suite validates core system functionality:

- Database initialization
- Version check

Other test suites can be **pipelines, auth, querier**.

## How to write an integration test?

Let's start by writing an integration test. Create a new file `src/bootstrap/e_version.py` and paste the following:

```python
import requests

from fixtures import types
from fixtures.logger import setup_logger

logger = setup_logger(__name__)

def test_version(signoz: types.SigNoz) -> None:
    response = requests.get(signoz.self.host_config.get("/api/v1/version"), timeout=2)
    logger.info(response)
```

We have written a simple test which calls the `version` endpoint of the container started in step 1. In order to run just this function, run the following command:

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/bootstrap/e_version.py::test_version
```

> Note: The `--reuse` flag is used to reuse the environment if it is already running. Always use this flag when writing and running integration tests. If you don't use this flag, the environment will be destroyed and recreated every time you run the test.

Here's another example of how to write a more comprehensive integration test:

```python
from http import HTTPStatus
import requests
from fixtures import types
from fixtures.logger import setup_logger

logger = setup_logger(__name__)

def test_user_registration(signoz: types.SigNoz) -> None:
    """Test user registration functionality."""
    response = requests.post(
        signoz.self.host_configs["8080"].get("/api/v1/register"),
        json={
            "name": "testuser",
            "orgId": "",
            "orgName": "test.org",
            "email": "test@example.com",
            "password": "password123Z$",
        },
        timeout=2,
    )

    assert response.status_code == HTTPStatus.OK
    assert response.json()["setupCompleted"] is True
```

## How to run integration tests?

### Running All Tests

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/
```

### Running Specific Test Categories

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/<suite>

# Run querier tests
poetry run pytest --basetemp=./tmp/ -vv --reuse src/querier/
# Run auth tests
poetry run pytest --basetemp=./tmp/ -vv --reuse src/auth/
```

### Running Individual Tests

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/<suite>/<file>.py::test_name

# Run test_register in file a_register.py in auth suite
poetry run pytest --basetemp=./tmp/ -vv --reuse src/auth/a_register.py::test_register
```

## How to configure different options for integration tests?

Tests can be configured using pytest options:

- `--sqlstore-provider` - Choose database provider (default: postgres)
- `--postgres-version` - PostgreSQL version (default: 15)
- `--clickhouse-version` - ClickHouse version (default: 25.5.6)
- `--zookeeper-version` - Zookeeper version (default: 3.7.1)

Example:
```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse --sqlstore-provider=postgres --postgres-version=14 src/auth/
```

## What should I remember?

- **Always use the `--reuse` flag** when setting up the environment to keep containers running
- **Use the `--teardown` flag** when cleaning up to avoid resource leaks
- **Follow the naming convention** with alphabetical prefixes for test execution order
- **Use proper timeouts** in HTTP requests to avoid hanging tests
- **Clean up test data** between tests to avoid interference
- **Use descriptive test names** that clearly indicate what is being tested
- **Leverage fixtures** for common setup and authentication
- **Test both success and failure scenarios** to ensure robust functionality
@@ -103,19 +103,9 @@ Remember to replace the region and ingestion key with proper values as obtained

Both SigNoz and the OTel demo app [the frontend-proxy service, to be accurate] share a common port allocation at 8080. To prevent port allocation conflicts, modify the OTel demo application config to use port 8081 as the `ENVOY_PORT` value as shown below, and run the docker compose command.

Also, both SigNoz and the OTel Demo App have the same `PROMETHEUS_PORT` configured; by default both of them try to start at `9090`, which may cause either of them to fail depending upon which one acquires it first. To prevent this, we need to modify the value of `PROMETHEUS_PORT` too.

```sh
ENVOY_PORT=8081 PROMETHEUS_PORT=9091 docker compose up -d
ENVOY_PORT=8081 docker compose up -d
```

Alternatively, we can modify these values using the `.env` file too, which reduces the command to just:

```sh
docker compose up -d
```

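For reference, the `.env` entries would carry the same values as the inline variables (taken from the earlier command):

```sh
ENVOY_PORT=8081
PROMETHEUS_PORT=9091
```
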
This spins up multiple microservices with OpenTelemetry instrumentation enabled. You can verify this by:
```sh
docker compose ps -a
```
@@ -1,34 +0,0 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type DailyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*DailyProvider)(nil)

func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &dp.BaseSeasonalProvider
}

func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
    dp := &DailyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(dp)
    }

    return dp
}

func (p *DailyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
    req.Seasonality = SeasonalityDaily
    return p.getAnomalies(ctx, orgID, req)
}
@@ -1,35 +0,0 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type HourlyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*HourlyProvider)(nil)

func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &hp.BaseSeasonalProvider
}

// NewHourlyProvider now uses the generic option type
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
    hp := &HourlyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(hp)
    }

    return hp
}

func (p *HourlyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
    req.Seasonality = SeasonalityHourly
    return p.getAnomalies(ctx, orgID, req)
}
@@ -1,223 +0,0 @@
package anomaly

import (
    "time"

    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/SigNoz/signoz/pkg/valuer"
)

type Seasonality struct{ valuer.String }

var (
    SeasonalityHourly = Seasonality{valuer.NewString("hourly")}
    SeasonalityDaily  = Seasonality{valuer.NewString("daily")}
    SeasonalityWeekly = Seasonality{valuer.NewString("weekly")}
)

var (
    oneWeekOffset = uint64(24 * 7 * time.Hour.Milliseconds())
    oneDayOffset  = uint64(24 * time.Hour.Milliseconds())
    oneHourOffset = uint64(time.Hour.Milliseconds())
    fiveMinOffset = uint64(5 * time.Minute.Milliseconds())
)

func (s Seasonality) IsValid() bool {
    switch s {
    case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
        return true
    default:
        return false
    }
}

type AnomaliesRequest struct {
    Params      qbtypes.QueryRangeRequest
    Seasonality Seasonality
}

type AnomaliesResponse struct {
    Results []*qbtypes.TimeSeriesData
}

// anomalyParams is the params for anomaly detection
// prediction = avg(past_period_query) + avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query)
//
//                    ^                                     ^
//                    |                                     |
//     (rounded value for past period)            (seasonal growth)
//
// score = abs(value - prediction) / stddev(current_season_query)
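//
// A worked example with hypothetical numbers: if avg(past_period_query) = 100,
// avg(current_season_query) = 110, and the mean of the three past-season
// averages is 105, then prediction = 100 + 110 - 105 = 105. For an observed
// value of 125 with stddev(current_season_query) = 10, score = abs(125 - 105) / 10 = 2.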
type anomalyQueryParams struct {
    // CurrentPeriodQuery is the query range params for period user is looking at or eval window
    // Example: (now-5m, now), (now-30m, now), (now-1h, now)
    // The results obtained from this query are used to compare with predicted values
    // and to detect anomalies
    CurrentPeriodQuery qbtypes.QueryRangeRequest
    // PastPeriodQuery is the query range params for past period of seasonality
    // Example: For weekly seasonality, (now-1w-5m, now-1w)
    //        : For daily seasonality, (now-1d-5m, now-1d)
    //        : For hourly seasonality, (now-1h-5m, now-1h)
    PastPeriodQuery qbtypes.QueryRangeRequest
    // CurrentSeasonQuery is the query range params for current period (seasonal)
    // Example: For weekly seasonality, this is the query range params for the (now-1w-5m, now)
    //        : For daily seasonality, this is the query range params for the (now-1d-5m, now)
    //        : For hourly seasonality, this is the query range params for the (now-1h-5m, now)
    CurrentSeasonQuery qbtypes.QueryRangeRequest
    // PastSeasonQuery is the query range params for past seasonal period to the current season
    // Example: For weekly seasonality, this is the query range params for the (now-2w-5m, now-1w)
    //        : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
    //        : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
    PastSeasonQuery qbtypes.QueryRangeRequest
    // Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
    // Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
    //        : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
    //        : For hourly seasonality, this is the query range params for the (now-3h-5m, now-2h)
    Past2SeasonQuery qbtypes.QueryRangeRequest
    // Past3SeasonQuery is the query range params for past 3 seasonal period to the current season
    // Example: For weekly seasonality, this is the query range params for the (now-4w-5m, now-3w)
    //        : For daily seasonality, this is the query range params for the (now-4d-5m, now-3d)
    //        : For hourly seasonality, this is the query range params for the (now-4h-5m, now-3h)
    Past3SeasonQuery qbtypes.QueryRangeRequest
}

func prepareAnomalyQueryParams(req qbtypes.QueryRangeRequest, seasonality Seasonality) *anomalyQueryParams {
    start := req.Start
    end := req.End

    currentPeriodQuery := qbtypes.QueryRangeRequest{
        Start:          start,
        End:            end,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var pastPeriodStart, pastPeriodEnd uint64

    switch seasonality {
    // for one week period, we fetch the data from the past week with 5 min offset
    case SeasonalityWeekly:
        pastPeriodStart = start - oneWeekOffset - fiveMinOffset
        pastPeriodEnd = end - oneWeekOffset
    // for one day period, we fetch the data from the past day with 5 min offset
    case SeasonalityDaily:
        pastPeriodStart = start - oneDayOffset - fiveMinOffset
        pastPeriodEnd = end - oneDayOffset
    // for one hour period, we fetch the data from the past hour with 5 min offset
    case SeasonalityHourly:
        pastPeriodStart = start - oneHourOffset - fiveMinOffset
        pastPeriodEnd = end - oneHourOffset
    }

    pastPeriodQuery := qbtypes.QueryRangeRequest{
        Start:          pastPeriodStart,
        End:            pastPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    // seasonality growth trend
    var currentGrowthPeriodStart, currentGrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        currentGrowthPeriodStart = start - oneWeekOffset
        currentGrowthPeriodEnd = start
    case SeasonalityDaily:
        currentGrowthPeriodStart = start - oneDayOffset
        currentGrowthPeriodEnd = start
    case SeasonalityHourly:
        currentGrowthPeriodStart = start - oneHourOffset
        currentGrowthPeriodEnd = start
    }

    currentGrowthQuery := qbtypes.QueryRangeRequest{
        Start:          currentGrowthPeriodStart,
        End:            currentGrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var pastGrowthPeriodStart, pastGrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        pastGrowthPeriodStart = start - 2*oneWeekOffset
        pastGrowthPeriodEnd = start - 1*oneWeekOffset
    case SeasonalityDaily:
        pastGrowthPeriodStart = start - 2*oneDayOffset
        pastGrowthPeriodEnd = start - 1*oneDayOffset
    case SeasonalityHourly:
        pastGrowthPeriodStart = start - 2*oneHourOffset
        pastGrowthPeriodEnd = start - 1*oneHourOffset
    }

    pastGrowthQuery := qbtypes.QueryRangeRequest{
        Start:          pastGrowthPeriodStart,
        End:            pastGrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var past2GrowthPeriodStart, past2GrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        past2GrowthPeriodStart = start - 3*oneWeekOffset
        past2GrowthPeriodEnd = start - 2*oneWeekOffset
    case SeasonalityDaily:
        past2GrowthPeriodStart = start - 3*oneDayOffset
        past2GrowthPeriodEnd = start - 2*oneDayOffset
    case SeasonalityHourly:
        past2GrowthPeriodStart = start - 3*oneHourOffset
        past2GrowthPeriodEnd = start - 2*oneHourOffset
    }

    past2GrowthQuery := qbtypes.QueryRangeRequest{
        Start:          past2GrowthPeriodStart,
        End:            past2GrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    var past3GrowthPeriodStart, past3GrowthPeriodEnd uint64
    switch seasonality {
    case SeasonalityWeekly:
        past3GrowthPeriodStart = start - 4*oneWeekOffset
        past3GrowthPeriodEnd = start - 3*oneWeekOffset
    case SeasonalityDaily:
        past3GrowthPeriodStart = start - 4*oneDayOffset
        past3GrowthPeriodEnd = start - 3*oneDayOffset
    case SeasonalityHourly:
        past3GrowthPeriodStart = start - 4*oneHourOffset
        past3GrowthPeriodEnd = start - 3*oneHourOffset
    }

    past3GrowthQuery := qbtypes.QueryRangeRequest{
        Start:          past3GrowthPeriodStart,
        End:            past3GrowthPeriodEnd,
        RequestType:    qbtypes.RequestTypeTimeSeries,
        CompositeQuery: req.CompositeQuery,
        NoCache:        false,
    }

    return &anomalyQueryParams{
        CurrentPeriodQuery: currentPeriodQuery,
        PastPeriodQuery:    pastPeriodQuery,
        CurrentSeasonQuery: currentGrowthQuery,
        PastSeasonQuery:    pastGrowthQuery,
        Past2SeasonQuery:   past2GrowthQuery,
        Past3SeasonQuery:   past3GrowthQuery,
    }
}

type anomalyQueryResults struct {
    CurrentPeriodResults []*qbtypes.TimeSeriesData
    PastPeriodResults    []*qbtypes.TimeSeriesData
    CurrentSeasonResults []*qbtypes.TimeSeriesData
    PastSeasonResults    []*qbtypes.TimeSeriesData
    Past2SeasonResults   []*qbtypes.TimeSeriesData
    Past3SeasonResults   []*qbtypes.TimeSeriesData
}
@@ -1,11 +0,0 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type Provider interface {
    GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error)
}
@@ -1,465 +0,0 @@
package anomaly

import (
    "context"
    "log/slog"
    "math"

    "github.com/SigNoz/signoz/pkg/querier"
    "github.com/SigNoz/signoz/pkg/valuer"

    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

var (
    // TODO(srikanthccv): make this configurable?
    movingAvgWindowSize = 7
)

// BaseProvider is an interface that includes common methods for all provider types
type BaseProvider interface {
    GetBaseSeasonalProvider() *BaseSeasonalProvider
}

// GenericProviderOption is a generic type for provider options
type GenericProviderOption[T BaseProvider] func(T)

func WithQuerier[T BaseProvider](querier querier.Querier) GenericProviderOption[T] {
    return func(p T) {
        p.GetBaseSeasonalProvider().querier = querier
    }
}

func WithLogger[T BaseProvider](logger *slog.Logger) GenericProviderOption[T] {
    return func(p T) {
        p.GetBaseSeasonalProvider().logger = logger
    }
}

type BaseSeasonalProvider struct {
    querier querier.Querier
    logger  *slog.Logger
}

func (p *BaseSeasonalProvider) getQueryParams(req *AnomaliesRequest) *anomalyQueryParams {
    if !req.Seasonality.IsValid() {
        req.Seasonality = SeasonalityDaily
    }
    return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}

func (p *BaseSeasonalProvider) toTSResults(ctx context.Context, resp *qbtypes.QueryRangeResponse) []*qbtypes.TimeSeriesData {

    tsData := []*qbtypes.TimeSeriesData{}

    if resp == nil {
        p.logger.InfoContext(ctx, "nil response from query range")
        return tsData
    }

    for _, item := range resp.Data.Results {
        if resultData, ok := item.(*qbtypes.TimeSeriesData); ok {
            tsData = append(tsData, resultData)
        }
    }

    return tsData
}

func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID, params *anomalyQueryParams) (*anomalyQueryResults, error) {
    // TODO(srikanthccv): parallelize this?
    p.logger.InfoContext(ctx, "fetching results for current period", "anomaly_current_period_query", params.CurrentPeriodQuery)
    currentPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentPeriodQuery)
    if err != nil {
        return nil, err
    }

    p.logger.InfoContext(ctx, "fetching results for past period", "anomaly_past_period_query", params.PastPeriodQuery)
    pastPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.PastPeriodQuery)
    if err != nil {
        return nil, err
    }

    p.logger.InfoContext(ctx, "fetching results for current season", "anomaly_current_season_query", params.CurrentSeasonQuery)
    currentSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentSeasonQuery)
    if err != nil {
        return nil, err
    }

    p.logger.InfoContext(ctx, "fetching results for past season", "anomaly_past_season_query", params.PastSeasonQuery)
    pastSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.PastSeasonQuery)
    if err != nil {
        return nil, err
    }

    p.logger.InfoContext(ctx, "fetching results for past 2 season", "anomaly_past_2season_query", params.Past2SeasonQuery)
    past2SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past2SeasonQuery)
    if err != nil {
        return nil, err
    }

    p.logger.InfoContext(ctx, "fetching results for past 3 season", "anomaly_past_3season_query", params.Past3SeasonQuery)
    past3SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past3SeasonQuery)
    if err != nil {
        return nil, err
    }

    return &anomalyQueryResults{
        CurrentPeriodResults: p.toTSResults(ctx, currentPeriodResults),
        PastPeriodResults:    p.toTSResults(ctx, pastPeriodResults),
        CurrentSeasonResults: p.toTSResults(ctx, currentSeasonResults),
        PastSeasonResults:    p.toTSResults(ctx, pastSeasonResults),
        Past2SeasonResults:   p.toTSResults(ctx, past2SeasonResults),
        Past3SeasonResults:   p.toTSResults(ctx, past3SeasonResults),
    }, nil
}

// getMatchingSeries gets the matching series from the query result
// for the given series
func (p *BaseSeasonalProvider) getMatchingSeries(_ context.Context, queryResult *qbtypes.TimeSeriesData, series *qbtypes.TimeSeries) *qbtypes.TimeSeries {
    if queryResult == nil || len(queryResult.Aggregations) == 0 || len(queryResult.Aggregations[0].Series) == 0 {
        return nil
    }

    for _, curr := range queryResult.Aggregations[0].Series {
        currLabelsKey := qbtypes.GetUniqueSeriesKey(curr.Labels)
        seriesLabelsKey := qbtypes.GetUniqueSeriesKey(series.Labels)
        if currLabelsKey == seriesLabelsKey {
            return curr
        }
    }
    return nil
}

func (p *BaseSeasonalProvider) getAvg(series *qbtypes.TimeSeries) float64 {
    if series == nil || len(series.Values) == 0 {
        return 0
    }
    var sum float64
    for _, smpl := range series.Values {
        sum += smpl.Value
    }
    return sum / float64(len(series.Values))
}

func (p *BaseSeasonalProvider) getStdDev(series *qbtypes.TimeSeries) float64 {
    if series == nil || len(series.Values) == 0 {
        return 0
    }
    avg := p.getAvg(series)
    var sum float64
    for _, smpl := range series.Values {
        sum += math.Pow(smpl.Value-avg, 2)
    }
    return math.Sqrt(sum / float64(len(series.Values)))
}

// getMovingAvg gets the moving average for the given series
// for the given window size and start index
func (p *BaseSeasonalProvider) getMovingAvg(series *qbtypes.TimeSeries, movingAvgWindowSize, startIdx int) float64 {
    if series == nil || len(series.Values) == 0 {
        return 0
    }
    if startIdx >= len(series.Values)-movingAvgWindowSize {
        startIdx = int(math.Max(0, float64(len(series.Values)-movingAvgWindowSize)))
    }
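    // Example (hypothetical sizes): with len(series.Values) = 10 and a window of 7,
    // any startIdx >= 3 is clamped to 3, so the window always covers the last 7
    // points instead of running off the end of the slice.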
    var sum float64
    points := series.Values[startIdx:]
    windowSize := int(math.Min(float64(movingAvgWindowSize), float64(len(points))))
    for i := 0; i < windowSize; i++ {
        sum += points[i].Value
    }
    avg := sum / float64(windowSize)
    return avg
}

func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
    if len(floats) == 0 {
        return 0
    }
    var sum float64
    for _, f := range floats {
        sum += f
    }
    return sum / float64(len(floats))
}

func (p *BaseSeasonalProvider) getPredictedSeries(
    ctx context.Context,
    series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries,
) *qbtypes.TimeSeries {
    predictedSeries := &qbtypes.TimeSeries{
        Labels: series.Labels,
        Values: make([]*qbtypes.TimeSeriesValue, 0),
    }

    // for each point in the series, get the predicted value
    // the predicted value is the moving average (with window size = 7) of the previous period series
    // plus the average of the current season series
    // minus the mean of the past season series, past2 season series and past3 season series
    for idx, curr := range series.Values {
        movingAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
        avg := p.getAvg(currentSeasonSeries)
        mean := p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
        predictedValue := movingAvg + avg - mean

        if predictedValue < 0 {
            // this should not happen (except when the data has extreme outliers)
            // we will use the moving avg of the previous period series in this case
            p.logger.WarnContext(ctx, "predicted value is less than 0 for series", "anomaly_predicted_value", predictedValue, "anomaly_labels", series.Labels)
            predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
        }

        p.logger.DebugContext(ctx, "predicted value for series",
            "anomaly_moving_avg", movingAvg,
            "anomaly_avg", avg,
            "anomaly_mean", mean,
            "anomaly_labels", series.Labels,
            "anomaly_predicted_value", predictedValue,
            "anomaly_curr", curr.Value,
        )
        predictedSeries.Values = append(predictedSeries.Values, &qbtypes.TimeSeriesValue{
            Timestamp: curr.Timestamp,
            Value:     predictedValue,
        })
    }

    return predictedSeries
}

// getBounds gets the upper and lower bounds for the given series
// for the given z score threshold
// moving avg of the previous period series + z score threshold * std dev of the series
// moving avg of the previous period series - z score threshold * std dev of the series
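// For instance (hypothetical numbers): a predicted moving average of 100 with
// std dev 10 and z_score_threshold = 3 yields bounds of 130 and 70; a negative
// lower bound is clamped to 0 below.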
func (p *BaseSeasonalProvider) getBounds(
|
||||
series, predictedSeries, weekSeries *qbtypes.TimeSeries,
|
||||
zScoreThreshold float64,
|
||||
) (*qbtypes.TimeSeries, *qbtypes.TimeSeries) {
|
||||
upperBoundSeries := &qbtypes.TimeSeries{
|
||||
Labels: series.Labels,
|
||||
Values: make([]*qbtypes.TimeSeriesValue, 0),
|
||||
}
|
||||
|
||||
lowerBoundSeries := &qbtypes.TimeSeries{
|
||||
Labels: series.Labels,
|
||||
Values: make([]*qbtypes.TimeSeriesValue, 0),
|
||||
}
|
||||
|
||||
for idx, curr := range series.Values {
|
||||
upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(weekSeries)
|
||||
lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(weekSeries)
|
||||
upperBoundSeries.Values = append(upperBoundSeries.Values, &qbtypes.TimeSeriesValue{
|
||||
Timestamp: curr.Timestamp,
|
||||
Value: upperBound,
|
||||
})
|
||||
lowerBoundSeries.Values = append(lowerBoundSeries.Values, &qbtypes.TimeSeriesValue{
|
||||
Timestamp: curr.Timestamp,
|
||||
Value: math.Max(lowerBound, 0),
|
||||
})
|
||||
}
|
||||
|
||||
return upperBoundSeries, lowerBoundSeries
|
||||
}
|
||||
|
||||
// getExpectedValue gets the expected value for the given series
|
||||
// for the given index
|
||||
// prevSeriesAvg + currentSeasonSeriesAvg - mean of past season series, past2 season series and past3 season series
|
||||
func (p *BaseSeasonalProvider) getExpectedValue(
|
||||
_, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries, idx int,
|
||||
) float64 {
|
||||
prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
|
||||
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
|
||||
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
|
||||
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
|
||||
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
|
||||
return prevSeriesAvg + currentSeasonSeriesAvg - p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
|
||||
}
|
||||
|
||||
// getScore gets the anomaly score for the given series
|
||||
// for the given index
|
||||
// (value - expectedValue) / std dev of the series
|
||||
func (p *BaseSeasonalProvider) getScore(
|
||||
series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries, value float64, idx int,
|
||||
) float64 {
|
||||
expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
|
||||
if expectedValue < 0 {
|
||||
expectedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
|
||||
}
|
||||
return (value - expectedValue) / p.getStdDev(weekSeries)
|
||||
}
|
||||
|
||||
// getAnomalyScores gets the anomaly scores for the given series
|
||||
// for the given index
|
||||
// (value - expectedValue) / std dev of the series
|
||||
func (p *BaseSeasonalProvider) getAnomalyScores(
|
||||
series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries,
|
||||
) *qbtypes.TimeSeries {
|
||||
anomalyScoreSeries := &qbtypes.TimeSeries{
|
||||
Labels: series.Labels,
|
||||
Values: make([]*qbtypes.TimeSeriesValue, 0),
|
||||
}
|
||||
|
||||
for idx, curr := range series.Values {
|
||||
anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
|
||||
anomalyScoreSeries.Values = append(anomalyScoreSeries.Values, &qbtypes.TimeSeriesValue{
|
||||
Timestamp: curr.Timestamp,
|
||||
Value: anomalyScore,
|
||||
})
|
||||
}
|
||||
|
||||
return anomalyScoreSeries
|
||||
}
|
||||
|
||||
func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
    anomalyParams := p.getQueryParams(req)
    anomalyQueryResults, err := p.getResults(ctx, orgID, anomalyParams)
    if err != nil {
        return nil, err
    }

    currentPeriodResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.CurrentPeriodResults {
        currentPeriodResults[result.QueryName] = result
    }

    pastPeriodResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.PastPeriodResults {
        pastPeriodResults[result.QueryName] = result
    }

    currentSeasonResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.CurrentSeasonResults {
        currentSeasonResults[result.QueryName] = result
    }

    pastSeasonResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.PastSeasonResults {
        pastSeasonResults[result.QueryName] = result
    }

    past2SeasonResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.Past2SeasonResults {
        past2SeasonResults[result.QueryName] = result
    }

    past3SeasonResults := make(map[string]*qbtypes.TimeSeriesData)
    for _, result := range anomalyQueryResults.Past3SeasonResults {
        past3SeasonResults[result.QueryName] = result
    }

    for _, result := range currentPeriodResults {
        funcs := req.Params.FuncsForQuery(result.QueryName)

        var zScoreThreshold float64
        for _, f := range funcs {
            if f.Name == qbtypes.FunctionNameAnomaly {
                for _, arg := range f.Args {
                    if arg.Name != "z_score_threshold" {
                        continue
                    }
                    value, ok := arg.Value.(float64)
                    if ok {
                        zScoreThreshold = value
                    } else {
                        p.logger.InfoContext(ctx, "z_score_threshold is not a float, defaulting to 3")
                        zScoreThreshold = 3
                    }
                    break
                }
            }
        }

        pastPeriodResult, ok := pastPeriodResults[result.QueryName]
        if !ok {
            continue
        }
        currentSeasonResult, ok := currentSeasonResults[result.QueryName]
        if !ok {
            continue
        }
        pastSeasonResult, ok := pastSeasonResults[result.QueryName]
        if !ok {
            continue
        }
        past2SeasonResult, ok := past2SeasonResults[result.QueryName]
        if !ok {
            continue
        }
        past3SeasonResult, ok := past3SeasonResults[result.QueryName]
        if !ok {
            continue
        }

        // no data
        if len(result.Aggregations) == 0 {
            continue
        }

        aggOfInterest := result.Aggregations[0]

        for _, series := range aggOfInterest.Series {

            pastPeriodSeries := p.getMatchingSeries(ctx, pastPeriodResult, series)
            currentSeasonSeries := p.getMatchingSeries(ctx, currentSeasonResult, series)
            pastSeasonSeries := p.getMatchingSeries(ctx, pastSeasonResult, series)
            past2SeasonSeries := p.getMatchingSeries(ctx, past2SeasonResult, series)
            past3SeasonSeries := p.getMatchingSeries(ctx, past3SeasonResult, series)

            stdDev := p.getStdDev(currentSeasonSeries)
            p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)

            prevSeriesAvg := p.getAvg(pastPeriodSeries)
            currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
            pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
            past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
            past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
            p.logger.InfoContext(ctx, "calculated mean for series",
                "anomaly_prev_series_avg", prevSeriesAvg,
                "anomaly_current_season_series_avg", currentSeasonSeriesAvg,
                "anomaly_past_season_series_avg", pastSeasonSeriesAvg,
                "anomaly_past_2season_series_avg", past2SeasonSeriesAvg,
                "anomaly_past_3season_series_avg", past3SeasonSeriesAvg,
                "anomaly_labels", series.Labels,
            )

            predictedSeries := p.getPredictedSeries(
                ctx,
                series,
                pastPeriodSeries,
                currentSeasonSeries,
                pastSeasonSeries,
                past2SeasonSeries,
                past3SeasonSeries,
            )
            aggOfInterest.PredictedSeries = append(aggOfInterest.PredictedSeries, predictedSeries)

            upperBoundSeries, lowerBoundSeries := p.getBounds(
                series,
                predictedSeries,
                currentSeasonSeries,
                zScoreThreshold,
            )
            aggOfInterest.UpperBoundSeries = append(aggOfInterest.UpperBoundSeries, upperBoundSeries)
            aggOfInterest.LowerBoundSeries = append(aggOfInterest.LowerBoundSeries, lowerBoundSeries)

            anomalyScoreSeries := p.getAnomalyScores(
                series,
                pastPeriodSeries,
                currentSeasonSeries,
                pastSeasonSeries,
                past2SeasonSeries,
                past3SeasonSeries,
            )
            aggOfInterest.AnomalyScores = append(aggOfInterest.AnomalyScores, anomalyScoreSeries)
        }
    }

    results := make([]*qbtypes.TimeSeriesData, 0, len(currentPeriodResults))
    for _, result := range currentPeriodResults {
        results = append(results, result)
    }

    return &AnomaliesResponse{
        Results: results,
    }, nil
}
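For orientation, a minimal standalone sketch of the band computation that getBounds is handed inputs for, assuming the conventional z-score band (predicted value plus or minus threshold times the current-season standard deviation). The function name below is a hypothetical stand-in, not this package's real helper.

// Hypothetical sketch: z-score bands around a predicted series.
// seasonStdDev would come from getStdDev(currentSeasonSeries), as above.
func zScoreBands(predicted []float64, seasonStdDev, threshold float64) (upper, lower []float64) {
    upper = make([]float64, len(predicted))
    lower = make([]float64, len(predicted))
    for i, v := range predicted {
        upper[i] = v + threshold*seasonStdDev
        lower[i] = v - threshold*seasonStdDev
    }
    return upper, lower
}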
@@ -1,34 +0,0 @@
package anomaly

import (
    "context"

    "github.com/SigNoz/signoz/pkg/valuer"
)

type WeeklyProvider struct {
    BaseSeasonalProvider
}

var _ BaseProvider = (*WeeklyProvider)(nil)

func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
    return &wp.BaseSeasonalProvider
}

func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
    wp := &WeeklyProvider{
        BaseSeasonalProvider: BaseSeasonalProvider{},
    }

    for _, opt := range opts {
        opt(wp)
    }

    return wp
}

func (p *WeeklyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
    req.Seasonality = SeasonalityWeekly
    return p.getAnomalies(ctx, orgID, req)
}
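NewWeeklyProvider uses the generic functional-options pattern. A sketch of what an option such as the WithLogger/WithQuerier options invoked later in this changeset plausibly looks like; the option type exists in this package, but the field it sets is an assumption here.

// Sketch only; the real options live in this package and may differ in detail.
type GenericProviderOption[T BaseProvider] func(T)

func WithLogger[T BaseProvider](logger *slog.Logger) GenericProviderOption[T] {
    return func(p T) {
        // GetBaseSeasonalProvider comes from the BaseProvider interface above;
        // the unexported logger field name is an assumption.
        p.GetBaseSeasonalProvider().logger = logger
    }
}

// Usage, mirroring the call sites later in this diff:
//   wp := NewWeeklyProvider(WithLogger[*WeeklyProvider](logger))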
@@ -1,197 +0,0 @@
package oidccallbackauthn

import (
    "context"
    "net/url"

    "github.com/SigNoz/signoz/pkg/authn"
    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/factory"
    "github.com/SigNoz/signoz/pkg/http/client"
    "github.com/SigNoz/signoz/pkg/licensing"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/coreos/go-oidc/v3/oidc"
    "golang.org/x/oauth2"
)

const (
    redirectPath string = "/api/v1/complete/oidc"
)

var (
    scopes []string = []string{"email", oidc.ScopeOpenID}
)

var _ authn.CallbackAuthN = (*AuthN)(nil)

type AuthN struct {
    store      authtypes.AuthNStore
    licensing  licensing.Licensing
    httpClient *client.Client
}

func New(store authtypes.AuthNStore, licensing licensing.Licensing, providerSettings factory.ProviderSettings) (*AuthN, error) {
    httpClient, err := client.New(providerSettings.Logger, providerSettings.TracerProvider, providerSettings.MeterProvider)
    if err != nil {
        return nil, err
    }

    return &AuthN{
        store:      store,
        licensing:  licensing,
        httpClient: httpClient,
    }, nil
}

func (a *AuthN) LoginURL(ctx context.Context, siteURL *url.URL, authDomain *authtypes.AuthDomain) (string, error) {
    if authDomain.AuthDomainConfig().AuthNProvider != authtypes.AuthNProviderOIDC {
        return "", errors.Newf(errors.TypeInternal, authtypes.ErrCodeAuthDomainMismatch, "domain type is not oidc")
    }

    _, oauth2Config, err := a.oidcProviderAndoauth2Config(ctx, siteURL, authDomain)
    if err != nil {
        return "", err
    }

    return oauth2Config.AuthCodeURL(authtypes.NewState(siteURL, authDomain.StorableAuthDomain().ID).URL.String()), nil
}

func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtypes.CallbackIdentity, error) {
    if err := query.Get("error"); err != "" {
        return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "oidc: error while authenticating").WithAdditional(query.Get("error_description"))
    }

    state, err := authtypes.NewStateFromString(query.Get("state"))
    if err != nil {
        return nil, errors.Newf(errors.TypeInvalidInput, authtypes.ErrCodeInvalidState, "oidc: invalid state").WithAdditional(err.Error())
    }

    authDomain, err := a.store.GetAuthDomainFromID(ctx, state.DomainID)
    if err != nil {
        return nil, err
    }

    _, err = a.licensing.GetActive(ctx, authDomain.StorableAuthDomain().OrgID)
    if err != nil {
        return nil, errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
    }

    oidcProvider, oauth2Config, err := a.oidcProviderAndoauth2Config(ctx, state.URL, authDomain)
    if err != nil {
        return nil, err
    }

    ctx = context.WithValue(ctx, oauth2.HTTPClient, a.httpClient.Client())
    token, err := oauth2Config.Exchange(ctx, query.Get("code"))
    if err != nil {
        var retrieveError *oauth2.RetrieveError
        if errors.As(err, &retrieveError) {
            return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "oidc: failed to get token").WithAdditional(retrieveError.ErrorDescription).WithAdditional(string(retrieveError.Body))
        }

        return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "oidc: failed to get token").WithAdditional(err.Error())
    }

    claims, err := a.claimsFromIDToken(ctx, authDomain, oidcProvider, token)
    if err != nil && !errors.Ast(err, errors.TypeNotFound) {
        return nil, err
    }

    if claims == nil && authDomain.AuthDomainConfig().OIDC.GetUserInfo {
        claims, err = a.claimsFromUserInfo(ctx, oidcProvider, token)
        if err != nil {
            return nil, err
        }
    }

    emailClaim, ok := claims[authDomain.AuthDomainConfig().OIDC.ClaimMapping.Email].(string)
    if !ok {
        return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "oidc: missing email in claims")
    }

    email, err := valuer.NewEmail(emailClaim)
    if err != nil {
        return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "oidc: failed to parse email").WithAdditional(err.Error())
    }

    if !authDomain.AuthDomainConfig().OIDC.InsecureSkipEmailVerified {
        emailVerifiedClaim, ok := claims["email_verified"].(bool)
        if !ok {
            return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "oidc: missing email_verified in claims")
        }

        if !emailVerifiedClaim {
            return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "oidc: email is not verified")
        }
    }

    return authtypes.NewCallbackIdentity("", email, authDomain.StorableAuthDomain().OrgID, state), nil
}

func (a *AuthN) ProviderInfo(ctx context.Context, authDomain *authtypes.AuthDomain) *authtypes.AuthNProviderInfo {
    return &authtypes.AuthNProviderInfo{
        RelayStatePath: nil,
    }
}

func (a *AuthN) oidcProviderAndoauth2Config(ctx context.Context, siteURL *url.URL, authDomain *authtypes.AuthDomain) (*oidc.Provider, *oauth2.Config, error) {
    if authDomain.AuthDomainConfig().OIDC.IssuerAlias != "" {
        ctx = oidc.InsecureIssuerURLContext(ctx, authDomain.AuthDomainConfig().OIDC.IssuerAlias)
    }

    oidcProvider, err := oidc.NewProvider(ctx, authDomain.AuthDomainConfig().OIDC.Issuer)
    if err != nil {
        return nil, nil, err
    }

    return oidcProvider, &oauth2.Config{
        ClientID:     authDomain.AuthDomainConfig().OIDC.ClientID,
        ClientSecret: authDomain.AuthDomainConfig().OIDC.ClientSecret,
        Endpoint:     oidcProvider.Endpoint(),
        Scopes:       scopes,
        RedirectURL: (&url.URL{
            Scheme: siteURL.Scheme,
            Host:   siteURL.Host,
            Path:   redirectPath,
        }).String(),
    }, nil
}

func (a *AuthN) claimsFromIDToken(ctx context.Context, authDomain *authtypes.AuthDomain, provider *oidc.Provider, token *oauth2.Token) (map[string]any, error) {
    rawIDToken, ok := token.Extra("id_token").(string)
    if !ok {
        return nil, errors.New(errors.TypeNotFound, errors.CodeNotFound, "oidc: no id_token in token response")
    }

    verifier := provider.Verifier(&oidc.Config{ClientID: authDomain.AuthDomainConfig().OIDC.ClientID})
    idToken, err := verifier.Verify(ctx, rawIDToken)
    if err != nil {
        return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "oidc: failed to verify token").WithAdditional(err.Error())
    }

    var claims map[string]any
    if err := idToken.Claims(&claims); err != nil {
        return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "oidc: failed to decode claims").WithAdditional(err.Error())
    }

    return claims, nil
}

func (a *AuthN) claimsFromUserInfo(ctx context.Context, provider *oidc.Provider, token *oauth2.Token) (map[string]any, error) {
    var claims map[string]any

    userInfo, err := provider.UserInfo(ctx, oauth2.StaticTokenSource(&oauth2.Token{
        AccessToken: token.AccessToken,
        TokenType:   "Bearer", // The UserInfo endpoint requires a bearer token as per RFC6750
    }))
    if err != nil {
        return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "oidc: failed to get user info").WithAdditional(err.Error())
    }

    if err := userInfo.Claims(&claims); err != nil {
        return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "oidc: failed to decode claims").WithAdditional(err.Error())
    }

    return claims, nil
}
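How the two entry points fit together: LoginURL starts the redirect to the IdP, and HandleCallback finishes the flow at /api/v1/complete/oidc. A hypothetical wiring sketch; mux, authN, siteURL, and authDomain are assumed to exist and are not part of this changeset.

// Sketch: wiring the OIDC login flow into an http mux.
mux.HandleFunc("/login", func(w http.ResponseWriter, r *http.Request) {
    loginURL, err := authN.LoginURL(r.Context(), siteURL, authDomain)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    http.Redirect(w, r, loginURL, http.StatusFound)
})
mux.HandleFunc("/api/v1/complete/oidc", func(w http.ResponseWriter, r *http.Request) {
    identity, err := authN.HandleCallback(r.Context(), r.URL.Query())
    if err != nil {
        http.Error(w, err.Error(), http.StatusForbidden)
        return
    }
    _ = identity // issue a session for identity.Email here
})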
@@ -1,163 +0,0 @@
package samlcallbackauthn

import (
    "context"
    "crypto/x509"
    "encoding/base64"
    "encoding/pem"
    "net/url"
    "strings"

    "github.com/SigNoz/signoz/pkg/authn"
    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/licensing"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    saml2 "github.com/russellhaering/gosaml2"
    dsig "github.com/russellhaering/goxmldsig"
)

const (
    redirectPath string = "/api/v1/complete/saml"
)

var _ authn.CallbackAuthN = (*AuthN)(nil)

type AuthN struct {
    store     authtypes.AuthNStore
    licensing licensing.Licensing
}

func New(ctx context.Context, store authtypes.AuthNStore, licensing licensing.Licensing) (*AuthN, error) {
    return &AuthN{
        store:     store,
        licensing: licensing,
    }, nil
}

func (a *AuthN) LoginURL(ctx context.Context, siteURL *url.URL, authDomain *authtypes.AuthDomain) (string, error) {
    if authDomain.AuthDomainConfig().AuthNProvider != authtypes.AuthNProviderSAML {
        return "", errors.Newf(errors.TypeInternal, authtypes.ErrCodeAuthDomainMismatch, "saml: domain type is not saml")
    }

    sp, err := a.serviceProvider(siteURL, authDomain)
    if err != nil {
        return "", err
    }

    url, err := sp.BuildAuthURL(authtypes.NewState(siteURL, authDomain.StorableAuthDomain().ID).URL.String())
    if err != nil {
        return "", err
    }

    return url, nil
}

func (a *AuthN) HandleCallback(ctx context.Context, formValues url.Values) (*authtypes.CallbackIdentity, error) {
    state, err := authtypes.NewStateFromString(formValues.Get("RelayState"))
    if err != nil {
        return nil, errors.New(errors.TypeInvalidInput, authtypes.ErrCodeInvalidState, "saml: invalid state").WithAdditional(err.Error())
    }

    authDomain, err := a.store.GetAuthDomainFromID(ctx, state.DomainID)
    if err != nil {
        return nil, err
    }

    _, err = a.licensing.GetActive(ctx, authDomain.StorableAuthDomain().OrgID)
    if err != nil {
        return nil, errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
    }

    sp, err := a.serviceProvider(state.URL, authDomain)
    if err != nil {
        return nil, err
    }

    assertionInfo, err := sp.RetrieveAssertionInfo(formValues.Get("SAMLResponse"))
    if err != nil {
        if errors.As(err, &saml2.ErrVerification{}) {
            return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, err.Error())
        }

        if errors.As(err, &saml2.ErrMissingElement{}) {
            return nil, errors.New(errors.TypeNotFound, errors.CodeNotFound, err.Error())
        }

        return nil, err
    }

    if assertionInfo.WarningInfo.InvalidTime {
        return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "saml: expired saml response")
    }

    email, err := valuer.NewEmail(assertionInfo.NameID)
    if err != nil {
        return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "saml: invalid email").WithAdditional("The nameID assertion is used to retrieve the email address, please check your IDP configuration and try again.")
    }

    return authtypes.NewCallbackIdentity("", email, authDomain.StorableAuthDomain().OrgID, state), nil
}

func (a *AuthN) ProviderInfo(ctx context.Context, authDomain *authtypes.AuthDomain) *authtypes.AuthNProviderInfo {
    state := authtypes.NewState(&url.URL{Path: "login"}, authDomain.StorableAuthDomain().ID).URL.String()

    return &authtypes.AuthNProviderInfo{
        RelayStatePath: &state,
    }
}

func (a *AuthN) serviceProvider(siteURL *url.URL, authDomain *authtypes.AuthDomain) (*saml2.SAMLServiceProvider, error) {
    certStore, err := a.getCertificateStore(authDomain)
    if err != nil {
        return nil, err
    }

    acsURL := &url.URL{Scheme: siteURL.Scheme, Host: siteURL.Host, Path: redirectPath}

    // Note:
    // The ServiceProviderIssuer is the client id in case of keycloak. Since we set it to the host here, we need to set the client id == host in keycloak.
    // For AWSSSO, this is the value of Application SAML audience.
    return &saml2.SAMLServiceProvider{
        IdentityProviderSSOURL:      authDomain.AuthDomainConfig().SAML.SamlIdp,
        IdentityProviderIssuer:      authDomain.AuthDomainConfig().SAML.SamlEntity,
        ServiceProviderIssuer:       siteURL.Host,
        AssertionConsumerServiceURL: acsURL.String(),
        SignAuthnRequests:           !authDomain.AuthDomainConfig().SAML.InsecureSkipAuthNRequestsSigned,
        AllowMissingAttributes:      true,
        IDPCertificateStore:         certStore,
        SPKeyStore:                  dsig.RandomKeyStoreForTest(),
    }, nil
}

func (a *AuthN) getCertificateStore(authDomain *authtypes.AuthDomain) (dsig.X509CertificateStore, error) {
    certStore := &dsig.MemoryX509CertificateStore{
        Roots: []*x509.Certificate{},
    }

    var certBytes []byte
    if strings.Contains(authDomain.AuthDomainConfig().SAML.SamlCert, "-----BEGIN CERTIFICATE-----") {
        block, _ := pem.Decode([]byte(authDomain.AuthDomainConfig().SAML.SamlCert))
        if block == nil {
            return certStore, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "no valid pem cert found")
        }

        certBytes = block.Bytes
    } else {
        certData, err := base64.StdEncoding.DecodeString(authDomain.AuthDomainConfig().SAML.SamlCert)
        if err != nil {
            return certStore, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to read certificate: %s", err.Error())
        }

        certBytes = certData
    }

    idpCert, err := x509.ParseCertificate(certBytes)
    if err != nil {
        return certStore, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to prepare saml request, invalid cert: %s", err.Error())
    }

    certStore.Roots = append(certStore.Roots, idpCert)

    return certStore, nil
}
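getCertificateStore accepts the IdP certificate either PEM-armored or as raw base64-encoded DER. The same branch as a self-contained sketch, using only the standard library:

// Standalone sketch of the PEM-or-base64 decision above; cert is hypothetical input.
func parseIDPCert(cert string) (*x509.Certificate, error) {
    var der []byte
    if strings.Contains(cert, "-----BEGIN CERTIFICATE-----") {
        block, _ := pem.Decode([]byte(cert))
        if block == nil {
            return nil, fmt.Errorf("no valid pem cert found")
        }
        der = block.Bytes
    } else {
        raw, err := base64.StdEncoding.DecodeString(cert)
        if err != nil {
            return nil, fmt.Errorf("failed to read certificate: %w", err)
        }
        der = raw
    }
    return x509.ParseCertificate(der)
}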
@@ -1,98 +0,0 @@
package openfgaauthz

import (
    "context"

    "github.com/SigNoz/signoz/pkg/authz"
    pkgopenfgaauthz "github.com/SigNoz/signoz/pkg/authz/openfgaauthz"
    "github.com/SigNoz/signoz/pkg/factory"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    openfgav1 "github.com/openfga/api/proto/openfga/v1"
    openfgapkgtransformer "github.com/openfga/language/pkg/go/transformer"
)

type provider struct {
    pkgAuthzService authz.AuthZ
}

func NewProviderFactory(sqlstore sqlstore.SQLStore, openfgaSchema []openfgapkgtransformer.ModuleFile) factory.ProviderFactory[authz.AuthZ, authz.Config] {
    return factory.NewProviderFactory(factory.MustNewName("openfga"), func(ctx context.Context, ps factory.ProviderSettings, config authz.Config) (authz.AuthZ, error) {
        return newOpenfgaProvider(ctx, ps, config, sqlstore, openfgaSchema)
    })
}

func newOpenfgaProvider(ctx context.Context, settings factory.ProviderSettings, config authz.Config, sqlstore sqlstore.SQLStore, openfgaSchema []openfgapkgtransformer.ModuleFile) (authz.AuthZ, error) {
    pkgOpenfgaAuthzProvider := pkgopenfgaauthz.NewProviderFactory(sqlstore, openfgaSchema)
    pkgAuthzService, err := pkgOpenfgaAuthzProvider.New(ctx, settings, config)
    if err != nil {
        return nil, err
    }

    return &provider{
        pkgAuthzService: pkgAuthzService,
    }, nil
}

func (provider *provider) Start(ctx context.Context) error {
    return provider.pkgAuthzService.Start(ctx)
}

func (provider *provider) Stop(ctx context.Context) error {
    return provider.pkgAuthzService.Stop(ctx)
}

func (provider *provider) Check(ctx context.Context, tuple *openfgav1.TupleKey) error {
    return provider.pkgAuthzService.Check(ctx, tuple)
}

func (provider *provider) CheckWithTupleCreation(ctx context.Context, claims authtypes.Claims, orgID valuer.UUID, relation authtypes.Relation, _ authtypes.Relation, typeable authtypes.Typeable, selectors []authtypes.Selector) error {
    subject, err := authtypes.NewSubject(authtypes.TypeableUser, claims.UserID, orgID, nil)
    if err != nil {
        return err
    }

    tuples, err := typeable.Tuples(subject, relation, selectors, orgID)
    if err != nil {
        return err
    }

    err = provider.BatchCheck(ctx, tuples)
    if err != nil {
        return err
    }

    return nil
}

func (provider *provider) CheckWithTupleCreationWithoutClaims(ctx context.Context, orgID valuer.UUID, relation authtypes.Relation, _ authtypes.Relation, typeable authtypes.Typeable, selectors []authtypes.Selector) error {
    subject, err := authtypes.NewSubject(authtypes.TypeableAnonymous, authtypes.AnonymousUser.String(), orgID, nil)
    if err != nil {
        return err
    }

    tuples, err := typeable.Tuples(subject, relation, selectors, orgID)
    if err != nil {
        return err
    }

    err = provider.BatchCheck(ctx, tuples)
    if err != nil {
        return err
    }

    return nil
}

func (provider *provider) BatchCheck(ctx context.Context, tuples []*openfgav1.TupleKey) error {
    return provider.pkgAuthzService.BatchCheck(ctx, tuples)
}

func (provider *provider) ListObjects(ctx context.Context, subject string, relation authtypes.Relation, typeable authtypes.Typeable) ([]*authtypes.Object, error) {
    return provider.pkgAuthzService.ListObjects(ctx, subject, relation, typeable)
}

func (provider *provider) Write(ctx context.Context, additions []*openfgav1.TupleKey, deletions []*openfgav1.TupleKey) error {
    return provider.pkgAuthzService.Write(ctx, additions, deletions)
}
@@ -1,40 +0,0 @@
module base

type organisation
  relations
    define read: [user, role#assignee]
    define update: [user, role#assignee]

type user
  relations
    define read: [user, role#assignee]
    define update: [user, role#assignee]
    define delete: [user, role#assignee]

type anonymous

type role
  relations
    define assignee: [user, anonymous]

    define read: [user, role#assignee]
    define update: [user, role#assignee]
    define delete: [user, role#assignee]

type metaresources
  relations
    define create: [user, role#assignee]
    define list: [user, role#assignee]

type metaresource
  relations
    define read: [user, anonymous, role#assignee]
    define update: [user, role#assignee]
    define delete: [user, role#assignee]

    define block: [user, role#assignee]


type telemetryresource
  relations
    define read: [user, role#assignee]
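Against this schema, an authorization question reduces to a (user, relation, object) tuple; a sketch using the same openfgav1 types the provider above consumes. The IDs and the authzService variable are hypothetical.

// Sketch: does this user have read on this organisation?
tuple := &openfgav1.TupleKey{
    User:     "user:019045a2-0000-7000-8000-000000000001",        // subject
    Relation: "read",                                             // relation defined on organisation
    Object:   "organisation:019045b7-0000-7000-8000-000000000002", // object being accessed
}
if err := authzService.Check(ctx, tuple); err != nil {
    // denied, or the check itself failed
}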
@@ -1,29 +0,0 @@
package openfgaschema

import (
    "context"
    _ "embed"

    "github.com/SigNoz/signoz/pkg/authz"
    openfgapkgtransformer "github.com/openfga/language/pkg/go/transformer"
)

var (
    //go:embed base.fga
    baseDSL string
)

type schema struct{}

func NewSchema() authz.Schema {
    return &schema{}
}

func (schema *schema) Get(ctx context.Context) []openfgapkgtransformer.ModuleFile {
    return []openfgapkgtransformer.ModuleFile{
        {
            Name:     "base.fga",
            Contents: baseDSL,
        },
    }
}
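The embedded schema is what the provider factory shown earlier in this diff consumes; wiring the two together looks roughly like this sketch, using only names from this changeset (sqlStore is assumed to exist):

schema := openfgaschema.NewSchema()
providerFactory := openfgaauthz.NewProviderFactory(sqlStore, schema.Get(ctx))
// providerFactory.New(ctx, settings, config) then yields an authz.AuthZ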
@@ -1,10 +1,10 @@
package licensing

import (
    "fmt"
    "sync"
    "time"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/licensing"
)

@@ -18,7 +18,7 @@ func Config(pollInterval time.Duration, failureThreshold int) licensing.Config {
    once.Do(func() {
        config = licensing.Config{PollInterval: pollInterval, FailureThreshold: failureThreshold}
        if err := config.Validate(); err != nil {
            panic(errors.WrapInternalf(err, errors.CodeInternal, "invalid licensing config"))
            panic(fmt.Errorf("invalid licensing config: %w", err))
        }
    })

ee/query-service/.dockerignore (new file, +4)
@@ -0,0 +1,4 @@
.vscode
README.md
signoz.db
bin
@@ -11,7 +11,13 @@ before:
builds:
  - id: signoz
    binary: bin/signoz
    main: ./cmd/enterprise
    main: ee/query-service/main.go
    env:
      - CGO_ENABLED=1
      - >-
        {{- if eq .Os "linux" }}
        {{- if eq .Arch "arm64" }}CC=aarch64-linux-gnu-gcc{{- end }}
        {{- end }}
    goos:
      - linux
      - darwin
@@ -31,8 +37,11 @@ builds:
      - -X github.com/SigNoz/signoz/pkg/version.branch={{ .Branch }}
      - -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
      - -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
      - -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
      - -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
      - -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr
      - >-
        {{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
    mod_timestamp: "{{ .CommitTimestamp }}"
    tags:
      - timetzdata
@@ -16,4 +16,4 @@ COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["./signoz", "server"]
ENTRYPOINT ["./signoz"]
@@ -1,12 +1,4 @@
FROM node:18-bullseye AS build

WORKDIR /opt/
COPY ./frontend/ ./
ENV NODE_OPTIONS=--max-old-space-size=8192
RUN CI=1 yarn install
RUN CI=1 yarn build

FROM golang:1.24-bullseye
FROM golang:1.23-bullseye

ARG OS="linux"
ARG TARGETARCH
@@ -31,7 +23,6 @@ COPY go.mod go.sum ./

RUN go mod download

COPY ./cmd/ ./cmd/
COPY ./ee/ ./ee/
COPY ./pkg/ ./pkg/
COPY ./templates/email /root/templates
@@ -40,8 +31,6 @@ COPY Makefile Makefile
RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz

COPY --from=build /opt/build ./web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["/root/signoz", "server"]
ENTRYPOINT ["/root/signoz"]
@@ -17,4 +17,4 @@ COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["./signoz", "server"]
ENTRYPOINT ["./signoz"]
@@ -19,12 +19,8 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/interfaces"
    basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
    rules "github.com/SigNoz/signoz/pkg/query-service/rules"
    "github.com/SigNoz/signoz/pkg/queryparser"
    "github.com/SigNoz/signoz/pkg/signoz"
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/types/dashboardtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/SigNoz/signoz/pkg/version"
    "github.com/gorilla/mux"
)
@@ -39,7 +35,10 @@ type APIHandlerOptions struct {
    Gateway    *httputil.ReverseProxy
    GatewayUrl string
    // Querier Influx Interval
    FluxInterval time.Duration
    FluxInterval      time.Duration
    UseLogsNewSchema  bool
    UseTraceNewSchema bool
    JWT               *authtypes.JWT
}

type APIHandler struct {
@@ -60,8 +59,7 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
        LicensingAPI:   httplicensing.NewLicensingAPI(signoz.Licensing),
        FieldsAPI:      fields.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.TelemetryStore),
        Signoz:         signoz,
        QuerierAPI:     querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
        QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
        QuerierAPI:     querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier),
    })

    if err != nil {
@@ -94,6 +92,9 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
    // routes available only in ee version
    router.HandleFunc("/api/v1/features", am.ViewAccess(ah.getFeatureFlags)).Methods(http.MethodGet)

    // paid plans specific routes
    router.HandleFunc("/api/v1/complete/saml", am.OpenAccess(ah.receiveSAML)).Methods(http.MethodPost)

    // base overrides
    router.HandleFunc("/api/v1/version", am.OpenAccess(ah.getVersion)).Methods(http.MethodGet)

@@ -101,39 +102,6 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
    router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
    router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.LicensingAPI.Portal)).Methods(http.MethodPost)

    // dashboards
    router.HandleFunc("/api/v1/dashboards/{id}/public", am.AdminAccess(ah.Signoz.Handlers.Dashboard.CreatePublic)).Methods(http.MethodPost)
    router.HandleFunc("/api/v1/dashboards/{id}/public", am.AdminAccess(ah.Signoz.Handlers.Dashboard.GetPublic)).Methods(http.MethodGet)
    router.HandleFunc("/api/v1/dashboards/{id}/public", am.AdminAccess(ah.Signoz.Handlers.Dashboard.UpdatePublic)).Methods(http.MethodPut)
    router.HandleFunc("/api/v1/dashboards/{id}/public", am.AdminAccess(ah.Signoz.Handlers.Dashboard.DeletePublic)).Methods(http.MethodDelete)

    // public access for dashboards
    router.HandleFunc("/api/v1/public/dashboards/{id}", am.CheckWithoutClaims(
        ah.Signoz.Handlers.Dashboard.GetPublicData,
        authtypes.RelationRead, authtypes.RelationRead,
        dashboardtypes.TypeableMetaResourcePublicDashboard,
        func(req *http.Request, orgs []*types.Organization) ([]authtypes.Selector, valuer.UUID, error) {
            id, err := valuer.NewUUID(mux.Vars(req)["id"])
            if err != nil {
                return nil, valuer.UUID{}, err
            }

            return ah.Signoz.Modules.Dashboard.GetPublicDashboardOrgAndSelectors(req.Context(), id, orgs)
        })).Methods(http.MethodGet)

    router.HandleFunc("/api/v1/public/dashboards/{id}/widgets/{index}/query_range", am.CheckWithoutClaims(
        ah.Signoz.Handlers.Dashboard.GetPublicWidgetQueryRange,
        authtypes.RelationRead, authtypes.RelationRead,
        dashboardtypes.TypeableMetaResourcePublicDashboard,
        func(req *http.Request, orgs []*types.Organization) ([]authtypes.Selector, valuer.UUID, error) {
            id, err := valuer.NewUUID(mux.Vars(req)["id"])
            if err != nil {
                return nil, valuer.UUID{}, err
            }

            return ah.Signoz.Modules.Dashboard.GetPublicDashboardOrgAndSelectors(req.Context(), id, orgs)
        })).Methods(http.MethodGet)

    // v3
    router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Activate)).Methods(http.MethodPost)
    router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Refresh)).Methods(http.MethodPut)
@@ -142,11 +110,6 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
    // v4
    router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)

    // v5
    router.HandleFunc("/api/v5/query_range", am.ViewAccess(ah.queryRangeV5)).Methods(http.MethodPost)

    router.HandleFunc("/api/v5/substitute_vars", am.ViewAccess(ah.QuerierAPI.ReplaceVariables)).Methods(http.MethodPost)

    // Gateway
    router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP))

ee/query-service/app/api/auth.go (new file, +107)
@@ -0,0 +1,107 @@
package api

import (
    "context"
    "encoding/base64"
    "fmt"
    "net/http"
    "net/url"

    "go.uber.org/zap"

    "github.com/SigNoz/signoz/pkg/query-service/constants"
    "github.com/SigNoz/signoz/pkg/valuer"
)

func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
    ssoError := []byte("Login failed. Please contact your system administrator")
    dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
    base64.StdEncoding.Encode(dst, ssoError)

    http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectURL, string(dst)), http.StatusSeeOther)
}

// receiveSAML completes a SAML request and logs the user in
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
    // this is the source url that initiated the login request
    redirectUri := constants.GetDefaultSiteURL()
    ctx := context.Background()

    err := r.ParseForm()
    if err != nil {
        zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
        handleSsoError(w, r, redirectUri)
        return
    }

    // the relay state is sent when a login request is submitted to the IdP
    relayState := r.FormValue("RelayState")
    zap.L().Debug("[receiveSAML] relay state", zap.String("relayState", relayState))

    parsedState, err := url.Parse(relayState)
    if err != nil || relayState == "" {
        zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
        handleSsoError(w, r, redirectUri)
        return
    }

    // upgrade redirect url from the relay state for better accuracy
    redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")

    // fetch domain by parsing relay state.
    domain, err := ah.Signoz.Modules.User.GetDomainFromSsoResponse(ctx, parsedState)
    if err != nil {
        handleSsoError(w, r, redirectUri)
        return
    }

    orgID, err := valuer.NewUUID(domain.OrgID)
    if err != nil {
        handleSsoError(w, r, redirectUri)
        return
    }

    _, err = ah.Signoz.Licensing.GetActive(ctx, orgID)
    if err != nil {
        zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
        http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
        return
    }

    sp, err := domain.PrepareSamlRequest(parsedState)
    if err != nil {
        zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err))
        handleSsoError(w, r, redirectUri)
        return
    }

    assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
    if err != nil {
        zap.L().Error("[receiveSAML] failed to retrieve assertion info from saml response", zap.String("domain", domain.String()), zap.Error(err))
        handleSsoError(w, r, redirectUri)
        return
    }

    if assertionInfo.WarningInfo.InvalidTime {
        zap.L().Error("[receiveSAML] expired saml response", zap.String("domain", domain.String()), zap.Error(err))
        handleSsoError(w, r, redirectUri)
        return
    }

    email := assertionInfo.NameID
    if email == "" {
        zap.L().Error("[receiveSAML] invalid email in the SSO response", zap.String("domain", domain.String()))
        handleSsoError(w, r, redirectUri)
        return
    }

    nextPage, err := ah.Signoz.Modules.User.PrepareSsoRedirect(ctx, redirectUri, email)
    if err != nil {
        zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login", zap.String("domain", domain.String()), zap.Error(err))
        handleSsoError(w, r, redirectUri)
        return
    }

    http.Redirect(w, r, nextPage, http.StatusSeeOther)
}
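handleSsoError base64-encodes the message so it survives the query string; the frontend presumably reverses it. A standalone round-trip sketch (the site URL is hypothetical):

msg := []byte("Login failed. Please contact your system administrator")
encoded := base64.StdEncoding.EncodeToString(msg) // same result as Encode into a sized buffer above
redirect := fmt.Sprintf("%s?ssoerror=%s", "https://signoz.example/login", encoded)

decoded, err := base64.StdEncoding.DecodeString(encoded) // what the login page would do
if err == nil {
    _ = decoded // the human-readable error shown to the user
}
_ = redirect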
@@ -10,13 +10,14 @@ import (
    "strings"
    "time"

    "github.com/SigNoz/signoz/ee/query-service/constants"
    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/http/render"
    "github.com/SigNoz/signoz/pkg/modules/user"
    basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/google/uuid"
    "github.com/gorilla/mux"
    "go.uber.org/zap"
)
@@ -76,7 +77,7 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
        return
    }

    ingestionUrl, signozApiUrl, apiErr := ah.getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
    ingestionUrl, signozApiUrl, apiErr := getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
    if apiErr != nil {
        RespondError(w, basemodel.WrapApiError(
            apiErr, "couldn't deduce ingestion url and signoz api url",
@@ -167,55 +168,82 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
func (ah *APIHandler) getOrCreateCloudIntegrationUser(
    ctx context.Context, orgId string, cloudProvider string,
) (*types.User, *basemodel.ApiError) {
    cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider)
    email := valuer.MustNewEmail(fmt.Sprintf("%s@signoz.io", cloudIntegrationUserName))
    cloudIntegrationUser := fmt.Sprintf("%s-integration", cloudProvider)
    email := fmt.Sprintf("%s@signoz.io", cloudIntegrationUser)

    cloudIntegrationUser, err := types.NewUser(cloudIntegrationUserName, email, types.RoleViewer, valuer.MustNewUUID(orgId))
    integrationUserResult, err := ah.Signoz.Modules.User.GetUserByEmailInOrg(ctx, orgId, email)
    if err != nil && !errors.Ast(err, errors.TypeNotFound) {
        return nil, basemodel.NotFoundError(fmt.Errorf("couldn't look for integration user: %w", err))
    }

    if integrationUserResult != nil {
        return &integrationUserResult.User, nil
    }

    zap.L().Info(
        "cloud integration user not found. Attempting to create the user",
        zap.String("cloudProvider", cloudProvider),
    )

    newUser, err := types.NewUser(cloudIntegrationUser, email, types.RoleViewer.String(), orgId)
    if err != nil {
        return nil, basemodel.InternalError(fmt.Errorf(
            "couldn't create cloud integration user: %w", err,
        ))
    }

    password, err := types.NewFactorPassword(uuid.NewString())

    integrationUser, err := ah.Signoz.Modules.User.CreateUserWithPassword(ctx, newUser, password)
    if err != nil {
        return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
    }

    password := types.MustGenerateFactorPassword(cloudIntegrationUser.ID.StringValue())

    cloudIntegrationUser, err = ah.Signoz.Modules.User.GetOrCreateUser(ctx, cloudIntegrationUser, user.WithFactorPassword(password))
    if err != nil {
        return nil, basemodel.InternalError(fmt.Errorf("couldn't look for integration user: %w", err))
    }

    return cloudIntegrationUser, nil
    return integrationUser, nil
}

func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
func getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
    string, string, *basemodel.ApiError,
) {
    // TODO: remove this struct from here
    url := fmt.Sprintf(
        "%s%s",
        strings.TrimSuffix(constants.ZeusURL, "/"),
        "/v2/deployments/me",
    )

    type deploymentResponse struct {
        Name        string `json:"name"`
        ClusterInfo struct {
            Region struct {
                DNS string `json:"dns"`
            } `json:"region"`
        } `json:"cluster"`
        Status string `json:"status"`
        Error  string `json:"error"`
        Data   struct {
            Name string `json:"name"`

            ClusterInfo struct {
                Region struct {
                    DNS string `json:"dns"`
                } `json:"region"`
            } `json:"cluster"`
        } `json:"data"`
    }

    respBytes, err := ah.Signoz.Zeus.GetDeployment(ctx, licenseKey)
    if err != nil {
    resp, apiErr := requestAndParseResponse[deploymentResponse](
        ctx, url, map[string]string{"X-Signoz-Cloud-Api-Key": licenseKey}, nil,
    )

    if apiErr != nil {
        return "", "", basemodel.WrapApiError(
            apiErr, "couldn't query for deployment info",
        )
    }

    if resp.Status != "success" {
        return "", "", basemodel.InternalError(fmt.Errorf(
            "couldn't query for deployment info: error: %w", err,
            "couldn't query for deployment info: status: %s, error: %s",
            resp.Status, resp.Error,
        ))
    }

    resp := new(deploymentResponse)

    err = json.Unmarshal(respBytes, resp)
    if err != nil {
        return "", "", basemodel.InternalError(fmt.Errorf(
            "couldn't unmarshal deployment info response: error: %w", err,
        ))
    }

    regionDns := resp.ClusterInfo.Region.DNS
    deploymentName := resp.Name
    regionDns := resp.Data.ClusterInfo.Region.DNS
    deploymentName := resp.Data.Name

    if len(regionDns) < 1 || len(deploymentName) < 1 {
        // Fail early if actual response structure and expectation here ever diverge

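For reference, decoding a made-up Zeus deployment payload into the deploymentResponse struct declared inside the function above, matching how regionDns and deploymentName are read from the new Data envelope:

// Sketch: the payload is invented for illustration.
payload := []byte(`{"status":"success","data":{"name":"myteam","cluster":{"region":{"dns":"us.signoz.cloud"}}}}`)

var resp deploymentResponse
if err := json.Unmarshal(payload, &resp); err != nil {
    // handle decode failure
}
regionDns := resp.Data.ClusterInfo.Region.DNS // "us.signoz.cloud"
deploymentName := resp.Data.Name              // "myteam"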
@@ -2,16 +2,11 @@ package api

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "runtime/debug"

    anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"
    "github.com/SigNoz/signoz/ee/query-service/anomaly"
    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/http/render"
    baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
    "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
@@ -20,8 +15,6 @@ import (
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "go.uber.org/zap"

    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
@@ -143,139 +136,3 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
        aH.QueryRangeV4(w, r)
    }
}

func extractSeasonality(anomalyQuery *qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) anomalyV2.Seasonality {
    for _, fn := range anomalyQuery.Functions {
        if fn.Name == qbtypes.FunctionNameAnomaly {
            for _, arg := range fn.Args {
                if arg.Name == "seasonality" {
                    if seasonalityStr, ok := arg.Value.(string); ok {
                        switch seasonalityStr {
                        case "weekly":
                            return anomalyV2.SeasonalityWeekly
                        case "hourly":
                            return anomalyV2.SeasonalityHourly
                        }
                    }
                }
            }
        }
    }
    return anomalyV2.SeasonalityDaily // default
}

func createAnomalyProvider(aH *APIHandler, seasonality anomalyV2.Seasonality) anomalyV2.Provider {
    switch seasonality {
    case anomalyV2.SeasonalityWeekly:
        return anomalyV2.NewWeeklyProvider(
            anomalyV2.WithQuerier[*anomalyV2.WeeklyProvider](aH.Signoz.Querier),
            anomalyV2.WithLogger[*anomalyV2.WeeklyProvider](aH.Signoz.Instrumentation.Logger()),
        )
    case anomalyV2.SeasonalityHourly:
        return anomalyV2.NewHourlyProvider(
            anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](aH.Signoz.Querier),
            anomalyV2.WithLogger[*anomalyV2.HourlyProvider](aH.Signoz.Instrumentation.Logger()),
        )
    default:
        return anomalyV2.NewDailyProvider(
            anomalyV2.WithQuerier[*anomalyV2.DailyProvider](aH.Signoz.Querier),
            anomalyV2.WithLogger[*anomalyV2.DailyProvider](aH.Signoz.Instrumentation.Logger()),
        )
    }
}

func (aH *APIHandler) handleAnomalyQuery(ctx context.Context, orgID valuer.UUID, anomalyQuery *qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation], queryRangeRequest qbtypes.QueryRangeRequest) (*anomalyV2.AnomaliesResponse, error) {
    seasonality := extractSeasonality(anomalyQuery)
    provider := createAnomalyProvider(aH, seasonality)

    return provider.GetAnomalies(ctx, orgID, &anomalyV2.AnomaliesRequest{Params: queryRangeRequest})
}

func (aH *APIHandler) queryRangeV5(rw http.ResponseWriter, req *http.Request) {
    bodyBytes, err := io.ReadAll(req.Body)
    if err != nil {
        render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to read request body: %v", err))
        return
    }
    req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))

    ctx := req.Context()

    claims, err := authtypes.ClaimsFromContext(ctx)
    if err != nil {
        render.Error(rw, err)
        return
    }

    var queryRangeRequest qbtypes.QueryRangeRequest
    if err := json.NewDecoder(req.Body).Decode(&queryRangeRequest); err != nil {
        render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to decode request body: %v", err))
        return
    }

    defer func() {
        if r := recover(); r != nil {
            stackTrace := string(debug.Stack())

            queryJSON, _ := json.Marshal(queryRangeRequest)

            aH.Signoz.Instrumentation.Logger().ErrorContext(ctx, "panic in QueryRange",
                "error", r,
                "user", claims.UserID,
                "payload", string(queryJSON),
                "stacktrace", stackTrace,
            )

            render.Error(rw, errors.NewInternalf(
                errors.CodeInternal,
                "Something went wrong on our end. It's not you, it's us. Our team is notified about it. Reach out to support if issue persists.",
            ))
        }
    }()

    if err := queryRangeRequest.Validate(); err != nil {
        render.Error(rw, err)
        return
    }

    orgID, err := valuer.NewUUID(claims.OrgID)
    if err != nil {
        render.Error(rw, err)
        return
    }

    if anomalyQuery, ok := queryRangeRequest.IsAnomalyRequest(); ok {
        anomalies, err := aH.handleAnomalyQuery(ctx, orgID, anomalyQuery, queryRangeRequest)
        if err != nil {
            render.Error(rw, errors.NewInternalf(errors.CodeInternal, "failed to get anomalies: %v", err))
            return
        }

        results := []any{}
        for _, item := range anomalies.Results {
            results = append(results, item)
        }

        finalResp := &qbtypes.QueryRangeResponse{
            Type: queryRangeRequest.RequestType,
            Data: struct {
                Results []any `json:"results"`
            }{
                Results: results,
            },
            Meta: struct {
                RowsScanned  uint64 `json:"rowsScanned"`
                BytesScanned uint64 `json:"bytesScanned"`
                DurationMS   uint64 `json:"durationMs"`
            }{},
        }

        render.Success(rw, http.StatusOK, finalResp)
        return
    } else {
        // regular query range request, let the querier handle it
        req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
        aH.QuerierAPI.QueryRange(rw, req)
    }
}

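queryRangeV5 reads the body once, then restores it with io.NopCloser so it can decode the request and still hand the untouched original to the querier. The pattern in isolation, using only the standard library and the qbtypes package imported above:

bodyBytes, err := io.ReadAll(req.Body)
if err != nil {
    // handle read failure
}
req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) // first consumer can now decode it

var payload qbtypes.QueryRangeRequest
_ = json.NewDecoder(req.Body).Decode(&payload) // consumes the restored body

req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) // reset again before delegating downstream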
@@ -3,16 +3,9 @@ package app

import (
    "context"
    "fmt"
    "log/slog"
    "net"
    "net/http"
    _ "net/http/pprof" // http profiler
    "slices"

    "github.com/SigNoz/signoz/pkg/cache/memorycache"
    "github.com/SigNoz/signoz/pkg/ruler/rulestore/sqlrulestore"
    "go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
    "go.opentelemetry.io/otel/propagation"

    "github.com/gorilla/handlers"

@@ -25,10 +18,10 @@ import (
    "github.com/SigNoz/signoz/pkg/http/middleware"
    "github.com/SigNoz/signoz/pkg/modules/organization"
    "github.com/SigNoz/signoz/pkg/prometheus"
    "github.com/SigNoz/signoz/pkg/querier"
    "github.com/SigNoz/signoz/pkg/signoz"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/telemetrystore"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/web"
    "github.com/rs/cors"
    "github.com/soheilhy/cmux"
@@ -49,10 +42,24 @@ import (
    "go.uber.org/zap"
)

type ServerOptions struct {
    Config                     signoz.Config
    SigNoz                     *signoz.SigNoz
    HTTPHostPort               string
    PrivateHostPort            string
    PreferSpanMetrics          bool
    FluxInterval               string
    FluxIntervalForTraceDetail string
    Cluster                    string
    GatewayUrl                 string
    Jwt                        *authtypes.JWT
}

// Server runs HTTP, Mux and a grpc server
type Server struct {
    config      signoz.Config
    signoz      *signoz.SigNoz
    jwt         *authtypes.JWT
    ruleManager *baserules.Manager

    // public http router
@@ -60,6 +67,11 @@ type Server struct {
    httpServer   *http.Server
    httpHostPort string

    // private http
    privateConn     net.Listener
    privateHTTP     *http.Server
    privateHostPort string

    opampServer *opamp.Server

    // Usage manager
@@ -69,32 +81,19 @@ type Server struct {
}

// NewServer creates and initializes Server
func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT) (*Server, error) {
    gatewayProxy, err := gateway.NewProxy(config.Gateway.URL.String(), gateway.RoutePrefix)
    if err != nil {
        return nil, err
    }

    cacheForTraceDetail, err := memorycache.New(context.TODO(), signoz.Instrumentation.ToProviderSettings(), cache.Config{
        Provider: "memory",
        Memory: cache.Memory{
            NumCounters: 10 * 10000,
            MaxCost:     1 << 27, // 128 MB
        },
    })
    if err != nil {
        return nil, err
    }

    reader := clickhouseReader.NewReader(
        signoz.SQLStore,
        signoz.TelemetryStore,
        signoz.Prometheus,
        signoz.TelemetryStore.Cluster(),
        config.Querier.FluxInterval,
        cacheForTraceDetail,
        signoz.Cache,
        nil,
    )

    rm, err := makeRulesManager(
@@ -105,8 +104,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
        signoz.TelemetryStore,
        signoz.Prometheus,
        signoz.Modules.OrgGetter,
        signoz.Querier,
        signoz.Instrumentation.Logger(),
    )

    if err != nil {
@@ -168,6 +165,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
        FluxInterval: config.Querier.FluxInterval,
        Gateway:      gatewayProxy,
        GatewayUrl:   config.Gateway.URL.String(),
        JWT:          jwt,
    }

    apiHandler, err := api.NewAPIHandler(apiOpts, signoz)
@@ -178,8 +176,10 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
    s := &Server{
        config:             config,
        signoz:             signoz,
        jwt:                jwt,
        ruleManager:        rm,
        httpHostPort:       baseconst.HTTPHostPort,
        privateHostPort:    baseconst.PrivateHostPort,
        unavailableChannel: make(chan healthcheck.Status),
        usageManager:       usageManager,
    }
@@ -192,6 +192,13 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {

    s.httpServer = httpServer

    privateServer, err := s.createPrivateServer(apiHandler)
    if err != nil {
        return nil, err
    }

    s.privateHTTP = privateServer

    s.opampServer = opamp.InitializeServer(
        &opAmpModel.AllAgents, agentConfMgr, signoz.Instrumentation,
    )
@@ -204,21 +211,41 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
    return s.unavailableChannel
}

func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
    r := baseapp.NewRouter()
    am := middleware.NewAuthZ(s.signoz.Instrumentation.Logger(), s.signoz.Modules.OrgGetter, s.signoz.Authz)

    r.Use(otelmux.Middleware(
        "apiserver",
        otelmux.WithMeterProvider(s.signoz.Instrumentation.MeterProvider()),
        otelmux.WithTracerProvider(s.signoz.Instrumentation.TracerProvider()),
        otelmux.WithPropagators(propagation.NewCompositeTextMapPropagator(propagation.Baggage{}, propagation.TraceContext{})),
        otelmux.WithFilter(func(r *http.Request) bool {
            return !slices.Contains([]string{"/api/v1/health"}, r.URL.Path)
        }),
        otelmux.WithPublicEndpoint(),
    ))
    r.Use(middleware.NewAuthN([]string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Tokenizer, s.signoz.Instrumentation.Logger()).Wrap)
    r.Use(middleware.NewAuth(s.jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
    r.Use(middleware.NewAPIKey(s.signoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.signoz.Instrumentation.Logger(), s.signoz.Sharder).Wrap)
    r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
        s.config.APIServer.Timeout.ExcludedRoutes,
        s.config.APIServer.Timeout.Default,
        s.config.APIServer.Timeout.Max,
    ).Wrap)
    r.Use(middleware.NewLogging(s.signoz.Instrumentation.Logger(), s.config.APIServer.Logging.ExcludedRoutes).Wrap)

    apiHandler.RegisterPrivateRoutes(r)

    c := cors.New(cors.Options{
        //todo(amol): find out a way to add exact domain or
        // ip here for alert manager
        AllowedOrigins: []string{"*"},
        AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
        AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "SIGNOZ-API-KEY", "X-SIGNOZ-QUERY-ID", "Sec-WebSocket-Protocol"},
    })

    handler := c.Handler(r)
    handler = handlers.CompressHandler(handler)

    return &http.Server{
        Handler: handler,
    }, nil
}

func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
    r := baseapp.NewRouter()
    am := middleware.NewAuthZ(s.signoz.Instrumentation.Logger())

    r.Use(middleware.NewAuth(s.jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
    r.Use(middleware.NewAPIKey(s.signoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.signoz.Instrumentation.Logger(), s.signoz.Sharder).Wrap)
    r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
        s.config.APIServer.Timeout.ExcludedRoutes,
@@ -226,7 +253,6 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
        s.config.APIServer.Timeout.Max,
    ).Wrap)
    r.Use(middleware.NewLogging(s.signoz.Instrumentation.Logger(), s.config.APIServer.Logging.ExcludedRoutes).Wrap)
    r.Use(middleware.NewComment().Wrap)

    apiHandler.RegisterRoutes(r, am)
    apiHandler.RegisterLogsRoutes(r, am)
@@ -243,11 +269,6 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
    apiHandler.MetricExplorerRoutes(r, am)
    apiHandler.RegisterTraceFunnelsRoutes(r, am)

    err := s.signoz.APIServer.AddToRouter(r)
    if err != nil {
        return nil, err
    }

    c := cors.New(cors.Options{
        AllowedOrigins: []string{"*"},
        AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"},
@@ -258,7 +279,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h

    handler = handlers.CompressHandler(handler)

    err = web.AddToRouter(r)
    err := web.AddToRouter(r)
    if err != nil {
        return nil, err
    }
@@ -284,6 +305,19 @@ func (s *Server) initListeners() error {

    zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.httpHostPort))

    // listen on private port to support internal services
    privateHostPort := s.privateHostPort

    if privateHostPort == "" {
        return fmt.Errorf("baseconst.PrivateHostPort is required")
    }

    s.privateConn, err = net.Listen("tcp", privateHostPort)
    if err != nil {
        return err
    }
    zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.privateHostPort))

    return nil
}

@@ -322,6 +356,26 @@ func (s *Server) Start(ctx context.Context) error {
        }
    }()

    var privatePort int
    if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
        privatePort = port
    }

    go func() {
        zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.privateHostPort))

        switch err := s.privateHTTP.Serve(s.privateConn); err {
        case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
            // normal exit, nothing to do
            zap.L().Info("private http server closed")
        default:
            zap.L().Error("Could not start private HTTP server", zap.Error(err))
        }

        s.unavailableChannel <- healthcheck.Unavailable
    }()

    go func() {
        zap.L().Info("Starting OpAmp Websocket server", zap.String("addr", baseconst.OpAmpWsEndpoint))
        err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
@@ -341,6 +395,12 @@ func (s *Server) Stop(ctx context.Context) error {
        }
    }

    if s.privateHTTP != nil {
        if err := s.privateHTTP.Shutdown(ctx); err != nil {
            return err
        }
    }

    s.opampServer.Stop()

    if s.ruleManager != nil {
@@ -353,9 +413,15 @@ func (s *Server) Stop(ctx context.Context) error {
    return nil
}

func makeRulesManager(ch baseint.Reader, cache cache.Cache, alertmanager alertmanager.Alertmanager, sqlstore sqlstore.SQLStore, telemetryStore telemetrystore.TelemetryStore, prometheus prometheus.Prometheus, orgGetter organization.Getter, querier querier.Querier, logger *slog.Logger) (*baserules.Manager, error) {
    ruleStore := sqlrulestore.NewRuleStore(sqlstore)
    maintenanceStore := sqlrulestore.NewMaintenanceStore(sqlstore)
func makeRulesManager(
    ch baseint.Reader,
    cache cache.Cache,
    alertmanager alertmanager.Alertmanager,
    sqlstore sqlstore.SQLStore,
    telemetryStore telemetrystore.TelemetryStore,
    prometheus prometheus.Prometheus,
    orgGetter organization.Getter,
) (*baserules.Manager, error) {
    // create manager opts
    managerOpts := &baserules.ManagerOptions{
        TelemetryStore: telemetryStore,
@@ -363,17 +429,13 @@ func makeRulesManager(ch baseint.Reader, cache cache.Cache, alertmanager alertma
        Context:             context.Background(),
        Logger:              zap.L(),
        Reader:              ch,
        Querier:             querier,
        SLogger:             logger,
        Cache:               cache,
        EvalDelay:           baseconst.GetEvalDelay(),
        PrepareTaskFunc:     rules.PrepareTaskFunc,
        PrepareTestRuleFunc: rules.TestNotification,
        Alertmanager:        alertmanager,
        SQLStore:            sqlstore,
        OrgGetter:           orgGetter,
        RuleStore:           ruleStore,
        MaintenanceStore:    maintenanceStore,
        SqlStore:            sqlstore,
    }

    // create Manager

@@ -4,12 +4,19 @@ import (
	"os"
)

const (
	DefaultSiteURL = "https://localhost:8080"
)

var LicenseSignozIo = "https://license.signoz.io/api/v1"
var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")

// this is set via build time variable
var ZeusURL = "https://api.signoz.cloud"

func GetOrDefaultEnv(key string, fallback string) string {
	v := os.Getenv(key)
	if len(v) == 0 {
@@ -20,13 +27,20 @@ func GetOrDefaultEnv(key string, fallback string) string {

// constant functions that override env vars

// GetDefaultSiteURL returns the default site URL, primarily
// used to send the SAML request and allow the backend to
// handle the HTTP redirect
func GetDefaultSiteURL() string {
	return GetOrDefaultEnv("SIGNOZ_SITE_URL", DefaultSiteURL)
}

const DotMetricsEnabled = "DOT_METRICS_ENABLED"

var IsDotMetricsEnabled = false
var IsPreferSpanMetrics = false

func init() {
	if GetOrDefaultEnv(DotMetricsEnabled, "true") == "true" {
	if GetOrDefaultEnv(DotMetricsEnabled, "false") == "true" {
		IsDotMetricsEnabled = true
	}

ee/query-service/main.go (new file, 189 lines)
@@ -0,0 +1,189 @@
package main

import (
	"context"
	"flag"
	"os"
	"time"

	"github.com/SigNoz/signoz/ee/licensing"
	"github.com/SigNoz/signoz/ee/licensing/httplicensing"
	"github.com/SigNoz/signoz/ee/query-service/app"
	"github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
	"github.com/SigNoz/signoz/ee/zeus"
	"github.com/SigNoz/signoz/ee/zeus/httpzeus"
	"github.com/SigNoz/signoz/pkg/analytics"
	"github.com/SigNoz/signoz/pkg/config"
	"github.com/SigNoz/signoz/pkg/config/envprovider"
	"github.com/SigNoz/signoz/pkg/config/fileprovider"
	"github.com/SigNoz/signoz/pkg/factory"
	pkglicensing "github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/sqlschema"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/version"
	pkgzeus "github.com/SigNoz/signoz/pkg/zeus"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Deprecated: Please use the logger from pkg/instrumentation.
func initZapLog() *zap.Logger {
	config := zap.NewProductionConfig()
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	logger, _ := config.Build()
	return logger
}

func main() {
	var promConfigPath, skipTopLvlOpsPath string

	// disables rule execution but allows change to the rule definition
	var disableRules bool

	// the url used to build link in the alert messages in slack and other systems
	var ruleRepoURL string
	var cluster string

	var useLogsNewSchema bool
	var useTraceNewSchema bool
	var cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string
	var preferSpanMetrics bool

	var maxIdleConns int
	var maxOpenConns int
	var dialTimeout time.Duration
	var gatewayUrl string
	var useLicensesV3 bool

	// Deprecated
	flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
	// Deprecated
	flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
	// Deprecated
	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
	// Deprecated
	flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
	// Deprecated
	flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
	flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
	// Deprecated
	flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
	// Deprecated
	flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
	// Deprecated
	flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
	// Deprecated
	flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
	// Deprecated
	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
	flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
	// Deprecated
	flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
	flag.Parse()

	loggerMgr := initZapLog()
	zap.ReplaceGlobals(loggerMgr)
	defer loggerMgr.Sync() // flushes buffer, if any
	ctx := context.Background()

	config, err := signoz.NewConfig(ctx, config.ResolverConfig{
		Uris: []string{"env:"},
		ProviderFactories: []config.ProviderFactory{
			envprovider.NewFactory(),
			fileprovider.NewFactory(),
		},
	}, signoz.DeprecatedFlags{
		MaxIdleConns: maxIdleConns,
		MaxOpenConns: maxOpenConns,
		DialTimeout: dialTimeout,
		Config: promConfigPath,
		FluxInterval: fluxInterval,
		FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
		Cluster: cluster,
		GatewayUrl: gatewayUrl,
	})
	if err != nil {
		zap.L().Fatal("Failed to create config", zap.Error(err))
	}

	version.Info.PrettyPrint(config.Version)

	sqlStoreFactories := signoz.NewSQLStoreProviderFactories()
	if err := sqlStoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
		zap.L().Fatal("Failed to add postgressqlstore factory", zap.Error(err))
	}

	jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")

	if len(jwtSecret) == 0 {
		zap.L().Warn("No JWT secret key is specified.")
	} else {
		zap.L().Info("JWT secret key set successfully.")
	}

	jwt := authtypes.NewJWT(jwtSecret, 30*time.Minute, 30*24*time.Hour)

	signoz, err := signoz.New(
		context.Background(),
		config,
		jwt,
		zeus.Config(),
		httpzeus.NewProviderFactory(),
		licensing.Config(24*time.Hour, 3),
		func(sqlstore sqlstore.SQLStore, zeus pkgzeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[pkglicensing.Licensing, pkglicensing.Config] {
			return httplicensing.NewProviderFactory(sqlstore, zeus, orgGetter, analytics)
		},
		signoz.NewEmailingProviderFactories(),
		signoz.NewCacheProviderFactories(),
		signoz.NewWebProviderFactories(),
		func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
			existingFactories := signoz.NewSQLSchemaProviderFactories(sqlstore)
			if err := existingFactories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil {
				zap.L().Fatal("Failed to add postgressqlschema factory", zap.Error(err))
			}

			return existingFactories
		},
		sqlStoreFactories,
		signoz.NewTelemetryStoreProviderFactories(),
	)
	if err != nil {
		zap.L().Fatal("Failed to create signoz", zap.Error(err))
	}

	server, err := app.NewServer(config, signoz, jwt)
	if err != nil {
		zap.L().Fatal("Failed to create server", zap.Error(err))
	}

	if err := server.Start(ctx); err != nil {
		zap.L().Fatal("Could not start server", zap.Error(err))
	}

	signoz.Start(ctx)

	if err := signoz.Wait(ctx); err != nil {
		zap.L().Fatal("Failed to start signoz", zap.Error(err))
	}

	err = server.Stop(ctx)
	if err != nil {
		zap.L().Fatal("Failed to stop server", zap.Error(err))
	}

	err = signoz.Stop(ctx)
	if err != nil {
		zap.L().Fatal("Failed to stop signoz", zap.Error(err))
	}
}
@@ -1,7 +1,7 @@
package model

import (
	"errors"
	"fmt"

	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)
@@ -57,7 +57,7 @@ func Unauthorized(err error) *ApiError {
func BadRequestStr(s string) *ApiError {
	return &ApiError{
		Typ: basemodel.ErrorBadData,
		Err: errors.New(s),
		Err: fmt.Errorf(s),
	}
}

@@ -73,7 +73,7 @@ func InternalError(err error) *ApiError {
func InternalErrorStr(s string) *ApiError {
	return &ApiError{
		Typ: basemodel.ErrorInternal,
		Err: errors.New(s),
		Err: fmt.Errorf(s),
	}
}

@@ -4,17 +4,17 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"math"
	"strings"
	"sync"
	"time"

	"go.uber.org/zap"

	"github.com/SigNoz/signoz/ee/query-service/anomaly"
	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/query-service/common"
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/transition"
	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
	"github.com/SigNoz/signoz/pkg/valuer"

@@ -30,11 +30,7 @@ import (

	baserules "github.com/SigNoz/signoz/pkg/query-service/rules"

	querierV5 "github.com/SigNoz/signoz/pkg/querier"

	anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"

	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	yaml "gopkg.in/yaml.v2"
)

const (
@@ -51,14 +47,7 @@ type AnomalyRule struct {
	// querierV2 is used for alerts created after the introduction of new metrics query builder
	querierV2 interfaces.Querier

	// querierV5 is used for alerts migrated after the introduction of new query builder
	querierV5 querierV5.Querier

	provider anomaly.Provider
	providerV2 anomalyV2.Provider

	version string
	logger *slog.Logger
	provider anomaly.Provider

	seasonality anomaly.Seasonality
}
@@ -68,15 +57,16 @@ func NewAnomalyRule(
	orgID valuer.UUID,
	p *ruletypes.PostableRule,
	reader interfaces.Reader,
	querierV5 querierV5.Querier,
	logger *slog.Logger,
	cache cache.Cache,
	opts ...baserules.RuleOption,
) (*AnomalyRule, error) {

	logger.Info("creating new AnomalyRule", "rule_id", id)
	zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))

	opts = append(opts, baserules.WithLogger(logger))
	if p.RuleCondition.CompareOp == ruletypes.ValueIsBelow {
		target := -1 * *p.RuleCondition.Target
		p.RuleCondition.Target = &target
	}

	baseRule, err := baserules.NewBaseRule(id, orgID, p, reader, opts...)
	if err != nil {
@@ -98,7 +88,7 @@ func NewAnomalyRule(
		t.seasonality = anomaly.SeasonalityDaily
	}

	logger.Info("using seasonality", "seasonality", t.seasonality.String())
	zap.L().Info("using seasonality", zap.String("seasonality", t.seasonality.String()))

	querierOptsV2 := querierV2.QuerierOptions{
		Reader: reader,
@@ -127,27 +117,6 @@ func NewAnomalyRule(
			anomaly.WithReader[*anomaly.WeeklyProvider](reader),
		)
	}

	if t.seasonality == anomaly.SeasonalityHourly {
		t.providerV2 = anomalyV2.NewHourlyProvider(
			anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](querierV5),
			anomalyV2.WithLogger[*anomalyV2.HourlyProvider](logger),
		)
	} else if t.seasonality == anomaly.SeasonalityDaily {
		t.providerV2 = anomalyV2.NewDailyProvider(
			anomalyV2.WithQuerier[*anomalyV2.DailyProvider](querierV5),
			anomalyV2.WithLogger[*anomalyV2.DailyProvider](logger),
		)
	} else if t.seasonality == anomaly.SeasonalityWeekly {
		t.providerV2 = anomalyV2.NewWeeklyProvider(
			anomalyV2.WithQuerier[*anomalyV2.WeeklyProvider](querierV5),
			anomalyV2.WithLogger[*anomalyV2.WeeklyProvider](logger),
		)
	}

	t.querierV5 = querierV5
	t.version = p.Version
	t.logger = logger
	return &t, nil
}

@@ -155,15 +124,20 @@ func (r *AnomalyRule) Type() ruletypes.RuleType {
	return RuleTypeAnomaly
}

func (r *AnomalyRule) prepareQueryRange(ctx context.Context, ts time.Time) (*v3.QueryRangeParamsV3, error) {
func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) {

	r.logger.InfoContext(
		ctx, "prepare query range request v4", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds(),
	)
	zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.EvalWindow().Milliseconds()), zap.Int64("evalDelay", r.EvalDelay().Milliseconds()))

	st, en := r.Timestamps(ts)
	start := st.UnixMilli()
	end := en.UnixMilli()
	start := ts.Add(-time.Duration(r.EvalWindow())).UnixMilli()
	end := ts.UnixMilli()

	if r.EvalDelay() > 0 {
		start = start - int64(r.EvalDelay().Milliseconds())
		end = end - int64(r.EvalDelay().Milliseconds())
	}
	// round to minute otherwise we could potentially miss data
	start = start - (start % (60 * 1000))
	end = end - (end % (60 * 1000))

	compositeQuery := r.Condition().CompositeQuery

@@ -182,34 +156,13 @@ func (r *AnomalyRule) prepareQueryRange(ctx context.Context, ts time.Time) (*v3.
	}, nil
}

func (r *AnomalyRule) prepareQueryRangeV5(ctx context.Context, ts time.Time) (*qbtypes.QueryRangeRequest, error) {

	r.logger.InfoContext(ctx, "prepare query range request v5", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds())

	startTs, endTs := r.Timestamps(ts)
	start, end := startTs.UnixMilli(), endTs.UnixMilli()

	req := &qbtypes.QueryRangeRequest{
		Start: uint64(start),
		End: uint64(end),
		RequestType: qbtypes.RequestTypeTimeSeries,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: make([]qbtypes.QueryEnvelope, 0),
		},
		NoCache: true,
	}
	req.CompositeQuery.Queries = make([]qbtypes.QueryEnvelope, len(r.Condition().CompositeQuery.Queries))
	copy(req.CompositeQuery.Queries, r.Condition().CompositeQuery.Queries)
	return req, nil
}

func (r *AnomalyRule) GetSelectedQuery() string {
	return r.Condition().GetSelectedQueryName()
}

func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {

	params, err := r.prepareQueryRange(ctx, ts)
	params, err := r.prepareQueryRange(ts)
	if err != nil {
		return nil, err
	}
@@ -237,74 +190,13 @@ func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, t
	var resultVector ruletypes.Vector

	scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
	r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))
	zap.L().Info("anomaly scores", zap.String("scores", string(scoresJSON)))

	for _, series := range queryResult.AnomalyScores {
		if r.Condition() != nil && r.Condition().RequireMinPoints {
			if len(series.Points) < r.Condition().RequiredNumPoints {
				r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
				continue
			}
		smpl, shouldAlert := r.ShouldAlert(*series)
		if shouldAlert {
			resultVector = append(resultVector, smpl)
		}
		results, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
			ActiveAlerts: r.ActiveAlertsLabelFP(),
		})
		if err != nil {
			return nil, err
		}
		resultVector = append(resultVector, results...)
	}
	return resultVector, nil
}

func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {

	params, err := r.prepareQueryRangeV5(ctx, ts)
	if err != nil {
		return nil, err
	}

	anomalies, err := r.providerV2.GetAnomalies(ctx, orgID, &anomalyV2.AnomaliesRequest{
		Params: *params,
		Seasonality: anomalyV2.Seasonality{String: valuer.NewString(r.seasonality.String())},
	})
	if err != nil {
		return nil, err
	}

	var qbResult *qbtypes.TimeSeriesData
	for _, result := range anomalies.Results {
		if result.QueryName == r.GetSelectedQuery() {
			qbResult = result
			break
		}
	}

	if qbResult == nil {
		r.logger.WarnContext(ctx, "nil qb result", "ts", ts.UnixMilli())
	}

	queryResult := transition.ConvertV5TimeSeriesDataToV4Result(qbResult)

	var resultVector ruletypes.Vector

	scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
	r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))

	for _, series := range queryResult.AnomalyScores {
		if r.Condition().RequireMinPoints {
			if len(series.Points) < r.Condition().RequiredNumPoints {
				r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
				continue
			}
		}
		results, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
			ActiveAlerts: r.ActiveAlertsLabelFP(),
		})
		if err != nil {
			return nil, err
		}
		resultVector = append(resultVector, results...)
	}
	return resultVector, nil
}
@@ -314,17 +206,8 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
	prevState := r.State()

	valueFormatter := formatter.FromUnit(r.Unit())
	res, err := r.buildAndRunQuery(ctx, r.OrgID(), ts)

	var res ruletypes.Vector
	var err error

	if r.version == "v5" {
		r.logger.InfoContext(ctx, "running v5 query")
		res, err = r.buildAndRunQueryV5(ctx, r.OrgID(), ts)
	} else {
		r.logger.InfoContext(ctx, "running v4 query")
		res, err = r.buildAndRunQuery(ctx, r.OrgID(), ts)
	}
	if err != nil {
		return nil, err
	}
@@ -335,20 +218,15 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
	resultFPs := map[uint64]struct{}{}
	var alerts = make(map[uint64]*ruletypes.Alert, len(res))

	ruleReceivers := r.Threshold.GetRuleReceivers()
	ruleReceiverMap := make(map[string][]string)
	for _, value := range ruleReceivers {
		ruleReceiverMap[value.Name] = value.Channels
	}

	for _, smpl := range res {
		l := make(map[string]string, len(smpl.Metric))
		for _, lbl := range smpl.Metric {
			l[lbl.Name] = lbl.Value
		}

		value := valueFormatter.Format(smpl.V, r.Unit())
		threshold := valueFormatter.Format(smpl.Target, smpl.TargetUnit)
		r.logger.DebugContext(ctx, "Alert template data for rule", "rule_name", r.Name(), "formatter", valueFormatter.Name(), "value", value, "threshold", threshold)
		threshold := valueFormatter.Format(r.TargetVal(), r.Unit())
		zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))

		tmplData := ruletypes.AlertTemplateData(l, value, threshold)
		// Inject some convenience variables that are easier to remember for users
@@ -369,7 +247,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
			result, err := tmpl.Expand()
			if err != nil {
				result = fmt.Sprintf("<error expanding template: %s>", err)
				r.logger.ErrorContext(ctx, "Expanding alert template failed", "error", err, "data", tmplData, "rule_name", r.Name())
				zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
			}
			return result
		}
@@ -391,7 +269,6 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
		}
		if smpl.IsMissing {
			lb.Set(labels.AlertNameLabel, "[No data] "+r.Name())
			lb.Set(labels.NoDataLabel, "true")
		}

		lbs := lb.Labels()
@@ -399,7 +276,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
		resultFPs[h] = struct{}{}

		if _, ok := alerts[h]; ok {
			r.logger.ErrorContext(ctx, "the alert query returns duplicate records", "rule_id", r.ID(), "alert", alerts[h])
			zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
			err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
			return nil, err
		}
@@ -412,13 +289,13 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
			State: model.StatePending,
			Value: smpl.V,
			GeneratorURL: r.GeneratorURL(),
			Receivers: ruleReceiverMap[lbs.Map()[ruletypes.LabelThresholdName]],
			Receivers: r.PreferredChannels(),
			Missing: smpl.IsMissing,
			IsRecovering: smpl.IsRecovering,
		}
	}

	r.logger.InfoContext(ctx, "number of alerts found", "rule_name", r.Name(), "alerts_count", len(alerts))
	zap.L().Info("number of alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))

	// alerts[h] is ready, add or update active list now
	for h, a := range alerts {
		// Check whether we already have alerting state for the identifying label set.
@@ -427,12 +304,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro

			alert.Value = a.Value
			alert.Annotations = a.Annotations
			// Update the recovering and missing state of existing alert
			alert.IsRecovering = a.IsRecovering
			alert.Missing = a.Missing
			if v, ok := alert.Labels.Map()[ruletypes.LabelThresholdName]; ok {
				alert.Receivers = ruleReceiverMap[v]
			}
			alert.Receivers = r.PreferredChannels()
			continue
		}

@@ -445,7 +317,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
	for fp, a := range r.Active {
		labelsJSON, err := json.Marshal(a.QueryResultLables)
		if err != nil {
			r.logger.ErrorContext(ctx, "error marshaling labels", "error", err, "labels", a.Labels)
			zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels))
		}
		if _, ok := resultFPs[fp]; !ok {
			// If the alert was previously firing, keep it around for a given
@@ -488,30 +360,6 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
				Value: a.Value,
			})
		}

		// We need to change firing alert to recovering if the returned sample meets recovery threshold
		changeFiringToRecovering := a.State == model.StateFiring && a.IsRecovering
		// We need to change recovering alerts to firing if the returned sample meets target threshold
		changeRecoveringToFiring := a.State == model.StateRecovering && !a.IsRecovering && !a.Missing
		// in any of the above case we need to update the status of alert
		if changeFiringToRecovering || changeRecoveringToFiring {
			state := model.StateRecovering
			if changeRecoveringToFiring {
				state = model.StateFiring
			}
			a.State = state
			r.logger.DebugContext(ctx, "converting alert state", "name", r.Name(), "state", state)
			itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
				RuleID: r.ID(),
				RuleName: r.Name(),
				State: state,
				StateChanged: true,
				UnixMilli: ts.UnixMilli(),
				Labels: model.LabelsString(labelsJSON),
				Fingerprint: a.QueryResultLables.Hash(),
				Value: a.Value,
			})
		}
	}

	currentState := r.State()
@@ -539,7 +387,7 @@ func (r *AnomalyRule) String() string {
		PreferredChannels: r.PreferredChannels(),
	}

	byt, err := json.Marshal(ar)
	byt, err := yaml.Marshal(ar)
	if err != nil {
		return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
	}

@@ -3,10 +3,8 @@ package rules
import (
	"context"
	"fmt"

	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
@@ -22,10 +20,6 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
	var task baserules.Task

	ruleId := baserules.RuleIdFromTaskName(opts.TaskName)
	evaluation, err := opts.Rule.Evaluation.GetEvaluation()
	if err != nil {
		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "evaluation is invalid: %v", err)
	}
	if opts.Rule.RuleType == ruletypes.RuleTypeThreshold {
		// create a threshold rule
		tr, err := baserules.NewThresholdRule(
@@ -33,8 +27,6 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
			opts.OrgID,
			opts.Rule,
			opts.Reader,
			opts.Querier,
			opts.SLogger,
			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
			baserules.WithSQLStore(opts.SQLStore),
		)
@@ -46,7 +38,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
		rules = append(rules, tr)

		// create ch rule task for evaluation
		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(evaluation.GetFrequency()), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)
		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)

	} else if opts.Rule.RuleType == ruletypes.RuleTypeProm {

@@ -55,7 +47,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
			ruleId,
			opts.OrgID,
			opts.Rule,
			opts.SLogger,
			opts.Logger,
			opts.Reader,
			opts.ManagerOpts.Prometheus,
			baserules.WithSQLStore(opts.SQLStore),
@@ -68,7 +60,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
		rules = append(rules, pr)

		// create promql rule task for evaluation
		task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(evaluation.GetFrequency()), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)
		task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)

	} else if opts.Rule.RuleType == ruletypes.RuleTypeAnomaly {
		// create anomaly rule
@@ -77,8 +69,6 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
			opts.OrgID,
			opts.Rule,
			opts.Reader,
			opts.Querier,
			opts.SLogger,
			opts.Cache,
			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
			baserules.WithSQLStore(opts.SQLStore),
@@ -90,7 +80,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
		rules = append(rules, ar)

		// create anomaly rule task for evaluation
		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(evaluation.GetFrequency()), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)
		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)

	} else {
		return nil, fmt.Errorf("unsupported rule type %s. Supported types: %s, %s", opts.Rule.RuleType, ruletypes.RuleTypeProm, ruletypes.RuleTypeThreshold)
@@ -126,6 +116,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
	if parsedRule.RuleType == ruletypes.RuleTypeThreshold {

		// add special labels for test alerts
		parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target)
		parsedRule.Labels[labels.RuleSourceLabel] = ""
		parsedRule.Labels[labels.AlertRuleIdLabel] = ""

@@ -135,8 +126,6 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
			opts.OrgID,
			parsedRule,
			opts.Reader,
			opts.Querier,
			opts.SLogger,
			baserules.WithSendAlways(),
			baserules.WithSendUnmatched(),
			baserules.WithSQLStore(opts.SQLStore),
@@ -154,7 +143,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
			alertname,
			opts.OrgID,
			parsedRule,
			opts.SLogger,
			opts.Logger,
			opts.Reader,
			opts.ManagerOpts.Prometheus,
			baserules.WithSendAlways(),
@@ -173,8 +162,6 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
			opts.OrgID,
			parsedRule,
			opts.Reader,
			opts.Querier,
			opts.SLogger,
			opts.Cache,
			baserules.WithSendAlways(),
			baserules.WithSendUnmatched(),
@@ -30,8 +30,6 @@ func (formatter Formatter) DataTypeOf(dataType string) sqlschema.DataType {
		return sqlschema.DataTypeBoolean
	case "VARCHAR", "CHARACTER VARYING", "CHARACTER":
		return sqlschema.DataTypeText
	case "BYTEA":
		return sqlschema.DataTypeBytea
	}

	return formatter.Formatter.DataTypeOf(dataType)

@@ -2,7 +2,6 @@ package postgressqlschema

import (
	"context"
	"database/sql"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/sqlschema"
@@ -48,45 +47,50 @@ func (provider *provider) Operator() sqlschema.SQLOperator {
}

func (provider *provider) GetTable(ctx context.Context, tableName sqlschema.TableName) (*sqlschema.Table, []*sqlschema.UniqueConstraint, error) {
	columns := []struct {
		ColumnName string `bun:"column_name"`
		Nullable bool `bun:"nullable"`
		SQLDataType string `bun:"udt_name"`
		DefaultVal *string `bun:"column_default"`
	}{}

	err := provider.
	rows, err := provider.
		sqlstore.
		BunDB().
		NewRaw(`
		QueryContext(ctx, `
SELECT
	c.column_name,
	c.is_nullable = 'YES' as nullable,
	c.is_nullable = 'YES',
	c.udt_name,
	c.column_default
FROM
	information_schema.columns AS c
WHERE
	c.table_name = ?`, string(tableName)).
		Scan(ctx, &columns)
	c.table_name = ?`, string(tableName))
	if err != nil {
		return nil, nil, err
	}
	if len(columns) == 0 {
		return nil, nil, sql.ErrNoRows
	}

	sqlschemaColumns := make([]*sqlschema.Column, 0)
	for _, column := range columns {
		columnDefault := ""
		if column.DefaultVal != nil {
			columnDefault = *column.DefaultVal
	defer func() {
		if err := rows.Close(); err != nil {
			provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
		}
	}()

	columns := make([]*sqlschema.Column, 0)
	for rows.Next() {
		var (
			name string
			sqlDataType string
			nullable bool
			defaultVal *string
		)
		if err := rows.Scan(&name, &nullable, &sqlDataType, &defaultVal); err != nil {
			return nil, nil, err
		}

		sqlschemaColumns = append(sqlschemaColumns, &sqlschema.Column{
			Name: sqlschema.ColumnName(column.ColumnName),
			Nullable: column.Nullable,
			DataType: provider.fmter.DataTypeOf(column.SQLDataType),
		columnDefault := ""
		if defaultVal != nil {
			columnDefault = *defaultVal
		}

		columns = append(columns, &sqlschema.Column{
			Name: sqlschema.ColumnName(name),
			Nullable: nullable,
			DataType: provider.fmter.DataTypeOf(sqlDataType),
			Default: columnDefault,
		})
	}
@@ -204,7 +208,7 @@ WHERE

	return &sqlschema.Table{
		Name: tableName,
		Columns: sqlschemaColumns,
		Columns: columns,
		PrimaryKeyConstraint: primaryKeyConstraint,
		ForeignKeyConstraints: foreignKeyConstraints,
	}, uniqueConstraints, nil
@@ -1,153 +0,0 @@
package postgressqlstore

import (
	"strings"

	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/uptrace/bun/schema"
)

type formatter struct {
	bunf schema.Formatter
}

func newFormatter(dialect schema.Dialect) sqlstore.SQLFormatter {
	return &formatter{bunf: schema.NewFormatter(dialect)}
}

func (f *formatter) JSONExtractString(column, path string) []byte {
	var sql []byte
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, f.convertJSONPathToPostgres(path)...)
	return sql
}

func (f *formatter) JSONType(column, path string) []byte {
	var sql []byte
	sql = append(sql, "jsonb_typeof("...)
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, f.convertJSONPathToPostgresWithMode(path, false)...)
	sql = append(sql, ')')
	return sql
}

func (f *formatter) JSONIsArray(column, path string) []byte {
	var sql []byte
	sql = append(sql, f.JSONType(column, path)...)
	sql = append(sql, " = "...)
	sql = schema.Append(f.bunf, sql, "array")
	return sql
}

func (f *formatter) JSONArrayElements(column, path, alias string) ([]byte, []byte) {
	var sql []byte
	sql = append(sql, "jsonb_array_elements("...)
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, f.convertJSONPathToPostgresWithMode(path, false)...)
	sql = append(sql, ") AS "...)
	sql = f.bunf.AppendIdent(sql, alias)

	return sql, []byte(alias)
}

func (f *formatter) JSONArrayOfStrings(column, path, alias string) ([]byte, []byte) {
	var sql []byte
	sql = append(sql, "jsonb_array_elements_text("...)
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, f.convertJSONPathToPostgresWithMode(path, false)...)
	sql = append(sql, ") AS "...)
	sql = f.bunf.AppendIdent(sql, alias)

	return sql, append([]byte(alias), "::text"...)
}

func (f *formatter) JSONKeys(column, path, alias string) ([]byte, []byte) {
	var sql []byte
	sql = append(sql, "jsonb_each("...)
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, f.convertJSONPathToPostgresWithMode(path, false)...)
	sql = append(sql, ") AS "...)
	sql = f.bunf.AppendIdent(sql, alias)

	return sql, append([]byte(alias), ".key"...)
}

func (f *formatter) JSONArrayAgg(expression string) []byte {
	var sql []byte
	sql = append(sql, "jsonb_agg("...)
	sql = append(sql, expression...)
	sql = append(sql, ')')
	return sql
}

func (f *formatter) JSONArrayLiteral(values ...string) []byte {
	var sql []byte
	sql = append(sql, "jsonb_build_array("...)
	for idx, value := range values {
		if idx > 0 {
			sql = append(sql, ", "...)
		}
		sql = schema.Append(f.bunf, sql, value)
	}
	sql = append(sql, ')')
	return sql
}

func (f *formatter) TextToJsonColumn(column string) []byte {
	var sql []byte
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, "::jsonb"...)
	return sql
}

func (f *formatter) convertJSONPathToPostgres(jsonPath string) []byte {
	return f.convertJSONPathToPostgresWithMode(jsonPath, true)
}

func (f *formatter) convertJSONPathToPostgresWithMode(jsonPath string, asText bool) []byte {
	path := strings.TrimPrefix(strings.TrimPrefix(jsonPath, "$"), ".")

	if path == "" {
		return nil
	}

	parts := strings.Split(path, ".")

	var validParts []string
	for _, part := range parts {
		if part != "" {
			validParts = append(validParts, part)
		}
	}

	if len(validParts) == 0 {
		return nil
	}

	var result []byte

	for idx, part := range validParts {
		if idx == len(validParts)-1 {
			if asText {
				result = append(result, "->>"...)
			} else {
				result = append(result, "->"...)
			}
			result = schema.Append(f.bunf, result, part)
			return result
		}

		result = append(result, "->"...)
		result = schema.Append(f.bunf, result, part)
	}

	return result
}

func (f *formatter) LowerExpression(expression string) []byte {
	var sql []byte
	sql = append(sql, "lower("...)
	sql = append(sql, expression...)
	sql = append(sql, ')')
	return sql
}
@@ -1,500 +0,0 @@
package postgressqlstore

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/uptrace/bun/dialect/pgdialect"
)

func TestJSONExtractString(t *testing.T) {
	tests := []struct {
		name string
		column string
		path string
		expected string
	}{
		{
			name: "simple path",
			column: "data",
			path: "$.field",
			expected: `"data"->>'field'`,
		},
		{
			name: "nested path",
			column: "metadata",
			path: "$.user.name",
			expected: `"metadata"->'user'->>'name'`,
		},
		{
			name: "deeply nested path",
			column: "json_col",
			path: "$.level1.level2.level3",
			expected: `"json_col"->'level1'->'level2'->>'level3'`,
		},
		{
			name: "root path",
			column: "json_col",
			path: "$",
			expected: `"json_col"`,
		},
		{
			name: "empty path",
			column: "data",
			path: "",
			expected: `"data"`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.JSONExtractString(tt.column, tt.path))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestJSONType(t *testing.T) {
	tests := []struct {
		name string
		column string
		path string
		expected string
	}{
		{
			name: "simple path",
			column: "data",
			path: "$.field",
			expected: `jsonb_typeof("data"->'field')`,
		},
		{
			name: "nested path",
			column: "metadata",
			path: "$.user.age",
			expected: `jsonb_typeof("metadata"->'user'->'age')`,
		},
		{
			name: "root path",
			column: "json_col",
			path: "$",
			expected: `jsonb_typeof("json_col")`,
		},
		{
			name: "empty path",
			column: "data",
			path: "",
			expected: `jsonb_typeof("data")`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.JSONType(tt.column, tt.path))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestJSONIsArray(t *testing.T) {
	tests := []struct {
		name string
		column string
		path string
		expected string
	}{
		{
			name: "simple path",
			column: "data",
			path: "$.items",
			expected: `jsonb_typeof("data"->'items') = 'array'`,
		},
		{
			name: "nested path",
			column: "metadata",
			path: "$.user.tags",
			expected: `jsonb_typeof("metadata"->'user'->'tags') = 'array'`,
		},
		{
			name: "root path",
			column: "json_col",
			path: "$",
			expected: `jsonb_typeof("json_col") = 'array'`,
		},
		{
			name: "empty path",
			column: "data",
			path: "",
			expected: `jsonb_typeof("data") = 'array'`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.JSONIsArray(tt.column, tt.path))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestJSONArrayElements(t *testing.T) {
	tests := []struct {
		name string
		column string
		path string
		alias string
		expected string
	}{
		{
			name: "root path with dollar sign",
			column: "data",
			path: "$",
			alias: "elem",
			expected: `jsonb_array_elements("data") AS "elem"`,
		},
		{
			name: "root path empty",
			column: "data",
			path: "",
			alias: "elem",
			expected: `jsonb_array_elements("data") AS "elem"`,
		},
		{
			name: "nested path",
			column: "metadata",
			path: "$.items",
			alias: "item",
			expected: `jsonb_array_elements("metadata"->'items') AS "item"`,
		},
		{
			name: "deeply nested path",
			column: "json_col",
			path: "$.user.tags",
			alias: "tag",
			expected: `jsonb_array_elements("json_col"->'user'->'tags') AS "tag"`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got, _ := f.JSONArrayElements(tt.column, tt.path, tt.alias)
			assert.Equal(t, tt.expected, string(got))
		})
	}
}

func TestJSONArrayOfStrings(t *testing.T) {
	tests := []struct {
		name string
		column string
		path string
		alias string
		expected string
	}{
		{
			name: "root path with dollar sign",
			column: "data",
			path: "$",
			alias: "str",
			expected: `jsonb_array_elements_text("data") AS "str"`,
		},
		{
			name: "root path empty",
			column: "data",
			path: "",
			alias: "str",
			expected: `jsonb_array_elements_text("data") AS "str"`,
		},
		{
			name: "nested path",
			column: "metadata",
			path: "$.strings",
			alias: "s",
			expected: `jsonb_array_elements_text("metadata"->'strings') AS "s"`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got, _ := f.JSONArrayOfStrings(tt.column, tt.path, tt.alias)
			assert.Equal(t, tt.expected, string(got))
		})
	}
}

func TestJSONKeys(t *testing.T) {
	tests := []struct {
		name string
		column string
		path string
		alias string
		expected string
	}{
		{
			name: "root path with dollar sign",
			column: "data",
			path: "$",
			alias: "k",
			expected: `jsonb_each("data") AS "k"`,
		},
		{
			name: "root path empty",
			column: "data",
			path: "",
			alias: "k",
			expected: `jsonb_each("data") AS "k"`,
		},
		{
			name: "nested path",
			column: "metadata",
			path: "$.object",
			alias: "key",
			expected: `jsonb_each("metadata"->'object') AS "key"`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got, _ := f.JSONKeys(tt.column, tt.path, tt.alias)
			assert.Equal(t, tt.expected, string(got))
		})
	}
}

func TestJSONArrayAgg(t *testing.T) {
	tests := []struct {
		name string
		expression string
		expected string
	}{
		{
			name: "simple column",
			expression: "id",
			expected: "jsonb_agg(id)",
		},
		{
			name: "expression with function",
			expression: "DISTINCT name",
			expected: "jsonb_agg(DISTINCT name)",
		},
		{
			name: "complex expression",
			expression: "data->>'field'",
			expected: "jsonb_agg(data->>'field')",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.JSONArrayAgg(tt.expression))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestJSONArrayLiteral(t *testing.T) {
	tests := []struct {
		name string
		values []string
		expected string
	}{
		{
			name: "empty array",
			values: []string{},
			expected: "jsonb_build_array()",
		},
		{
			name: "single value",
			values: []string{"value1"},
			expected: "jsonb_build_array('value1')",
		},
		{
			name: "multiple values",
			values: []string{"value1", "value2", "value3"},
			expected: "jsonb_build_array('value1', 'value2', 'value3')",
		},
		{
			name: "values with special characters",
			values: []string{"test", "with space", "with-dash"},
			expected: "jsonb_build_array('test', 'with space', 'with-dash')",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.JSONArrayLiteral(tt.values...))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestConvertJSONPathToPostgresWithMode(t *testing.T) {
	tests := []struct {
		name string
		jsonPath string
		asText bool
		expected string
	}{
		{
			name: "simple path as text",
			jsonPath: "$.field",
			asText: true,
			expected: "->>'field'",
		},
		{
			name: "simple path as json",
			jsonPath: "$.field",
			asText: false,
			expected: "->'field'",
		},
		{
			name: "nested path as text",
			jsonPath: "$.user.name",
			asText: true,
			expected: "->'user'->>'name'",
		},
		{
			name: "nested path as json",
			jsonPath: "$.user.name",
			asText: false,
			expected: "->'user'->'name'",
		},
		{
			name: "deeply nested as text",
			jsonPath: "$.a.b.c.d",
			asText: true,
			expected: "->'a'->'b'->'c'->>'d'",
		},
		{
			name: "root path",
			jsonPath: "$",
			asText: true,
			expected: "",
		},
		{
			name: "empty path",
			jsonPath: "",
			asText: true,
			expected: "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New()).(*formatter)
			got := string(f.convertJSONPathToPostgresWithMode(tt.jsonPath, tt.asText))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestTextToJsonColumn(t *testing.T) {
	tests := []struct {
		name string
		column string
		expected string
	}{
		{
			name: "simple column name",
			column: "data",
			expected: `"data"::jsonb`,
		},
		{
			name: "column with underscore",
			column: "user_data",
			expected: `"user_data"::jsonb`,
		},
		{
			name: "column with special characters",
			column: "json-col",
			expected: `"json-col"::jsonb`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.TextToJsonColumn(tt.column))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestLowerExpression(t *testing.T) {
	tests := []struct {
		name string
		expr string
		expected string
	}{
		{
			name: "simple column name",
			expr: "name",
			expected: "lower(name)",
		},
		{
			name: "quoted column identifier",
			expr: `"column_name"`,
			expected: `lower("column_name")`,
		},
		{
			name: "jsonb text extraction",
			expr: "data->>'field'",
			expected: "lower(data->>'field')",
		},
		{
			name: "nested jsonb extraction",
			expr: "metadata->'user'->>'name'",
			expected: "lower(metadata->'user'->>'name')",
		},
		{
			name: "jsonb_typeof expression",
			expr: "jsonb_typeof(data->'field')",
			expected: "lower(jsonb_typeof(data->'field'))",
		},
		{
			name: "string concatenation",
			expr: "first_name || ' ' || last_name",
			expected: "lower(first_name || ' ' || last_name)",
		},
		{
			name: "CAST expression",
			expr: "CAST(value AS TEXT)",
			expected: "lower(CAST(value AS TEXT))",
		},
		{
			name: "COALESCE expression",
			expr: "COALESCE(name, 'default')",
			expected: "lower(COALESCE(name, 'default'))",
		},
		{
			name: "subquery column",
			expr: "users.email",
			expected: "lower(users.email)",
		},
		{
			name: "quoted identifier with special chars",
			expr: `"user-name"`,
			expected: `lower("user-name")`,
		},
		{
			name: "jsonb to text cast",
			expr: "data::text",
			expected: "lower(data::text)",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.LowerExpression(tt.expr))
			assert.Equal(t, tt.expected, got)
		})
	}
}
@@ -15,11 +15,10 @@ import (
)

type provider struct {
	settings factory.ScopedProviderSettings
	sqldb *sql.DB
	bundb *sqlstore.BunDB
	dialect *dialect
	formatter sqlstore.SQLFormatter
	settings factory.ScopedProviderSettings
	sqldb *sql.DB
	bundb *sqlstore.BunDB
	dialect *dialect
}

func NewFactory(hookFactories ...factory.ProviderFactory[sqlstore.SQLStoreHook, sqlstore.Config]) factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config] {
@@ -56,14 +55,11 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config

	sqldb := stdlib.OpenDBFromPool(pool)

	pgDialect := pgdialect.New()
	bunDB := sqlstore.NewBunDB(settings, sqldb, pgDialect, hooks)
	return &provider{
		settings: settings,
		sqldb: sqldb,
		bundb: bunDB,
		dialect: new(dialect),
		formatter: newFormatter(bunDB.Dialect()),
		settings: settings,
		sqldb: sqldb,
		bundb: sqlstore.NewBunDB(settings, sqldb, pgdialect.New(), hooks),
		dialect: new(dialect),
	}, nil
}

@@ -79,10 +75,6 @@ func (provider *provider) Dialect() sqlstore.SQLDialect {
	return provider.dialect
}

func (provider *provider) Formatter() sqlstore.SQLFormatter {
	return provider.formatter
}

func (provider *provider) BunDBCtx(ctx context.Context) bun.IDB {
	return provider.bundb.BunDBCtx(ctx)
}
@@ -1,10 +1,10 @@
package zeus

import (
	"fmt"
	neturl "net/url"
	"sync"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/zeus"
)

@@ -24,17 +24,17 @@ func Config() zeus.Config {
	once.Do(func() {
		parsedURL, err := neturl.Parse(url)
		if err != nil {
			panic(errors.WrapInternalf(err, errors.CodeInternal, "invalid zeus URL"))
			panic(fmt.Errorf("invalid zeus URL: %w", err))
		}

		deprecatedParsedURL, err := neturl.Parse(deprecatedURL)
		if err != nil {
			panic(errors.WrapInternalf(err, errors.CodeInternal, "invalid zeus deprecated URL"))
			panic(fmt.Errorf("invalid zeus deprecated URL: %w", err))
		}

		config = zeus.Config{URL: parsedURL, DeprecatedURL: deprecatedParsedURL}
		if err := config.Validate(); err != nil {
			panic(errors.WrapInternalf(err, errors.CodeInternal, "invalid zeus config"))
			panic(fmt.Errorf("invalid zeus config: %w", err))
		}
	})

@@ -1,484 +0,0 @@
# Persona
You are an expert developer with deep knowledge of Jest, React Testing Library, MSW, and TypeScript, tasked with creating unit tests for this repository.

# Auto-detect TypeScript Usage
Check for TypeScript in the project through tsconfig.json or package.json dependencies.
Adjust syntax based on this detection.
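
A minimal sketch of such a detection (the file names are the standard Node conventions; the exact heuristic is an assumption):

```ts
import { existsSync, readFileSync } from 'fs';

// Assumed heuristic: a tsconfig.json or a typescript dependency implies TypeScript.
const pkg = JSON.parse(readFileSync('package.json', 'utf8'));
const usesTypeScript =
  existsSync('tsconfig.json') ||
  Boolean(pkg.dependencies?.typescript || pkg.devDependencies?.typescript);
```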

# TypeScript Type Safety for Jest Tests
**CRITICAL**: All Jest tests MUST be fully type-safe with proper TypeScript types.

**Type Safety Requirements:**
- Use proper TypeScript interfaces for all mock data
- Type all Jest mock functions with `jest.MockedFunction<T>`
- Use generic types for React components and hooks
- Define proper return types for mock functions
- Use `as const` for literal types when needed
- Avoid `any` type – use proper typing instead
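
For the `as const` point, a small illustration (the fixture values here are invented):

```ts
// `as const` narrows values to literal types instead of widening to string.
const SEVERITIES = ['info', 'warning', 'error'] as const;
type Severity = typeof SEVERITIES[number]; // 'info' | 'warning' | 'error'

const mockAlert: { name: string; severity: Severity } = {
  name: 'High CPU usage',
  severity: 'error',
};
```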

# Unit Testing Focus
- Focus on critical functionality (business logic, utility functions, component behavior)
- Mock dependencies (API calls, external modules) before imports
- Test multiple data scenarios (valid inputs, invalid inputs, edge cases)
- Write maintainable tests with descriptive names grouped in describe blocks

# Global vs Local Mocks
**Use Global Mocks for:**
- High-frequency dependencies (20+ test files)
- Core infrastructure (react-router-dom, react-query, antd)
- Standard implementations across the app
- Browser APIs (ResizeObserver, matchMedia, localStorage)
- Utility libraries (date-fns, lodash)

**Use Local Mocks for:**
- Business logic dependencies (5-15 test files)
- Test-specific behavior (different data per test)
- API endpoints with specific responses
- Domain-specific components
- Error scenarios and edge cases

**Global Mock Files Available (from jest.config.ts):**
- `uplot` → `__mocks__/uplotMock.ts`

# Repo-specific Testing Conventions

## Imports
Always import from our harness:
```ts
import { render, screen, userEvent, waitFor } from 'tests/test-utils';
```
For API mocks:
```ts
import { server, rest } from 'mocks-server/server';
```
Do not import directly from `@testing-library/react`.

## Router
Use the router built into render:
```ts
render(<Page />, undefined, { initialRoute: '/traces-explorer' });
```
Only mock `useLocation` / `useParams` if the test depends on them.

## Hook Mocks
Pattern:
```ts
import useFoo from 'hooks/useFoo';

jest.mock('hooks/useFoo');
const mockUseFoo = jest.mocked(useFoo);
mockUseFoo.mockReturnValue({ /* minimal shape */ } as any);
```
Prefer helpers (`rqSuccess`, `rqLoading`, `rqError`) for React Query results.
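
With those helpers, the pattern above usually reduces to the following sketch (the helpers' export location and exact signatures are assumptions):

```ts
// Assumption: the React Query result helpers are exported from the shared harness.
import { rqSuccess, rqLoading } from 'tests/test-utils';
import useFoo from 'hooks/useFoo';

jest.mock('hooks/useFoo');
const mockUseFoo = jest.mocked(useFoo);

// Happy path: a successful React Query result with a typed payload.
mockUseFoo.mockReturnValue(rqSuccess({ items: [] }));

// Loading state for the same hook in another test.
mockUseFoo.mockReturnValue(rqLoading());
```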

## MSW
Global MSW server runs automatically.
Override per-test:
```ts
server.use(
  rest.get('*/api/v1/foo', (_req, res, ctx) => res(ctx.status(200), ctx.json({ ok: true })))
);
```
Keep large responses in `mocks-server/__mockdata_`.

## Interactions
- Prefer `userEvent` for real user interactions (click, type, select, tab).
- Use `fireEvent` only for low-level/programmatic events not covered by `userEvent` (e.g., scroll, resize, setting `element.scrollTop` for virtualization). Wrap in `act(...)` if needed.
- Always await interactions:
```ts
const user = userEvent.setup({ pointerEventsCheck: 0 });
await user.click(screen.getByRole('button', { name: /save/i }));
```

```ts
// Example: virtualized list scroll (no userEvent helper)
const scroller = container.querySelector('[data-test-id="virtuoso-scroller"]') as HTMLElement;
scroller.scrollTop = targetScrollTop;
act(() => { fireEvent.scroll(scroller); });
```

## Timers
❌ No global fake timers.
✅ Per-test only, for debounce/throttle:
```ts
jest.useFakeTimers();
const user = userEvent.setup({ advanceTimers: (ms) => jest.advanceTimersByTime(ms) });
await user.type(screen.getByRole('textbox'), 'query');
jest.advanceTimersByTime(400);
jest.useRealTimers();
```

## Queries
Prefer accessible queries (`getByRole`, `findByRole`, `getByLabelText`).
Fallback: visible text.
Last resort: `data-testid`.
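
A quick sketch of that priority order (the component, copy, and test id are hypothetical):

```ts
// 1. Accessible query - resilient to markup changes.
await user.click(screen.getByRole('button', { name: /save view/i }));

// 2. Visible text - fine when there is no useful role or label.
expect(screen.getByText(/no data for this time range/i)).toBeInTheDocument();

// 3. data-testid - last resort for markup with no accessible handle.
expect(screen.getByTestId('query-builder-footer')).toBeVisible();
```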

# Example Test (using only configured global mocks)
```ts
import { render, screen, userEvent, waitFor } from 'tests/test-utils';
import { server, rest } from 'mocks-server/server';

import MyComponent from '../MyComponent';

describe('MyComponent', () => {
  it('renders and interacts', async () => {
    const user = userEvent.setup({ pointerEventsCheck: 0 });

    server.use(
      rest.get('*/api/v1/example', (_req, res, ctx) => res(ctx.status(200), ctx.json({ value: 42 })))
    );

    render(<MyComponent />, undefined, { initialRoute: '/foo' });

    expect(await screen.findByText(/value: 42/i)).toBeInTheDocument();
    await user.click(screen.getByRole('button', { name: /refresh/i }));
    await waitFor(() => expect(screen.getByText(/loading/i)).toBeInTheDocument());
  });
});
```

# Anti-patterns
❌ Importing RTL directly
❌ Using global fake timers
❌ Wrapping render in `act(...)`
❌ Mocking infra dependencies locally (router, react-query)
✅ Use our harness (`tests/test-utils`)
✅ Use MSW for API overrides
✅ Use userEvent + await
✅ Pin time only in tests that assert relative dates

# Best Practices
- **Critical Functionality**: Prioritize testing business logic and utilities
- **Dependency Mocking**: Global mocks for infra, local mocks for business logic
- **Data Scenarios**: Always test valid, invalid, and edge cases
- **Descriptive Names**: Make test intent clear
- **Organization**: Group related tests in describe blocks
- **Consistency**: Match repo conventions
- **Edge Cases**: Test null, undefined, and unexpected values
- **Limit Scope**: 3–5 focused tests per file
- **Use Helpers**: `rqSuccess`, `makeUser`, etc. (see the sketch after this list)
- **No Any**: Enforce type safety
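
A fragment sketching the helper-based style referenced above (the `makeUser` and `rqSuccess` signatures are assumptions):

```ts
// Assumption: makeUser builds a valid User fixture and accepts overrides.
const admin = makeUser({ role: 'ADMIN' });

// Assumption: rqSuccess wraps a payload in a successful React Query result.
mockUseUser.mockReturnValue(rqSuccess(admin));
```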

# TypeScript Type Safety Examples

## Proper Mock Typing
```ts
// ✅ GOOD - Properly typed mocks
interface User {
  id: number;
  name: string;
  email: string;
}

interface ApiResponse<T> {
  data: T;
  status: number;
  message: string;
}

// Type the mock functions
const mockFetchUser = jest.fn() as jest.MockedFunction<(id: number) => Promise<ApiResponse<User>>>;
const mockUpdateUser = jest.fn() as jest.MockedFunction<(user: User) => Promise<ApiResponse<User>>>;

// Mock implementation with proper typing
mockFetchUser.mockResolvedValue({
  data: { id: 1, name: 'John Doe', email: 'john@example.com' },
  status: 200,
  message: 'Success'
});

// ❌ BAD - Using any type (kept as a comment: it would also redeclare mockFetchUser)
// const mockFetchUser = jest.fn() as any; // Don't do this
```

## React Component Testing with Types
```ts
// ✅ GOOD - Properly typed component testing
interface ComponentProps {
  title: string;
  data: User[];
  onUserSelect: (user: User) => void;
  isLoading?: boolean;
}

const TestComponent: React.FC<ComponentProps> = ({ title, data, onUserSelect, isLoading = false }) => {
  // Component implementation
};

describe('TestComponent', () => {
  it('should render with proper props', () => {
    // Arrange - Type the props properly
    const mockProps: ComponentProps = {
      title: 'Test Title',
      data: [{ id: 1, name: 'John', email: 'john@example.com' }],
      onUserSelect: jest.fn() as jest.MockedFunction<(user: User) => void>,
      isLoading: false
    };

    // Act
    render(<TestComponent {...mockProps} />);

    // Assert
    expect(screen.getByText('Test Title')).toBeInTheDocument();
  });
});
```

## Hook Testing with Types
```ts
// ✅ GOOD - Properly typed hook testing
interface UseUserDataReturn {
  user: User | null;
  loading: boolean;
  error: string | null;
  refetch: () => void;
}

const useUserData = (id: number): UseUserDataReturn => {
  // Hook implementation
};

describe('useUserData', () => {
  it('should return user data with proper typing', () => {
    // Arrange
    const mockUser: User = { id: 1, name: 'John', email: 'john@example.com' };
    mockFetchUser.mockResolvedValue({
      data: mockUser,
      status: 200,
      message: 'Success'
    });

    // Act
    const { result } = renderHook(() => useUserData(1));

    // Assert
    expect(result.current.user).toEqual(mockUser);
    expect(result.current.loading).toBe(false);
    expect(result.current.error).toBeNull();
  });
});
```

## Global Mock Type Safety
```ts
// ✅ GOOD - Type-safe global mocks
// In __mocks__/routerMock.ts
export const mockUseLocation = (overrides: Partial<Location> = {}): Location => ({
  pathname: '/traces',
  search: '',
  hash: '',
  state: null,
  key: 'test-key',
  ...overrides,
});

// In test files
const location = useLocation(); // Properly typed from global mock
expect(location.pathname).toBe('/traces');
```

# TypeScript Configuration for Jest

## Required Jest Configuration
```json
// jest.config.ts
{
  "preset": "ts-jest/presets/js-with-ts-esm",
  "globals": {
    "ts-jest": {
      "useESM": true,
      "isolatedModules": true,
      "tsconfig": "<rootDir>/tsconfig.jest.json"
    }
  },
  "extensionsToTreatAsEsm": [".ts", ".tsx"],
  "moduleFileExtensions": ["ts", "tsx", "js", "json"]
}
```

## TypeScript Jest Configuration
```json
// tsconfig.jest.json
{
  "extends": "./tsconfig.json",
  "compilerOptions": {
    "types": ["jest", "@testing-library/jest-dom"],
    "esModuleInterop": true,
    "allowSyntheticDefaultImports": true,
    "moduleResolution": "node"
  },
  "include": [
    "src/**/*",
    "**/*.test.ts",
    "**/*.test.tsx",
    "__mocks__/**/*"
  ]
}
```

## Common Type Safety Patterns

### Mock Function Typing
```ts
// ✅ GOOD - Proper mock function typing
const mockApiCall = jest.fn() as jest.MockedFunction<typeof apiCall>;
const mockEventHandler = jest.fn() as jest.MockedFunction<(event: Event) => void>;

// ❌ BAD - Using any
// const mockApiCall = jest.fn() as any;
```

### Generic Mock Typing
```ts
// ✅ GOOD - Generic mock typing
interface MockApiResponse<T> {
  data: T;
  status: number;
}

const mockFetchData = jest.fn() as jest.MockedFunction<
  <T>(endpoint: string) => Promise<MockApiResponse<T>>
>;

// Usage - configure the mock itself; calling it first would return a
// Promise, which has no mockResolvedValue
mockFetchData.mockResolvedValue({
  data: { id: 1, name: 'John' },
  status: 200
});
```

### React Testing Library with Types
```ts
// ✅ GOOD - Typed testing utilities
import { render, screen, RenderResult } from '@testing-library/react';
import { ComponentProps } from 'react';

type TestComponentProps = ComponentProps<typeof TestComponent>;

const renderTestComponent = (props: Partial<TestComponentProps> = {}): RenderResult => {
  const defaultProps: TestComponentProps = {
    title: 'Test',
    data: [],
    onSelect: jest.fn(),
    ...props
  };

  return render(<TestComponent {...defaultProps} />);
};
```

### Error Handling with Types
```ts
// ✅ GOOD - Typed error handling
interface ApiError {
  message: string;
  code: number;
  details?: Record<string, unknown>;
}

const mockApiError: ApiError = {
  message: 'API Error',
  code: 500,
  details: { endpoint: '/users' }
};

mockFetchUser.mockRejectedValue(new Error(JSON.stringify(mockApiError)));
```

## Type Safety Checklist
- [ ] All mock functions use `jest.MockedFunction<T>`
- [ ] All mock data has proper interfaces
- [ ] No `any` types in test files
- [ ] Generic types are used where appropriate
- [ ] Error types are properly defined
- [ ] Component props are typed
- [ ] Hook return types are defined
- [ ] API response types are defined
- [ ] Global mocks are type-safe
- [ ] Test utilities are properly typed

# Mock Decision Tree
```
Is it used in 20+ test files?
├─ YES → Use Global Mock
│   ├─ react-router-dom
│   ├─ react-query
│   ├─ antd components
│   └─ browser APIs
│
└─ NO → Is it business logic?
    ├─ YES → Use Local Mock
    │   ├─ API endpoints
    │   ├─ Custom hooks
    │   └─ Domain components
    │
    └─ NO → Is it test-specific?
        ├─ YES → Use Local Mock
        │   ├─ Error scenarios
        │   ├─ Loading states
        │   └─ Specific data
        │
        └─ NO → Consider Global Mock
            └─ If it becomes frequently used
```

# Common Anti-Patterns to Avoid

❌ **Don't mock global dependencies locally:**
```js
// BAD - This is already globally mocked
jest.mock('react-router-dom', () => ({ ... }));
```

❌ **Don't create global mocks for test-specific data:**
```js
// BAD - This should be local
jest.mock('../api/tracesService', () => ({
  getTraces: jest.fn(() => specificTestData)
}));
```

✅ **Do use global mocks for infrastructure:**
```js
// GOOD - Use global mock
import { useLocation } from 'react-router-dom';
```

✅ **Do create local mocks for business logic:**
```js
// GOOD - Local mock for specific test needs
jest.mock('../api/tracesService', () => ({
  getTraces: jest.fn(() => mockTracesData)
}));
```
@@ -1,5 +1,4 @@
node_modules
build
*.typegen.ts
i18-generate-hash.js
src/parser/TraceOperatorParser/**
i18-generate-hash.js

@@ -1,5 +1,4 @@
module.exports = {
	ignorePatterns: ['src/parser/*.ts', 'scripts/update-registry.js'],
	env: {
		browser: true,
		es2021: true,

frontend/.gitignore (vendored)
@@ -2,30 +2,3 @@
# Sentry Config File
.env.sentry-build-plugin
.qodo

# Playwright
node_modules/
/test-results/
/playwright-report/
/blob-report/
/playwright/.cache/
/playwright/test-results/
/playwright/blob-report/
/playwright/playwright-report/

e2e/test-plan/alerts/
e2e/test-plan/dashboards/
e2e/test-plan/exceptions/
e2e/test-plan/external-apis/
e2e/test-plan/help-support/
e2e/test-plan/infrastructure/
e2e/test-plan/logs/
e2e/test-plan/messaging-queues/
e2e/test-plan/metrics/
e2e/test-plan/navigation/
e2e/test-plan/onboarding/
e2e/test-plan/saved-views/
e2e/test-plan/service-map/
e2e/test-plan/services/
e2e/test-plan/traces/
e2e/test-plan/user-preferences/

@@ -8,8 +8,3 @@ public/

# Ignore all JSON files:
**/*.json

# Ignore all files in parser folder:
src/parser/**

src/TraceOperator/parser/**
@@ -1,51 +0,0 @@
/* eslint-disable @typescript-eslint/no-unused-vars */

// Mock for uplot library used in tests
export interface MockUPlotInstance {
	setData: jest.Mock;
	setSize: jest.Mock;
	destroy: jest.Mock;
	redraw: jest.Mock;
	setSeries: jest.Mock;
}

export interface MockUPlotPaths {
	spline: jest.Mock;
	bars: jest.Mock;
}

// Create mock instance methods
const createMockUPlotInstance = (): MockUPlotInstance => ({
	setData: jest.fn(),
	setSize: jest.fn(),
	destroy: jest.fn(),
	redraw: jest.fn(),
	setSeries: jest.fn(),
});

// Create mock paths
const mockPaths: MockUPlotPaths = {
	spline: jest.fn(),
	bars: jest.fn(),
};

// Mock static methods
const mockTzDate = jest.fn(
	(date: Date, _timezone: string) => new Date(date.getTime()),
);

// Mock uPlot constructor - this needs to be a proper constructor function
function MockUPlot(
	_options: unknown,
	_data: unknown,
	_target: HTMLElement,
): MockUPlotInstance {
	return createMockUPlotInstance();
}

// Add static methods to the constructor
MockUPlot.tzDate = mockTzDate;
MockUPlot.paths = mockPaths;

// Export the constructor as default
export default MockUPlot;
@@ -1,29 +0,0 @@
// Mock for useSafeNavigate hook to avoid React Router version conflicts in tests
interface SafeNavigateOptions {
	replace?: boolean;
	state?: unknown;
}

interface SafeNavigateTo {
	pathname?: string;
	search?: string;
	hash?: string;
}

type SafeNavigateToType = string | SafeNavigateTo;

interface UseSafeNavigateReturn {
	safeNavigate: jest.MockedFunction<
		(to: SafeNavigateToType, options?: SafeNavigateOptions) => void
	>;
}

export const useSafeNavigate = (): UseSafeNavigateReturn => ({
	safeNavigate: jest.fn(
		(to: SafeNavigateToType, options?: SafeNavigateOptions) => {
			console.log(`Mock safeNavigate called with:`, to, options);
		},
	) as jest.MockedFunction<
		(to: SafeNavigateToType, options?: SafeNavigateOptions) => void
	>,
});
@@ -1,29 +0,0 @@
# SigNoz E2E Test Plan

This directory contains the structured test plan for the SigNoz application. Each subfolder corresponds to a main module or feature area and contains scenario files for all user journeys, edge cases, and cross-module flows. These documents serve as the basis for generating Playwright MCP-driven E2E tests.

## Structure

- Each main module (e.g., logs, traces, dashboards, alerts, settings) has its own folder or markdown file.
- Each file contains detailed scenario templates, including preconditions, step-by-step actions, and expected outcomes.
- Use these documents to write, review, and update test cases as the application evolves.

## Folders & Files

- `logs/` — Logs module scenarios
- `traces/` — Traces module scenarios
- `metrics/` — Metrics module scenarios
- `dashboards/` — Dashboards module scenarios
- `alerts/` — Alerts module scenarios
- `services/` — Services module scenarios
- `settings/` — Settings and all sub-settings scenarios
- `onboarding/` — Onboarding and signup flows
- `navigation/` — Navigation, sidebar, and cross-module flows
- `exceptions/` — Exception and error handling scenarios
- `external-apis/` — External API monitoring scenarios
- `messaging-queues/` — Messaging queue scenarios
- `infrastructure/` — Infrastructure monitoring scenarios
- `help-support/` — Help & support scenarios
- `user-preferences/` — User preferences and personalization scenarios
- `service-map/` — Service map scenarios
- `saved-views/` — Saved views scenarios
@@ -1,16 +0,0 @@
# Settings Module Test Plan

This folder contains E2E test scenarios for the Settings module and all sub-settings.

## Scenario Categories

- General settings (org/workspace, branding, version info)
- Billing settings
- Members & SSO
- Custom domain
- Integrations
- Notification channels
- API keys
- Ingestion
- Account settings (profile, password, preferences)
- Keyboard shortcuts
@@ -1,43 +0,0 @@
# Account Settings E2E Scenarios (Updated)

## 1. Update Name

- **Precondition:** User is logged in
- **Steps:**
  1. Click 'Update name' button
  2. Edit name field in the modal/dialog
  3. Save changes
- **Expected:** Name is updated in the UI

## 2. Update Email

- **Note:** The email field is not editable in the current UI.

## 3. Reset Password

- **Precondition:** User is logged in
- **Steps:**
  1. Click 'Reset password' button
  2. Complete reset flow (modal/dialog or external flow)
- **Expected:** Password is reset

## 4. Toggle 'Adapt to my timezone'

- **Precondition:** User is logged in
- **Steps:**
  1. Toggle 'Adapt to my timezone' switch
- **Expected:** Timezone adapts accordingly (UI feedback/confirmation should be checked)

## 5. Toggle Theme (Dark/Light)

- **Precondition:** User is logged in
- **Steps:**
  1. Toggle theme radio buttons ('Dark', 'Light Beta')
- **Expected:** Theme changes

## 6. Toggle Sidebar Always Open

- **Precondition:** User is logged in
- **Steps:**
  1. Toggle 'Keep the primary sidebar always open' switch
- **Expected:** Sidebar remains open/closed as per toggle
@@ -1,26 +0,0 @@
# API Keys E2E Scenarios (Updated)

## 1. Create a New API Key

- **Precondition:** User is admin
- **Steps:**
  1. Click 'New Key' button
  2. Enter details in the modal/dialog
  3. Click 'Save'
- **Expected:** API key is created and listed in the table

## 2. Revoke an API Key

- **Precondition:** API key exists
- **Steps:**
  1. In the table, locate the API key row
  2. Click the revoke/delete button (icon button in the Action column)
  3. Confirm if prompted
- **Expected:** API key is revoked/removed from the table

## 3. View API Key Usage

- **Precondition:** API key exists
- **Steps:**
  1. View the 'Last used' and 'Expired' columns in the table
- **Expected:** Usage data is displayed for each API key
@@ -1,17 +0,0 @@
# Billing Settings E2E Scenarios (Updated)

## 1. View Billing Information

- **Precondition:** User is admin
- **Steps:**
  1. Navigate to Billing Settings
  2. Wait for the billing chart/data to finish loading
- **Expected:**
  - Billing heading and subheading are displayed
  - Usage/cost table is visible with columns: Unit, Data Ingested, Price per Unit, Cost (Billing period to date)
  - "Download CSV" and "Manage Billing" buttons are present and enabled after loading
  - Test clicking "Download CSV" and "Manage Billing" for expected behavior (e.g., file download, navigation, or modal)

> Note: If these features are expected to trigger specific flows, document the observed behavior for each button.
@@ -1,18 +0,0 @@
# Custom Domain E2E Scenarios (Updated)

## 1. Add or Update Custom Domain

- **Precondition:** User is admin
- **Steps:**
  1. Click 'Customize team’s URL' button
  2. In the 'Customize your team’s URL' dialog, enter the preferred subdomain
  3. Click 'Apply Changes'
- **Expected:** Domain is set/updated for the team (UI feedback/confirmation should be checked)

## 2. Verify Domain Ownership

- **Note:** No explicit 'Verify' button or flow is present in the current UI. If verification is required, it may be handled automatically or via support.

## 3. Remove a Custom Domain

- **Note:** No explicit 'Remove' button or flow is present in the current UI. The only available action is to update the subdomain.
@@ -1,31 +0,0 @@
# General Settings E2E Scenarios

## 1. View General Settings

- **Precondition:** User is logged in
- **Steps:**
  1. Navigate to General Settings
- **Expected:** General settings are displayed

## 2. Update Organization/Workspace Name

- **Precondition:** User is admin
- **Steps:**
  1. Edit organization/workspace name
  2. Save changes
- **Expected:** Name is updated and visible

## 3. Update Logo or Branding

- **Precondition:** User is admin
- **Steps:**
  1. Upload new logo/branding
  2. Save changes
- **Expected:** Branding is updated

## 4. View Version/Build Info

- **Precondition:** User is logged in
- **Steps:**
  1. View version/build info section
- **Expected:** Version/build info is displayed
@@ -1,20 +0,0 @@
# Ingestion E2E Scenarios (Updated)

## 1. View Ingestion Sources

- **Precondition:** User is admin
- **Steps:**
  1. Navigate to the Integrations page
- **Expected:** List of available data sources/integrations is displayed

## 2. Configure Ingestion Sources

- **Precondition:** User is admin
- **Steps:**
  1. Click 'Configure' for a data source/integration
  2. Complete the configuration flow (modal or page, as available)
- **Expected:** Source is configured (UI feedback/confirmation should be checked)

## 3. Disable/Enable Ingestion

- **Note:** No visible enable/disable toggle for ingestion sources in the current UI. Ingestion is managed via the Integrations configuration flows.
@@ -1,51 +0,0 @@
# Integrations E2E Scenarios (Updated)

## 1. View List of Available Integrations

- **Precondition:** User is logged in
- **Steps:**
  1. Navigate to Integrations
- **Expected:** List of integrations is displayed, each with a name, description, and 'Configure' button

## 2. Search Integrations by Name/Type

- **Precondition:** Integrations exist
- **Steps:**
  1. Enter search/filter criteria in the 'Search for an integration...' box
- **Expected:** Only matching integrations are shown

## 3. Connect a New Integration

- **Precondition:** User is admin
- **Steps:**
  1. Click 'Configure' for an integration
  2. Complete the configuration flow (modal or page, as available)
- **Expected:** Integration is connected/configured (UI feedback/confirmation should be checked)

## 4. Disconnect an Integration

- **Note:** No visible 'Disconnect' button in the main list. This may be available in the configuration flow for a connected integration.

## 5. Configure Integration Settings

- **Note:** Configuration is handled in the flow after clicking 'Configure' for an integration.

## 6. Test Integration Connection

- **Note:** No visible 'Test Connection' button in the main list. This may be available in the configuration flow.

## 7. View Integration Status/Logs

- **Note:** No visible status/logs section in the main list. This may be available in the configuration flow.

## 8. Filter Integrations by Category

- **Note:** No explicit category filter in the current UI, only a search box.

## 9. View Integration Documentation/Help

- **Note:** No visible 'Help/Docs' button in the main list. This may be available in the configuration flow.

## 10. Update Integration Configuration

- **Note:** Configuration is handled in the flow after clicking 'Configure' for an integration.
@@ -1,19 +0,0 @@
# Keyboard Shortcuts E2E Scenarios (Updated)

## 1. View Keyboard Shortcuts

- **Precondition:** User is logged in
- **Steps:**
  1. Navigate to Keyboard Shortcuts
- **Expected:** Shortcuts are displayed in categorized tables (Global, Logs Explorer, Query Builder, Dashboard)

## 2. Customize Keyboard Shortcuts (if supported)

- **Note:** Customization is not available in the current UI. Shortcuts are view-only.

## 3. Use Keyboard Shortcuts for Navigation/Actions

- **Precondition:** User is logged in
- **Steps:**
  1. Use shortcut for navigation/action (e.g., shift+s for Services, cmd+enter for running query)
- **Expected:** Navigation/action is performed as per shortcut (a Playwright sketch follows)
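
A minimal Playwright sketch for scenario 3, in the style of the spec files later in this diff (the Services URL assertion is an assumption):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Keyboard Shortcuts - shift+s navigates to Services', async ({ page }) => {
	await ensureLoggedIn(page);

	// Fire the global shortcut from the app shell.
	await page.keyboard.press('Shift+S');

	// Assumption: the Services list route contains /services.
	await expect(page).toHaveURL(/services/);
});
```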

@@ -1,49 +0,0 @@
# Members & SSO E2E Scenarios (Updated)

## 1. Invite a New Member

- **Precondition:** User is admin
- **Steps:**
  1. Click 'Invite Members' button
  2. In the 'Invite team members' dialog, enter email address, name (optional), and select role
  3. (Optional) Click 'Add another team member' to invite more
  4. Click 'Invite team members' to send invite(s)
- **Expected:** Pending invite appears in the 'Pending Invites' table (a sketch of this flow follows)

## 2. Remove a Member

- **Precondition:** User is admin, member exists
- **Steps:**
  1. In the 'Members' table, locate the member row
  2. Click 'Delete' in the Action column
  3. Confirm removal if prompted
- **Expected:** Member is removed from the table

## 3. Update Member Roles

- **Precondition:** User is admin, member exists
- **Steps:**
  1. In the 'Members' table, locate the member row
  2. Click 'Edit' in the Action column
  3. Change role in the edit dialog/modal
  4. Save changes
- **Expected:** Member role is updated in the table

## 4. Configure SSO

- **Precondition:** User is admin
- **Steps:**
  1. In the 'Authenticated Domains' section, locate the domain row
  2. Click 'Configure SSO' or 'Edit Google Auth' as available
  3. Complete SSO provider configuration in the modal/dialog
  4. Save settings
- **Expected:** SSO is configured for the domain

## 5. Login via SSO

- **Precondition:** SSO is configured
- **Steps:**
  1. Log out from the app
  2. On the login page, click 'Login with SSO'
  3. Complete SSO login flow
- **Expected:** User is logged in via SSO
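
A sketch of scenario 1 in the same spec style (the button names come from the scenario text; the 'members' tab test id and the email field locator are assumptions):

```ts
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Members & SSO - invite a new member', async ({ page }) => {
	await ensureLoggedIn(page);

	await page.getByTestId('settings-nav-item').click();
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();

	// Assumption: the Members & SSO tab uses a 'members' test id.
	await page.getByTestId('members').click();

	await page.getByRole('button', { name: 'Invite Members' }).click();
	// Assumption: the invite dialog exposes an email textbox by placeholder.
	await page.getByPlaceholder('email').fill('new.member@example.com');
	await page.getByRole('button', { name: 'Invite team members' }).click();

	// The pending invite should appear in the 'Pending Invites' table.
	await expect(page.getByText('new.member@example.com')).toBeVisible();
});
```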

@@ -1,39 +0,0 @@
# Notification Channels E2E Scenarios (Updated)

## 1. Add a New Notification Channel

- **Precondition:** User is admin
- **Steps:**
  1. Click 'New Alert Channel' button
  2. In the 'New Notification Channel' form, fill in required fields (Name, Type, Webhook URL, etc.)
  3. (Optional) Toggle 'Send resolved alerts'
  4. (Optional) Click 'Test' to send a test notification
  5. Click 'Save' to add the channel
- **Expected:** Channel is added and listed in the table

## 2. Test Notification Channel

- **Precondition:** Channel is being created or edited
- **Steps:**
  1. In the 'New Notification Channel' or 'Edit Notification Channel' form, click 'Test'
- **Expected:** Test notification is sent (UI feedback/confirmation should be checked)

## 3. Remove a Notification Channel

- **Precondition:** Channel is added
- **Steps:**
  1. In the table, locate the channel row
  2. Click 'Delete' in the Action column
  3. Confirm removal if prompted
- **Expected:** Channel is removed from the table

## 4. Update Notification Channel Settings

- **Precondition:** Channel is added
- **Steps:**
  1. In the table, locate the channel row
  2. Click 'Edit' in the Action column
  3. In the 'Edit Notification Channel' form, update fields as needed
  4. (Optional) Click 'Test' to send a test notification
  5. Click 'Save' to update the channel
- **Expected:** Settings are updated
@@ -1,199 +0,0 @@
# SigNoz Test Plan Validation Report

This report documents the validation of the E2E test plan against the current live application using Playwright MCP. Each module is reviewed for coverage, gaps, and required updates.

---

## Home Module

- **Coverage:**
  - Widgets for logs, traces, metrics, dashboards, alerts, services, saved views, onboarding checklist
  - Quick access buttons: Explore Logs, Create dashboard, Create an alert
- **Gaps/Updates:**
  - Add scenarios for checklist interactions (e.g., “I’ll do this later”, progress tracking)
  - Add scenarios for Saved Views and cross-module links
  - Add scenario for onboarding checklist completion

---

## Logs Module

- **Coverage:**
  - Explorer, Pipelines, Views tabs
  - Filtering by service, environment, severity, host, k8s, etc.
  - Search, save view, create alert, add to dashboard, export, view mode switching
- **Gaps/Updates:**
  - Add scenario for quick filter customization
  - Add scenario for “Old Explorer” button
  - Add scenario for frequency chart toggle
  - Add scenario for “Stage & Run Query” workflow

---

## Traces Module

- **Coverage:**
  - Tabs: Explorer, Funnels, Views
  - Filtering by name, error status, duration, environment, function, service, RPC, status code, HTTP, trace ID, etc.
  - Search, save view, create alert, add to dashboard, export, view mode switching (List, Traces, Time Series, Table)
  - Pagination, quick filter customization, group by, aggregation
- **Gaps/Updates:**
  - Add scenario for quick filter customization
  - Add scenario for “Stage & Run Query” workflow
  - Add scenario for all view modes (List, Traces, Time Series, Table)
  - Add scenario for group by/aggregation
  - Add scenario for trace detail navigation (clicking on a trace row)
  - Add scenario for Funnels tab (create/edit/delete funnel)
  - Add scenario for Views tab (manage saved views)

---

## Metrics Module

- **Coverage:**
  - Tabs: Summary, Explorer, Views
  - Filtering by metric, type, unit, etc.
  - Search, save view, add to dashboard, export, view mode switching (chart, table, proportion view)
  - Pagination, group by, aggregation, custom queries
- **Gaps/Updates:**
  - Add scenario for Proportion View in Summary
  - Add scenario for all view modes (chart, table, proportion)
  - Add scenario for group by/aggregation
  - Add scenario for custom queries in Explorer
  - Add scenario for Views tab (manage saved views)

---

## Dashboards Module

- **Coverage:**
  - List, search, and filter dashboards
  - Create new dashboard (button and template link)
  - Edit, delete, and view dashboard details
  - Add/edit/delete widgets (implied by dashboard detail)
  - Pagination through dashboards
- **Gaps/Updates:**
  - Add scenario for browsing dashboard templates (external link)
  - Add scenario for requesting a new template
  - Add scenario for dashboard owner and creation info
  - Add scenario for dashboard tags and filtering by tags
  - Add scenario for dashboard sharing (if available)
  - Add scenario for dashboard image/preview

---

## Messaging Queues Module

- **Coverage:**
  - Overview tab: queue metrics, filters (Service Name, Span Name, Msg System, Destination, Kind)
  - Search across all columns
  - Pagination of queue data
  - Sync and Share buttons
  - Tabs for Kafka and Celery
- **Gaps/Updates:**
  - Add scenario for Kafka tab (detailed metrics, actions)
  - Add scenario for Celery tab (detailed metrics, actions)
  - Add scenario for filter combinations and edge cases
  - Add scenario for sharing queue data
  - Add scenario for time range selection

---

## External APIs Module

- **Coverage:**
  - Accessed via side navigation under MORE
  - Explorer tab: domain, endpoints, last used, rate, error %, avg. latency
  - Filters: Deployment Environment, Service Name, Rpc Method, Show IP addresses
  - Table pagination
  - Share and Stage & Run Query buttons
- **Gaps/Updates:**
  - Add scenario for customizing quick filters
  - Add scenario for running and staging queries
  - Add scenario for sharing API data
  - Add scenario for edge cases in filters and table data

---

## Alerts Module

- **Coverage:**
  - Alert Rules tab: list, search, create (New Alert), edit, delete, enable/disable, severity, labels, actions
  - Triggered Alerts tab (visible in tablist)
  - Configuration tab (visible in tablist)
  - Table pagination
- **Gaps/Updates:**
  - Add scenario for triggered alerts (view, acknowledge, resolve)
  - Add scenario for alert configuration (settings, integrations)
  - Add scenario for edge cases in alert creation and management
  - Add scenario for searching and filtering alerts

---

## Integrations Module

- **Coverage:**
  - Integrations tab: list, search, configure (e.g., AWS), request new integration
  - One-click setup for AWS monitoring
  - Request more integrations (form)
- **Gaps/Updates:**
  - Add scenario for configuring integrations (step-by-step)
  - Add scenario for searching and filtering integrations
  - Add scenario for requesting new integrations
  - Add scenario for edge cases (e.g., failed configuration)

---

## Exceptions Module

- **Coverage:**
  - All Exceptions: list, search, filter (Deployment Environment, Service Name, Host Name, K8s Cluster/Deployment/Namespace, Net Peer Name)
  - Table: Exception Type, Error Message, Count, Last Seen, First Seen, Application
  - Pagination
  - Exception detail links
  - Share and Stage & Run Query buttons
- **Gaps/Updates:**
  - Add scenario for exception detail view
  - Add scenario for advanced filtering and edge cases
  - Add scenario for sharing and running queries
  - Add scenario for error grouping and navigation

---

## Service Map Module

- **Coverage:**
  - Service Map visualization (main graph)
  - Filters: environment, resource attributes
  - Time range selection
  - Sync and Share buttons
- **Gaps/Updates:**
  - Add scenario for interacting with the map (zoom, pan, select service)
  - Add scenario for filtering and edge cases
  - Add scenario for sharing the map
  - Add scenario for time range and environment combinations

---

## Billing Module

- **Coverage:**
  - Billing overview: cost monitoring, invoices, CSV download (disabled), manage billing (disabled)
  - Teams Cloud section
  - Billing table: Unit, Data Ingested, Price per Unit, Cost (Billing period to date)
- **Gaps/Updates:**
  - Add scenario for invoice download and management (when enabled)
  - Add scenario for cost monitoring and edge cases
  - Add scenario for billing table data validation
  - Add scenario for permissions and access control

---

## Usage Explorer Module

- **Status:**
  - Not accessible in the current environment. Removing from test plan flows.

---

## [Next modules will be filled as validation proceeds]
@@ -1,42 +0,0 @@
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Account Settings - View and Assert Static Controls', async ({ page }) => {
	await ensureLoggedIn(page);

	// 1. Open the sidebar settings menu using data-testid
	await page.getByTestId('settings-nav-item').click();

	// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();

	// Assert the main tabpanel/heading (confirmed by DOM)
	await expect(page.getByTestId('settings-page-title')).toBeVisible();

	// Assert General section and controls (confirmed by DOM)
	await expect(
		page.getByLabel('My Settings').getByText('General'),
	).toBeVisible();
	await expect(page.getByText('Manage your account settings.')).toBeVisible();
	await expect(page.getByRole('button', { name: 'Update name' })).toBeVisible();
	await expect(
		page.getByRole('button', { name: 'Reset password' }),
	).toBeVisible();

	// Assert User Preferences section and controls (confirmed by DOM)
	await expect(page.getByText('User Preferences')).toBeVisible();
	await expect(
		page.getByText('Tailor the SigNoz console to work according to your needs.'),
	).toBeVisible();
	await expect(page.getByText('Select your theme')).toBeVisible();

	const themeSelector = page.getByTestId('theme-selector');

	await expect(themeSelector.getByText('Dark')).toBeVisible();
	await expect(themeSelector.getByText('Light')).toBeVisible();
	await expect(themeSelector.getByText('System')).toBeVisible();

	await expect(page.getByTestId('timezone-adaptation-switch')).toBeVisible();
	await expect(page.getByTestId('side-nav-pinned-switch')).toBeVisible();
});
@@ -1,42 +0,0 @@
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('API Keys Settings - View and Interact', async ({ page }) => {
	await ensureLoggedIn(page);

	// 1. Open the sidebar settings menu using data-testid
	await page.getByTestId('settings-nav-item').click();

	// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();

	// Assert the main tabpanel/heading (confirmed by DOM)
	await expect(page.getByTestId('settings-page-title')).toBeVisible();

	// Focus on the settings page sidenav
	await page.getByTestId('settings-page-sidenav').focus();

	// Click API Keys tab in the settings sidebar (by data-testid)
	await page.getByTestId('api-keys').click();

	// Assert heading and subheading
	await expect(page.getByRole('heading', { name: 'API Keys' })).toBeVisible();
	await expect(
		page.getByText('Create and manage API keys for the SigNoz API'),
	).toBeVisible();

	// Assert presence of New Key button
	const newKeyBtn = page.getByRole('button', { name: 'New Key' });
	await expect(newKeyBtn).toBeVisible();

	// Assert table columns
	await expect(page.getByText('Last used').first()).toBeVisible();
	await expect(page.getByText('Expired').first()).toBeVisible();

	// Assert at least one API key row with action buttons
	// Select the first action cell's first button (icon button)
	const firstActionCell = page.locator('table tr').nth(1).locator('td').last();
	const deleteBtn = firstActionCell.locator('button').first();
	await expect(deleteBtn).toBeVisible();
});
@@ -1,71 +0,0 @@
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

// E2E: Billing Settings - View Billing Information and Button Actions

test('View Billing Information and Button Actions', async ({
	page,
	context,
}) => {
	// Ensure user is logged in
	await ensureLoggedIn(page);

	// 1. Open the sidebar settings menu using data-testid
	await page.getByTestId('settings-nav-item').click();

	// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();

	// Assert the main tabpanel/heading (confirmed by DOM)
	await expect(page.getByTestId('settings-page-title')).toBeVisible();

	// Focus on the settings page sidenav
	await page.getByTestId('settings-page-sidenav').focus();

	// Click Billing tab in the settings sidebar (by data-testid)
	await page.getByTestId('billing').click();

	// Wait for billing chart/data to finish loading
	await page.getByText('loading').first().waitFor({ state: 'hidden' });

	// Assert visibility of subheading (unique)
	await expect(
		page.getByText(
			'Manage your billing information, invoices, and monitor costs.',
		),
	).toBeVisible();
	// Assert visibility of Teams Cloud heading
	await expect(page.getByRole('heading', { name: 'Teams Cloud' })).toBeVisible();

	// Assert presence of summary and detailed tables
	await expect(page.getByText('TOTAL SPENT')).toBeVisible();
	await expect(page.getByText('Data Ingested')).toBeVisible();
	await expect(page.getByText('Price per Unit')).toBeVisible();
	await expect(page.getByText('Cost (Billing period to date)')).toBeVisible();

	// Assert presence of alert and note
	await expect(
		page.getByText('Your current billing period is from', { exact: false }),
	).toBeVisible();
	await expect(
		page.getByText('Billing metrics are updated once every 24 hours.'),
	).toBeVisible();

	// Test Download CSV button
	const [download] = await Promise.all([
		page.waitForEvent('download'),
		page.getByRole('button', { name: 'cloud-download Download CSV' }).click(),
	]);
	// Optionally, check download file name
	expect(download.suggestedFilename()).toContain('billing_usage');

	// Test Manage Billing button (opens Stripe in new tab)
	const [newPage] = await Promise.all([
		context.waitForEvent('page'),
		page.getByTestId('header-billing-button').click(),
	]);
	await newPage.waitForLoadState();
	expect(newPage.url()).toContain('stripe.com');
	await newPage.close();
});
@@ -1,52 +0,0 @@
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Custom Domain Settings - View and Interact', async ({ page }) => {
	await ensureLoggedIn(page);

	// 1. Open the sidebar settings menu using data-testid
	await page.getByTestId('settings-nav-item').click();

	// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();

	// Assert the main tabpanel/heading (confirmed by DOM)
	await expect(page.getByTestId('settings-page-title')).toBeVisible();

	// Focus on the settings page sidenav
	await page.getByTestId('settings-page-sidenav').focus();

	// Click Custom Domain tab in the settings sidebar (by data-testid)
	await page.getByTestId('custom-domain').click();

	// Wait for custom domain chart/data to finish loading
	await page.getByText('loading').first().waitFor({ state: 'hidden' });

	// Assert heading and subheading
	await expect(
		page.getByRole('heading', { name: 'Custom Domain Settings' }),
	).toBeVisible();
	await expect(
		page.getByText('Personalize your workspace domain effortlessly.'),
	).toBeVisible();

	// Assert presence of Customize team’s URL button
	const customizeBtn = page.getByRole('button', {
		name: 'Customize team’s URL',
	});
	await expect(customizeBtn).toBeVisible();
	await customizeBtn.click();

	// Assert modal/dialog fields and buttons
	await expect(
		page.getByRole('dialog', { name: 'Customize your team’s URL' }),
	).toBeVisible();
	await expect(page.getByLabel('Team’s URL subdomain')).toBeVisible();
	await expect(
		page.getByRole('button', { name: 'Apply Changes' }),
	).toBeVisible();
	await expect(page.getByRole('button', { name: 'Close' })).toBeVisible();
	// Close the modal
	await page.getByRole('button', { name: 'Close' }).click();
});
@@ -1,32 +0,0 @@
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('View General Settings', async ({ page }) => {
	await ensureLoggedIn(page);

	// 1. Open the sidebar settings menu using data-testid
	await page.getByTestId('settings-nav-item').click();

	// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();

	// Assert the main tabpanel/heading (confirmed by DOM)
	await expect(page.getByTestId('settings-page-title')).toBeVisible();

	// Focus on the settings page sidenav
	await page.getByTestId('settings-page-sidenav').focus();

	// Click General tab in the settings sidebar (by data-testid)
	await page.getByTestId('general').click();

	// Wait for General tab to be visible
	await page.getByRole('tabpanel', { name: 'General' }).waitFor();

	// Assert visibility of definitive/static elements
	await expect(page.getByRole('heading', { name: 'Metrics' })).toBeVisible();
	await expect(page.getByRole('heading', { name: 'Traces' })).toBeVisible();
	await expect(page.getByRole('heading', { name: 'Logs' })).toBeVisible();
	await expect(page.getByText('Please')).toBeVisible();
	await expect(page.getByRole('link', { name: 'email us' })).toBeVisible();
});
@@ -1,48 +0,0 @@
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Ingestion Settings - View and Interact', async ({ page }) => {
	await ensureLoggedIn(page);

	// 1. Open the sidebar settings menu using data-testid
	await page.getByTestId('settings-nav-item').click();

	// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();

	// Assert the main tabpanel/heading (confirmed by DOM)
	await expect(page.getByTestId('settings-page-title')).toBeVisible();

	// Focus on the settings page sidenav
	await page.getByTestId('settings-page-sidenav').focus();

	// Click Ingestion tab in the settings sidebar (by data-testid)
	await page.getByTestId('ingestion').click();

	// Assert heading and subheading (Integrations page)
	await expect(
		page.getByRole('heading', { name: 'Integrations' }),
	).toBeVisible();
	await expect(
		page.getByText('Manage Integrations for this workspace'),
	).toBeVisible();

	// Assert presence of search box
	await expect(
		page.getByPlaceholder('Search for an integration...'),
	).toBeVisible();

	// Assert at least one data source with Configure button
	const configureBtn = page.getByRole('button', { name: 'Configure' }).first();
	await expect(configureBtn).toBeVisible();

	// Assert Request more integrations section
	await expect(
		page.getByText(
			"Can't find what you’re looking for? Request more integrations",
		),
	).toBeVisible();
	await expect(page.getByPlaceholder('Enter integration name...')).toBeVisible();
	await expect(page.getByRole('button', { name: 'Submit' })).toBeVisible();
});
@@ -1,48 +0,0 @@
import { expect, test } from '@playwright/test';

import { ensureLoggedIn } from '../../../utils/login.util';

test('Integrations Settings - View and Interact', async ({ page }) => {
	await ensureLoggedIn(page);

	// 1. Open the sidebar settings menu using data-testid
	await page.getByTestId('settings-nav-item').click();

	// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();

	// Assert the main tabpanel/heading (confirmed by DOM)
	await expect(page.getByTestId('settings-page-title')).toBeVisible();

	// Focus on the settings page sidenav
	await page.getByTestId('settings-page-sidenav').focus();

	// Click Integrations tab in the settings sidebar (by data-testid)
	await page.getByTestId('integrations').click();

	// Assert heading and subheading
	await expect(
		page.getByRole('heading', { name: 'Integrations' }),
	).toBeVisible();
	await expect(
		page.getByText('Manage Integrations for this workspace'),
	).toBeVisible();

	// Assert presence of search box
	await expect(
		page.getByPlaceholder('Search for an integration...'),
	).toBeVisible();

	// Assert at least one integration with Configure button
	const configureBtn = page.getByRole('button', { name: 'Configure' }).first();
	await expect(configureBtn).toBeVisible();

	// Assert Request more integrations section
	await expect(
		page.getByText(
			"Can't find what you’re looking for? Request more integrations",
		),
	).toBeVisible();
	await expect(page.getByPlaceholder('Enter integration name...')).toBeVisible();
	await expect(page.getByRole('button', { name: 'Submit' })).toBeVisible();
});
Some files were not shown because too many files have changed in this diff.