Compare commits


1 Commit

Author: ahmadshaheer
SHA1: e10be91bc0
Message: test: enable skipped test suites for HostMetricsLogs and LogsExplorerViews Pagination
Date: 2025-05-27 16:03:31 +04:30
1536 changed files with 35802 additions and 182748 deletions


@@ -24,7 +24,7 @@ services:
     depends_on:
       - zookeeper
   zookeeper:
-    image: signoz/zookeeper:3.7.1
+    image: bitnami/zookeeper:3.7.1
     container_name: zookeeper
     volumes:
       - ${PWD}/fs/tmp/zookeeper:/bitnami/zookeeper
@@ -40,7 +40,7 @@ services:
       timeout: 5s
       retries: 3
   schema-migrator-sync:
-    image: signoz/signoz-schema-migrator:v0.129.0
+    image: signoz/signoz-schema-migrator:v0.111.41
     container_name: schema-migrator-sync
     command:
       - sync
@@ -53,7 +53,7 @@ services:
       condition: service_healthy
     restart: on-failure
   schema-migrator-async:
-    image: signoz/signoz-schema-migrator:v0.129.0
+    image: signoz/signoz-schema-migrator:v0.111.41
     container_name: schema-migrator-async
     command:
       - async


@@ -1,29 +0,0 @@
-services:
-  signoz-otel-collector:
-    image: signoz/signoz-otel-collector:v0.128.2
-    container_name: signoz-otel-collector-dev
-    command:
-      - --config=/etc/otel-collector-config.yaml
-      - --feature-gates=-pkg.translator.prometheus.NormalizeName
-    volumes:
-      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
-    environment:
-      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
-      - LOW_CARDINAL_EXCEPTION_GROUPING=false
-    ports:
-      - "4317:4317" # OTLP gRPC receiver
-      - "4318:4318" # OTLP HTTP receiver
-      - "13133:13133" # health check extension
-    healthcheck:
-      test:
-        - CMD
-        - wget
-        - --spider
-        - -q
-        - localhost:13133
-      interval: 30s
-      timeout: 5s
-      retries: 3
-    restart: unless-stopped
-    extra_hosts:
-      - "host.docker.internal:host-gateway"


@@ -1,96 +0,0 @@
-receivers:
-  otlp:
-    protocols:
-      grpc:
-        endpoint: 0.0.0.0:4317
-      http:
-        endpoint: 0.0.0.0:4318
-  prometheus:
-    config:
-      global:
-        scrape_interval: 60s
-      scrape_configs:
-        - job_name: otel-collector
-          static_configs:
-            - targets:
-                - localhost:8888
-              labels:
-                job_name: otel-collector
-
-processors:
-  batch:
-    send_batch_size: 10000
-    send_batch_max_size: 11000
-    timeout: 10s
-  resourcedetection:
-    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
-    detectors: [env, system]
-    timeout: 2s
-  signozspanmetrics/delta:
-    metrics_exporter: signozclickhousemetrics
-    metrics_flush_interval: 60s
-    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
-    dimensions_cache_size: 100000
-    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
-    enable_exp_histogram: true
-    dimensions:
-      - name: service.namespace
-        default: default
-      - name: deployment.environment
-        default: default
-      # This is added to ensure the uniqueness of the timeseries
-      # Otherwise, identical timeseries produced by multiple replicas of
-      # collectors result in incorrect APM metrics
-      - name: signoz.collector.id
-      - name: service.version
-      - name: browser.platform
-      - name: browser.mobile
-      - name: k8s.cluster.name
-      - name: k8s.node.name
-      - name: k8s.namespace.name
-      - name: host.name
-      - name: host.type
-      - name: container.name
-
-extensions:
-  health_check:
-    endpoint: 0.0.0.0:13133
-  pprof:
-    endpoint: 0.0.0.0:1777
-
-exporters:
-  clickhousetraces:
-    datasource: tcp://host.docker.internal:9000/signoz_traces
-    low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
-    use_new_schema: true
-  signozclickhousemetrics:
-    dsn: tcp://host.docker.internal:9000/signoz_metrics
-  clickhouselogsexporter:
-    dsn: tcp://host.docker.internal:9000/signoz_logs
-    timeout: 10s
-    use_new_schema: true
-
-service:
-  telemetry:
-    logs:
-      encoding: json
-  extensions:
-    - health_check
-    - pprof
-  pipelines:
-    traces:
-      receivers: [otlp]
-      processors: [signozspanmetrics/delta, batch]
-      exporters: [clickhousetraces]
-    metrics:
-      receivers: [otlp]
-      processors: [batch]
-      exporters: [signozclickhousemetrics]
-    metrics/prometheus:
-      receivers: [prometheus]
-      processors: [batch]
-      exporters: [signozclickhousemetrics]
-    logs:
-      receivers: [otlp]
-      processors: [batch]
-      exporters: [clickhouselogsexporter]
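Per the receivers above, this collector accepted OTLP over gRPC (4317) and HTTP (4318). A quick sanity check against the HTTP receiver, reusing the test-trace payload that also appears in the deleted development.md section near the end of this compare:

```bash
# Post one minimal span to the OTLP HTTP endpoint; a 2xx response means the receiver accepted it.
curl -X POST http://localhost:4318/v1/traces \
  -H "Content-Type: application/json" \
  -d '{"resourceSpans":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"test-service"}}]},"scopeSpans":[{"spans":[{"traceId":"12345678901234567890123456789012","spanId":"1234567890123456","name":"test-span","startTimeUnixNano":"1609459200000000000","endTimeUnixNano":"1609459201000000000"}]}]}]}'
```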

.github/CODEOWNERS

@@ -7,38 +7,9 @@
 /frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
 /deploy/ @SigNoz/devops
 .github @SigNoz/devops
-
-# Scaffold Owners
 /pkg/config/ @grandwizard28
 /pkg/errors/ @grandwizard28
 /pkg/factory/ @grandwizard28
 /pkg/types/ @grandwizard28
-/pkg/valuer/ @grandwizard28
-/cmd/ @grandwizard28
-.golangci.yml @grandwizard28
-
-# Zeus Owners
-/pkg/zeus/ @vikrantgupta25
-/ee/zeus/ @vikrantgupta25
-/pkg/licensing/ @vikrantgupta25
-/ee/licensing/ @vikrantgupta25
-
-# SQL Owners
 /pkg/sqlmigration/ @vikrantgupta25
-/ee/sqlmigration/ @vikrantgupta25
-/pkg/sqlschema/ @vikrantgupta25
-/ee/sqlschema/ @vikrantgupta25
-
-# Analytics Owners
-/pkg/analytics/ @vikrantgupta25
-/pkg/statsreporter/ @vikrantgupta25
-
-# Querier Owners
-/pkg/querier/ @srikanthccv
-/pkg/variables/ @srikanthccv
-/pkg/types/querybuildertypes/ @srikanthccv
-/pkg/querybuilder/ @srikanthccv
-/pkg/telemetrylogs/ @srikanthccv
-/pkg/telemetrymetadata/ @srikanthccv
-/pkg/telemetrymetrics/ @srikanthccv
-/pkg/telemetrytraces/ @srikanthccv
+.golangci.yml @grandwizard28


@@ -66,7 +66,7 @@ jobs:
       GO_NAME: signoz-community
       GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
       GO_INPUT_ARTIFACT_PATH: frontend/build
-      GO_BUILD_CONTEXT: ./cmd/community
+      GO_BUILD_CONTEXT: ./pkg/query-service
       GO_BUILD_FLAGS: >-
         -tags timetzdata
         -ldflags='-linkmode external -extldflags \"-static\" -s -w
@@ -74,10 +74,9 @@
         -X github.com/SigNoz/signoz/pkg/version.variant=community
         -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
         -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
-        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
-        -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
+        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
-      DOCKER_DOCKERFILE_PATH: ./cmd/community/Dockerfile.multi-arch
+      DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
       DOCKER_PROVIDERS: dockerhub
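Both sides of this hunk rely on Go's `-ldflags "-X importpath.var=value"` mechanism to stamp version metadata into the binary at link time. A standalone sketch with the same variables (the hash and time values here are illustrative, computed locally rather than taken from the `prepare` job outputs):

```bash
# Build the community binary with version metadata injected at link time.
go build -tags timetzdata \
  -ldflags "-X github.com/SigNoz/signoz/pkg/version.variant=community \
            -X github.com/SigNoz/signoz/pkg/version.hash=$(git rev-parse --short HEAD) \
            -X github.com/SigNoz/signoz/pkg/version.time=$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
  ./pkg/query-service
```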


@@ -67,8 +67,9 @@ jobs:
           echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
           echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
           echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
-          echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> frontend/.env
-          echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> frontend/.env
+          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
+          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
+          echo 'USERPILOT_KEY="${{ secrets.USERPILOT_KEY }}"' >> frontend/.env
       - name: cache-dotenv
         uses: actions/cache@v4
         with:
@@ -96,7 +97,7 @@
       GO_VERSION: 1.23
       GO_INPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
       GO_INPUT_ARTIFACT_PATH: frontend/build
-      GO_BUILD_CONTEXT: ./cmd/enterprise
+      GO_BUILD_CONTEXT: ./ee/query-service
       GO_BUILD_FLAGS: >-
         -tags timetzdata
         -ldflags='-linkmode external -extldflags \"-static\" -s -w
@@ -108,10 +109,9 @@
         -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
         -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
         -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
-        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
-        -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
+        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
-      DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
+      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
       DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}


@@ -66,8 +66,7 @@ jobs:
           echo 'CI=1' > frontend/.env
           echo 'TUNNEL_URL="${{ secrets.NP_TUNNEL_URL }}"' >> frontend/.env
           echo 'TUNNEL_DOMAIN="${{ secrets.NP_TUNNEL_DOMAIN }}"' >> frontend/.env
-          echo 'PYLON_APP_ID="${{ secrets.NP_PYLON_APP_ID }}"' >> frontend/.env
-          echo 'APPCUES_APP_ID="${{ secrets.NP_APPCUES_APP_ID }}"' >> frontend/.env
+          echo 'USERPILOT_KEY="${{ secrets.NP_USERPILOT_KEY }}"' >> frontend/.env
       - name: cache-dotenv
         uses: actions/cache@v4
         with:
@@ -95,7 +94,7 @@
       GO_VERSION: 1.23
       GO_INPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
       GO_INPUT_ARTIFACT_PATH: frontend/build
-      GO_BUILD_CONTEXT: ./cmd/enterprise
+      GO_BUILD_CONTEXT: ./ee/query-service
       GO_BUILD_FLAGS: >-
         -tags timetzdata
         -ldflags='-linkmode external -extldflags \"-static\" -s -w
@@ -107,11 +106,10 @@
         -X github.com/SigNoz/signoz/ee/zeus.url=https://api.staging.signoz.cloud
         -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.staging.signoz.cloud
         -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
-        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1
-        -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
+        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
-      DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
+      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
       DOCKER_PROVIDERS: gcp
   staging:


@@ -36,7 +36,7 @@ jobs:
         - ubuntu-latest
         - macos-latest
     env:
-      CONFIG_PATH: cmd/community/.goreleaser.yaml
+      CONFIG_PATH: pkg/query-service/.goreleaser.yaml
     runs-on: ${{ matrix.os }}
     steps:
       - name: checkout
@@ -100,7 +100,7 @@ jobs:
     needs: build
     env:
       DOCKER_CLI_EXPERIMENTAL: "enabled"
-      WORKDIR: cmd/community
+      WORKDIR: pkg/query-service
     steps:
       - name: checkout
         uses: actions/checkout@v4


@@ -33,8 +33,9 @@ jobs:
           echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> .env
           echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> .env
           echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
-          echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> .env
-          echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> .env
+          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> .env
+          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> .env
+          echo 'USERPILOT_KEY="${{ secrets.USERPILOT_KEY }}"' >> .env
       - name: build-frontend
         run: make js-build
       - name: upload-frontend-artifact
@@ -50,7 +51,7 @@
         - ubuntu-latest
         - macos-latest
     env:
-      CONFIG_PATH: cmd/enterprise/.goreleaser.yaml
+      CONFIG_PATH: ee/query-service/.goreleaser.yaml
     runs-on: ${{ matrix.os }}
     steps:
       - name: checkout


@@ -15,16 +15,14 @@ jobs:
       matrix:
         src:
           - bootstrap
-          - auth
-          - querier
        sqlstore-provider:
          - postgres
          - sqlite
        clickhouse-version:
          - 24.1.2-alpine
-          - 25.5.6
+          - 24.12-alpine
        schema-migrator-version:
-          - v0.128.1
+          - v0.111.38
        postgres-version:
          - 15
    if: |


@@ -1,6 +1,10 @@
 name: prereleaser
 on:
+  # schedule every wednesday 6:30 AM UTC (12:00 PM IST)
+  schedule:
+    - cron: '30 6 * * 3'
   # allow manual triggering of the workflow by a maintainer
   workflow_dispatch:
     inputs:
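The cron expression reads field by field as minute 30, hour 6 (UTC), every day of month, every month, weekday 3, i.e. Wednesdays at 06:30 UTC, which is 12:00 IST (UTC+5:30) — matching the comment. The same schedule in a plain crontab, for comparison (the script path is a made-up placeholder):

```bash
# Field order: minute hour day-of-month month day-of-week
# 30 6 * * 3  → every Wednesday at 06:30 UTC
echo '30 6 * * 3 /usr/local/bin/prerelease.sh' | crontab -
```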


@@ -1,62 +0,0 @@
-name: e2eci
-
-on:
-  workflow_dispatch:
-    inputs:
-      userRole:
-        description: "Role of the user (ADMIN, EDITOR, VIEWER)"
-        required: true
-        type: choice
-        options:
-          - ADMIN
-          - EDITOR
-          - VIEWER
-
-jobs:
-  test:
-    name: Run Playwright Tests
-    runs-on: ubuntu-latest
-    timeout-minutes: 60
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Setup Node.js
-        uses: actions/setup-node@v4
-        with:
-          node-version: lts/*
-
-      - name: Mask secrets and input
-        run: |
-          echo "::add-mask::${{ secrets.BASE_URL }}"
-          echo "::add-mask::${{ secrets.LOGIN_USERNAME }}"
-          echo "::add-mask::${{ secrets.LOGIN_PASSWORD }}"
-          echo "::add-mask::${{ github.event.inputs.userRole }}"
-
-      - name: Install dependencies
-        working-directory: frontend
-        run: |
-          npm install -g yarn
-          yarn
-
-      - name: Install Playwright Browsers
-        working-directory: frontend
-        run: yarn playwright install --with-deps
-
-      - name: Run Playwright Tests
-        working-directory: frontend
-        run: |
-          BASE_URL="${{ secrets.BASE_URL }}" \
-          LOGIN_USERNAME="${{ secrets.LOGIN_USERNAME }}" \
-          LOGIN_PASSWORD="${{ secrets.LOGIN_PASSWORD }}" \
-          USER_ROLE="${{ github.event.inputs.userRole }}" \
-          yarn playwright test
-
-      - name: Upload Playwright Report
-        uses: actions/upload-artifact@v4
-        if: always()
-        with:
-          name: playwright-report
-          path: frontend/playwright-report/
-          retention-days: 30
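The workflow above maps directly onto a local run; this sketch mirrors its install and test steps with the repository secrets swapped for placeholder values you would supply yourself:

```bash
cd frontend
npm install -g yarn && yarn
yarn playwright install --with-deps

# Placeholders: point these at your own SigNoz deployment and test account.
BASE_URL="https://signoz.example.com" \
LOGIN_USERNAME="qa@example.com" \
LOGIN_PASSWORD="changeme" \
USER_ROLE="ADMIN" \
yarn playwright test
```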

.gitignore

@@ -66,7 +66,6 @@ e2e/.auth
 # go
 vendor/
 **/main/**
-__debug_bin**
 
 # git-town
 .git-branches.toml


@@ -7,7 +7,6 @@ linters:
     - sloglint
     - depguard
     - iface
-    - unparam
 
 linters-settings:
   sloglint:


@@ -1,62 +0,0 @@
-# SigNoz Community Advocate Program
-
-Our community is filled with passionate developers who love SigNoz and have been helping spread the word about observability across the world. The SigNoz Community Advocate Program is our way of recognizing these incredible community members and creating deeper collaboration opportunities.
-
-## What is the SigNoz Community Advocate Program?
-
-The SigNoz Community Advocate Program celebrates and supports community members who are already passionate about observability and helping fellow developers. If you're someone who loves discussing SigNoz, helping others with their implementations, or sharing knowledge about observability practices, this program is designed with you in mind.
-
-Our advocates are the heart of the SigNoz community, helping other developers succeed with observability and providing valuable insights that help us build better products.
-
-## What Do Advocates Do?
-
-1. **Community Support**
-   - Help fellow developers in our Slack community and GitHub Discussions
-   - Answer questions and share solutions
-   - Guide newcomers through SigNoz self-host implementations
-
-2. **Knowledge Sharing**
-   - Spread awareness about observability best practices on developer forums
-   - Create content like blog posts, social media posts, and videos
-   - Host local meetups and events in their regions
-
-3. **Product Collaboration**
-   - Provide insights on features, changes, and improvements the community needs
-   - Beta test new features and provide early feedback
-   - Help us understand real-world use cases and pain points
-
-## What's In It For You?
-
-**Recognition & Swag**
-
-- Official recognition as a SigNoz advocate
-- Welcome hamper upon joining
-- Exclusive swag box within your first 3 months
-- Feature on our website (with your permission)
-
-**Early Access**
-
-- First look at new features and updates
-- Direct line to the SigNoz team for feedback and suggestions
-- Opportunity to influence product roadmap
-
-**Community Impact**
-
-- Help shape the observability landscape
-- Build your reputation in the developer community
-- Connect with like-minded developers globally
-
-## How Does It Work?
-
-Currently, the SigNoz Community Advocate Program is **invite-only**. We're starting with a small group of passionate community members who have already been making a difference.
-
-We'll be working closely with our first advocates to shape the program details, benefits, and structure based on what works best for everyone involved.
-
-If you're interested in learning more about the program or want to get more involved in the SigNoz community, join our [Slack community](https://signoz-community.slack.com/) and let us know!
-
----
-
-*The SigNoz Community Advocate Program recognizes and celebrates the amazing community members who are already passionate about helping fellow developers succeed with observability.*


@@ -78,4 +78,3 @@ Need assistance? Join our Slack community:
 - Set up your [development environment](docs/contributing/development.md)
 - Deploy and observe [SigNoz in action with OpenTelemetry Demo Application](docs/otel-demo-docs.md)
-- Explore the [SigNoz Community Advocate Program](ADVOCATE.md), which recognises contributors who support the community, share their expertise, and help shape SigNoz's future.


@@ -2,7 +2,7 @@ Copyright (c) 2020-present SigNoz Inc.
 Portions of this software are licensed as follows:
-* All content that resides under the "ee/" and the "cmd/enterprise/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
+* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
 * All third party components incorporated into the SigNoz Software are licensed under the original license provided by the owner of the applicable component.
 * Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.


@@ -20,18 +20,18 @@ GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO = -X github.com/SigNoz/signoz/ee/zeus.depreca
 GO_BUILD_VERSION_LDFLAGS = -X github.com/SigNoz/signoz/pkg/version.version=$(VERSION) -X github.com/SigNoz/signoz/pkg/version.hash=$(COMMIT_SHORT_SHA) -X github.com/SigNoz/signoz/pkg/version.time=$(TIMESTAMP) -X github.com/SigNoz/signoz/pkg/version.branch=$(BRANCH_NAME)
 
 GO_BUILD_ARCHS_COMMUNITY = $(addprefix go-build-community-,$(ARCHS))
-GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/cmd/community
+GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/pkg/query-service
 GO_BUILD_LDFLAGS_COMMUNITY = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=community
 
 GO_BUILD_ARCHS_ENTERPRISE = $(addprefix go-build-enterprise-,$(ARCHS))
 GO_BUILD_ARCHS_ENTERPRISE_RACE = $(addprefix go-build-enterprise-race-,$(ARCHS))
-GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/cmd/enterprise
+GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/ee/query-service
 GO_BUILD_LDFLAGS_ENTERPRISE = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=enterprise $(GO_BUILD_LDFLAG_ZEUS_URL) $(GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO)
 
 DOCKER_BUILD_ARCHS_COMMUNITY = $(addprefix docker-build-community-,$(ARCHS))
-DOCKERFILE_COMMUNITY = $(SRC)/cmd/community/Dockerfile
+DOCKERFILE_COMMUNITY = $(SRC)/pkg/query-service/Dockerfile
 DOCKER_REGISTRY_COMMUNITY ?= docker.io/signoz/signoz-community
 DOCKER_BUILD_ARCHS_ENTERPRISE = $(addprefix docker-build-enterprise-,$(ARCHS))
-DOCKERFILE_ENTERPRISE = $(SRC)/cmd/enterprise/Dockerfile
+DOCKERFILE_ENTERPRISE = $(SRC)/ee/query-service/Dockerfile
 DOCKER_REGISTRY_ENTERPRISE ?= docker.io/signoz/signoz
 
 JS_BUILD_CONTEXT = $(SRC)/frontend
@@ -61,17 +61,6 @@ devenv-postgres: ## Run postgres in devenv
 	@cd .devenv/docker/postgres; \
 	docker compose -f compose.yaml up -d
-
-.PHONY: devenv-signoz-otel-collector
-devenv-signoz-otel-collector: ## Run signoz-otel-collector in devenv (requires clickhouse to be running)
-	@cd .devenv/docker/signoz-otel-collector; \
-	docker compose -f compose.yaml up -d
-
-.PHONY: devenv-up
-devenv-up: devenv-clickhouse devenv-signoz-otel-collector ## Start both clickhouse and signoz-otel-collector for local development
-	@echo "Development environment is ready!"
-	@echo " - ClickHouse: http://localhost:8123"
-	@echo " - Signoz OTel Collector: grpc://localhost:4317, http://localhost:4318"
 
 ##############################################################
 # go commands
 ##############################################################
@@ -85,7 +74,7 @@ go-run-enterprise: ## Runs the enterprise go backend server
 	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
 	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
 	go run -race \
-		$(GO_BUILD_CONTEXT_ENTERPRISE)/*.go \
+		$(GO_BUILD_CONTEXT_ENTERPRISE)/main.go \
 		--config ./conf/prometheus.yml \
 		--cluster cluster
@@ -103,7 +92,7 @@ go-run-community: ## Runs the community go backend server
 	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
 	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
 	go run -race \
-		$(GO_BUILD_CONTEXT_COMMUNITY)/*.go server \
+		$(GO_BUILD_CONTEXT_COMMUNITY)/main.go \
 		--config ./conf/prometheus.yml \
 		--cluster cluster
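Expanded, the two `go run` recipes above differ only in entrypoint and CLI shape: the left-hand side runs a cobra `server` subcommand from `cmd/community`, the right-hand side runs `pkg/query-service/main.go` directly. Roughly, with the env vars and flags copied from the targets:

```bash
# Left side of the hunk: cmd/ layout with a "server" subcommand.
SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
go run -race ./cmd/community/*.go server --config ./conf/prometheus.yml --cluster cluster

# Right side: single main.go under pkg/query-service, no subcommand.
SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
go run -race ./pkg/query-service/main.go --config ./conf/prometheus.yml --cluster cluster
```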


@@ -8,6 +8,7 @@
 <p align="center">All your logs, metrics, and traces in one place. Monitor your application, spot issues before they occur and troubleshoot downtime quickly with rich context. SigNoz is a cost-effective open-source alternative to Datadog and New Relic. Visit <a href="https://signoz.io" target="_blank">signoz.io</a> for the full documentation, tutorials, and guide.</p>
 <p align="center">
+  <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/query-service?label=Docker Downloads"> </a>
   <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
   <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
   <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
@@ -230,8 +231,6 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
 - [Shaheer Kochai](https://github.com/ahmadshaheer)
 - [Amlan Kumar Nandy](https://github.com/amlannandy)
 - [Sahil Khan](https://github.com/sawhil)
-- [Aditya Singh](https://github.com/aks07)
-- [Abhi Kumar](https://github.com/ahrefabhi)
 
 #### DevOps


@@ -1,18 +0,0 @@
-package main
-
-import (
-	"log/slog"
-
-	"github.com/SigNoz/signoz/cmd"
-	"github.com/SigNoz/signoz/pkg/instrumentation"
-)
-
-func main() {
-	// initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application.
-	logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})
-
-	// register a list of commands to the root command
-	registerServer(cmd.RootCmd, logger)
-
-	cmd.Execute(logger)
-}


@@ -1,116 +0,0 @@
-package main
-
-import (
-	"context"
-	"log/slog"
-	"time"
-
-	"github.com/SigNoz/signoz/cmd"
-	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
-	"github.com/SigNoz/signoz/pkg/analytics"
-	"github.com/SigNoz/signoz/pkg/factory"
-	"github.com/SigNoz/signoz/pkg/licensing"
-	"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
-	"github.com/SigNoz/signoz/pkg/modules/organization"
-	"github.com/SigNoz/signoz/pkg/query-service/app"
-	"github.com/SigNoz/signoz/pkg/signoz"
-	"github.com/SigNoz/signoz/pkg/sqlschema"
-	"github.com/SigNoz/signoz/pkg/sqlstore"
-	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
-	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/version"
-	"github.com/SigNoz/signoz/pkg/zeus"
-	"github.com/SigNoz/signoz/pkg/zeus/noopzeus"
-	"github.com/spf13/cobra"
-)
-
-func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
-	var flags signoz.DeprecatedFlags
-
-	serverCmd := &cobra.Command{
-		Use:                "server",
-		Short:              "Run the SigNoz server",
-		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
-		RunE: func(currCmd *cobra.Command, args []string) error {
-			config, err := cmd.NewSigNozConfig(currCmd.Context(), flags)
-			if err != nil {
-				return err
-			}
-
-			return runServer(currCmd.Context(), config, logger)
-		},
-	}
-
-	flags.RegisterFlags(serverCmd)
-	parentCmd.AddCommand(serverCmd)
-}
-
-func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error {
-	// print the version
-	version.Info.PrettyPrint(config.Version)
-
-	// add enterprise sqlstore factories to the community sqlstore factories
-	sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
-	if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
-		logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
-		return err
-	}
-
-	jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour)
-
-	signoz, err := signoz.New(
-		ctx,
-		config,
-		jwt,
-		zeus.Config{},
-		noopzeus.NewProviderFactory(),
-		licensing.Config{},
-		func(_ sqlstore.SQLStore, _ zeus.Zeus, _ organization.Getter, _ analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
-			return nooplicensing.NewFactory()
-		},
-		signoz.NewEmailingProviderFactories(),
-		signoz.NewCacheProviderFactories(),
-		signoz.NewWebProviderFactories(),
-		func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
-			return signoz.NewSQLSchemaProviderFactories(sqlstore)
-		},
-		signoz.NewSQLStoreProviderFactories(),
-		signoz.NewTelemetryStoreProviderFactories(),
-	)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to create signoz", "error", err)
-		return err
-	}
-
-	server, err := app.NewServer(config, signoz, jwt)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to create server", "error", err)
-		return err
-	}
-
-	if err := server.Start(ctx); err != nil {
-		logger.ErrorContext(ctx, "failed to start server", "error", err)
-		return err
-	}
-
-	signoz.Start(ctx)
-
-	if err := signoz.Wait(ctx); err != nil {
-		logger.ErrorContext(ctx, "failed to start signoz", "error", err)
-		return err
-	}
-
-	err = server.Stop(ctx)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to stop server", "error", err)
-		return err
-	}
-
-	err = signoz.Stop(ctx)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
-		return err
-	}
-
-	return nil
-}


@@ -1,45 +0,0 @@
-package cmd
-
-import (
-	"context"
-	"fmt"
-	"log/slog"
-	"os"
-
-	"github.com/SigNoz/signoz/pkg/config"
-	"github.com/SigNoz/signoz/pkg/config/envprovider"
-	"github.com/SigNoz/signoz/pkg/config/fileprovider"
-	"github.com/SigNoz/signoz/pkg/signoz"
-)
-
-func NewSigNozConfig(ctx context.Context, flags signoz.DeprecatedFlags) (signoz.Config, error) {
-	config, err := signoz.NewConfig(
-		ctx,
-		config.ResolverConfig{
-			Uris: []string{"env:"},
-			ProviderFactories: []config.ProviderFactory{
-				envprovider.NewFactory(),
-				fileprovider.NewFactory(),
-			},
-		},
-		flags,
-	)
-	if err != nil {
-		return signoz.Config{}, err
-	}
-
-	return config, nil
-}
-
-func NewJWTSecret(_ context.Context, _ *slog.Logger) string {
-	jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")
-
-	if len(jwtSecret) == 0 {
-		fmt.Println("🚨 CRITICAL SECURITY ISSUE: No JWT secret key specified!")
-		fmt.Println("SIGNOZ_JWT_SECRET environment variable is not set. This has dire consequences for the security of the application.")
-		fmt.Println("Without a JWT secret, user sessions are vulnerable to tampering and unauthorized access.")
-		fmt.Println("Please set the SIGNOZ_JWT_SECRET environment variable immediately.")
-		fmt.Println("For more information, please refer to https://github.com/SigNoz/signoz/issues/8400.")
-	}
-
-	return jwtSecret
-}
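As the warning in `NewJWTSecret` spells out, the server signs sessions with whatever `SIGNOZ_JWT_SECRET` holds and falls back to an empty secret when the variable is unset. A minimal sketch of supplying one before starting the server — the `openssl` call is one common way to generate a secret, not something this code prescribes:

```bash
# Export a random 256-bit secret, then start the community server (Makefile target from this compare).
export SIGNOZ_JWT_SECRET="$(openssl rand -hex 32)"
make go-run-community
```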


@@ -1,18 +0,0 @@
-package main
-
-import (
-	"log/slog"
-
-	"github.com/SigNoz/signoz/cmd"
-	"github.com/SigNoz/signoz/pkg/instrumentation"
-)
-
-func main() {
-	// initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application.
-	logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}})
-
-	// register a list of commands to the root command
-	registerServer(cmd.RootCmd, logger)
-
-	cmd.Execute(logger)
-}


@@ -1,124 +0,0 @@
-package main
-
-import (
-	"context"
-	"log/slog"
-	"time"
-
-	"github.com/SigNoz/signoz/cmd"
-	enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
-	"github.com/SigNoz/signoz/ee/licensing/httplicensing"
-	enterpriseapp "github.com/SigNoz/signoz/ee/query-service/app"
-	"github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
-	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
-	enterprisezeus "github.com/SigNoz/signoz/ee/zeus"
-	"github.com/SigNoz/signoz/ee/zeus/httpzeus"
-	"github.com/SigNoz/signoz/pkg/analytics"
-	"github.com/SigNoz/signoz/pkg/factory"
-	"github.com/SigNoz/signoz/pkg/licensing"
-	"github.com/SigNoz/signoz/pkg/modules/organization"
-	"github.com/SigNoz/signoz/pkg/signoz"
-	"github.com/SigNoz/signoz/pkg/sqlschema"
-	"github.com/SigNoz/signoz/pkg/sqlstore"
-	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
-	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/version"
-	"github.com/SigNoz/signoz/pkg/zeus"
-	"github.com/spf13/cobra"
-)
-
-func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
-	var flags signoz.DeprecatedFlags
-
-	serverCmd := &cobra.Command{
-		Use:                "server",
-		Short:              "Run the SigNoz server",
-		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
-		RunE: func(currCmd *cobra.Command, args []string) error {
-			config, err := cmd.NewSigNozConfig(currCmd.Context(), flags)
-			if err != nil {
-				return err
-			}
-
-			return runServer(currCmd.Context(), config, logger)
-		},
-	}
-
-	flags.RegisterFlags(serverCmd)
-	parentCmd.AddCommand(serverCmd)
-}
-
-func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error {
-	// print the version
-	version.Info.PrettyPrint(config.Version)
-
-	// add enterprise sqlstore factories to the community sqlstore factories
-	sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
-	if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
-		logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
-		return err
-	}
-
-	jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour)
-
-	signoz, err := signoz.New(
-		ctx,
-		config,
-		jwt,
-		enterprisezeus.Config(),
-		httpzeus.NewProviderFactory(),
-		enterpriselicensing.Config(24*time.Hour, 3),
-		func(sqlstore sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
-			return httplicensing.NewProviderFactory(sqlstore, zeus, orgGetter, analytics)
-		},
-		signoz.NewEmailingProviderFactories(),
-		signoz.NewCacheProviderFactories(),
-		signoz.NewWebProviderFactories(),
-		func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
-			existingFactories := signoz.NewSQLSchemaProviderFactories(sqlstore)
-			if err := existingFactories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil {
-				panic(err)
-			}
-
-			return existingFactories
-		},
-		sqlstoreFactories,
-		signoz.NewTelemetryStoreProviderFactories(),
-	)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to create signoz", "error", err)
-		return err
-	}
-
-	server, err := enterpriseapp.NewServer(config, signoz, jwt)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to create server", "error", err)
-		return err
-	}
-
-	if err := server.Start(ctx); err != nil {
-		logger.ErrorContext(ctx, "failed to start server", "error", err)
-		return err
-	}
-
-	signoz.Start(ctx)
-
-	if err := signoz.Wait(ctx); err != nil {
-		logger.ErrorContext(ctx, "failed to start signoz", "error", err)
-		return err
-	}
-
-	err = server.Stop(ctx)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to stop server", "error", err)
-		return err
-	}
-
-	err = signoz.Stop(ctx)
-	if err != nil {
-		logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
-		return err
-	}
-
-	return nil
-}


@@ -1,33 +0,0 @@
-package cmd
-
-import (
-	"log/slog"
-	"os"
-
-	"github.com/SigNoz/signoz/pkg/version"
-	"github.com/spf13/cobra"
-	"go.uber.org/zap" //nolint:depguard
-)
-
-var RootCmd = &cobra.Command{
-	Use:               "signoz",
-	Short:             "OpenTelemetry-Native Logs, Metrics and Traces in a single pane",
-	Version:           version.Info.Version(),
-	SilenceUsage:      true,
-	SilenceErrors:     true,
-	CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
-}
-
-func Execute(logger *slog.Logger) {
-	zapLogger := newZapLogger()
-	zap.ReplaceGlobals(zapLogger)
-	defer func() {
-		_ = zapLogger.Sync()
-	}()
-
-	err := RootCmd.Execute()
-	if err != nil {
-		logger.ErrorContext(RootCmd.Context(), "error running command", "error", err)
-		os.Exit(1)
-	}
-}
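Given the `signoz` root command here and the `server` subcommand registered by the two `main.go` files above, the compiled binary from the old layout would be invoked like this (flag values taken from the `go-run-*` Makefile targets in this compare):

```bash
# Start the server subcommand of the built binary.
./signoz server --config ./conf/prometheus.yml --cluster cluster

# Cobra wires the Version field to a --version flag automatically.
./signoz --version
```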


@@ -1,15 +0,0 @@
-package cmd
-
-import (
-	"go.uber.org/zap" //nolint:depguard
-	"go.uber.org/zap/zapcore" //nolint:depguard
-)
-
-// Deprecated: Use `NewLogger` from `pkg/instrumentation` instead.
-func newZapLogger() *zap.Logger {
-	config := zap.NewProductionConfig()
-	config.EncoderConfig.TimeKey = "timestamp"
-	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
-
-	logger, _ := config.Build()
-	return logger
-}


@@ -90,15 +90,6 @@ apiserver:
       - /api/v1/version
       - /
-
-##################### Querier #####################
-querier:
-  # The TTL for cached query results.
-  cache_ttl: 168h
-  # The interval for recent data that should not be cached.
-  flux_interval: 5m
-  # The maximum number of concurrent queries for missing ranges.
-  max_concurrent_queries: 4
 
 ##################### TelemetryStore #####################
 telemetrystore:
   # Maximum number of idle connections in the connection pool.
@@ -112,17 +103,13 @@
   clickhouse:
     # The DSN to use for clickhouse.
     dsn: tcp://localhost:9000
-    # The cluster name to use for clickhouse.
-    cluster: cluster
     # The query settings for clickhouse.
     settings:
       max_execution_time: 0
       max_execution_time_leaf: 0
       timeout_before_checking_execution_speed: 0
       max_bytes_to_read: 0
-      max_result_rows: 0
-      ignore_data_skipping_indices: ""
-      secondary_indices_enable_bulk_filtering: false
+      max_result_rows_for_ch_query: 0
 
 ##################### Prometheus #####################
 prometheus:
@@ -178,6 +165,12 @@ alertmanager:
   # Retention of the notification logs.
   retention: 120h
 
+##################### Analytics #####################
+analytics:
+  # Whether to enable analytics.
+  enabled: false
+
 ##################### Emailing #####################
 emailing:
   # Whether to enable emailing.
@@ -214,35 +207,3 @@
   key_file_path:
   # The path to the certificate file.
   cert_file_path:
-
-##################### Sharder (experimental) #####################
-sharder:
-  # Specifies the sharder provider to use.
-  provider: noop
-  single:
-    # The org id to which this instance belongs to.
-    org_id: org_id
-
-##################### Analytics #####################
-analytics:
-  # Whether to enable analytics.
-  enabled: false
-  segment:
-    # The key to use for segment.
-    key: ""
-
-##################### StatsReporter #####################
-statsreporter:
-  # Whether to enable stats reporter. This is used to provide valuable insights to the SigNoz team. It does not collect any sensitive/PII data.
-  enabled: true
-  # The interval at which the stats are collected.
-  interval: 6h
-  collect:
-    # Whether to collect identities and traits (emails).
-    identities: true
-
-##################### Gateway (License only) #####################
-gateway:
-  # The URL of the gateway's api.
-  url: http://localhost:8080
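These YAML keys can also be set through the environment: the `envprovider` registered in `cmd/conf.go` above resolves `SIGNOZ_`-prefixed variables, with nested keys joined by underscores — the same convention as the `SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN` variable used in the Makefile and compose files of this compare. A sketch (the second mapping is inferred from that convention, not confirmed here):

```bash
# Override telemetrystore.clickhouse.dsn without editing the YAML file.
export SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000

# Assumed mapping for the analytics.enabled key shown above.
export SIGNOZ_ANALYTICS_ENABLED=true
```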


@@ -39,7 +39,7 @@ x-clickhouse-defaults: &clickhouse-defaults
       hard: 262144
 x-zookeeper-defaults: &zookeeper-defaults
   !!merge <<: *common
-  image: signoz/zookeeper:3.7.1
+  image: bitnami/zookeeper:3.7.1
   user: root
   deploy:
     labels:
@@ -174,7 +174,7 @@ services:
     # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.92.1
+    image: signoz/signoz:v0.84.1
     command:
       - --config=/root/config/prometheus.yml
     ports:
@@ -194,7 +194,6 @@ services:
       - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
      - SIGNOZ_JWT_SECRET=secret
-      - DOT_METRICS_ENABLED=true
     healthcheck:
       test:
         - CMD
@@ -207,7 +206,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.129.0
+    image: signoz/signoz-otel-collector:v0.111.41
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -231,7 +230,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.129.0
+    image: signoz/signoz-schema-migrator:v0.111.41
     deploy:
       restart_policy:
         condition: on-failure


@@ -38,7 +38,7 @@ x-clickhouse-defaults: &clickhouse-defaults
       hard: 262144
 x-zookeeper-defaults: &zookeeper-defaults
   !!merge <<: *common
-  image: signoz/zookeeper:3.7.1
+  image: bitnami/zookeeper:3.7.1
   user: root
   deploy:
     labels:
@@ -100,32 +100,26 @@ services:
     #   - "9000:9000"
     #   - "8123:8123"
     #   - "9181:9181"
-    configs:
-      - source: clickhouse-config
-        target: /etc/clickhouse-server/config.xml
-      - source: clickhouse-users
-        target: /etc/clickhouse-server/users.xml
-      - source: clickhouse-custom-function
-        target: /etc/clickhouse-server/custom-function.xml
-      - source: clickhouse-cluster
-        target: /etc/clickhouse-server/config.d/cluster.xml
     volumes:
+      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
+      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
+      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
+      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+      - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
       - clickhouse:/var/lib/clickhouse/
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.92.1
+    image: signoz/signoz:v0.84.1
     command:
       - --config=/root/config/prometheus.yml
     ports:
       - "8080:8080" # signoz port
       # - "6060:6060" # pprof port
     volumes:
+      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
+      - ../common/dashboards:/root/config/dashboards
       - sqlite:/var/lib/signoz/
-    configs:
-      - source: signoz-prometheus-config
-        target: /root/config/prometheus.yml
     environment:
       - SIGNOZ_ALERTMANAGER_PROVIDER=signoz
       - SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
@@ -135,7 +129,6 @@ services:
       - GODEBUG=netdns=go
       - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
-      - DOT_METRICS_ENABLED=true
     healthcheck:
       test:
         - CMD
@@ -148,17 +141,15 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.129.0
+    image: signoz/signoz-otel-collector:v0.111.41
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
       - --copy-path=/var/tmp/collector-config.yaml
       - --feature-gates=-pkg.translator.prometheus.NormalizeName
-    configs:
-      - source: otel-collector-config
-        target: /etc/otel-collector-config.yaml
-      - source: otel-manager-config
-        target: /etc/manager-config.yaml
+    volumes:
+      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
       - LOW_CARDINAL_EXCEPTION_GROUPING=false
@@ -174,7 +165,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.129.0
+    image: signoz/signoz-schema-migrator:v0.111.41
     deploy:
       restart_policy:
         condition: on-failure
@@ -195,24 +186,3 @@
     name: signoz-sqlite
   zookeeper-1:
     name: signoz-zookeeper-1
-
-configs:
-  clickhouse-config:
-    file: ../common/clickhouse/config.xml
-  clickhouse-users:
-    file: ../common/clickhouse/users.xml
-  clickhouse-custom-function:
-    file: ../common/clickhouse/custom-function.xml
-  clickhouse-cluster:
-    file: ../common/clickhouse/cluster.xml
-  signoz-prometheus-config:
-    file: ../common/signoz/prometheus.yml
-  # If you have multiple dashboard files, you can list them individually:
-  # dashboard-foo:
-  #   file: ../common/dashboards/foo.json
-  # dashboard-bar:
-  #   file: ../common/dashboards/bar.json
-  otel-collector-config:
-    file: ./otel-collector-config.yaml
-  otel-manager-config:
-    file: ../common/signoz/otel-collector-opamp-config.yaml


@@ -26,7 +26,7 @@ processors:
     detectors: [env, system]
     timeout: 2s
   signozspanmetrics/delta:
-    metrics_exporter: signozclickhousemetrics
+    metrics_exporter: clickhousemetricswrite, signozclickhousemetrics
     metrics_flush_interval: 60s
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
     dimensions_cache_size: 100000
@@ -60,16 +60,27 @@ exporters:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
     use_new_schema: true
+  clickhousemetricswrite:
+    endpoint: tcp://clickhouse:9000/signoz_metrics
+    resource_to_telemetry_conversion:
+      enabled: true
+    disable_v2: true
+  clickhousemetricswrite/prometheus:
+    endpoint: tcp://clickhouse:9000/signoz_metrics
+    disable_v2: true
   signozclickhousemetrics:
     dsn: tcp://clickhouse:9000/signoz_metrics
   clickhouselogsexporter:
     dsn: tcp://clickhouse:9000/signoz_logs
     timeout: 10s
     use_new_schema: true
+  # debug: {}
 service:
   telemetry:
     logs:
       encoding: json
+    metrics:
+      address: 0.0.0.0:8888
   extensions:
     - health_check
     - pprof
@@ -81,11 +92,11 @@ service:
     metrics:
       receivers: [otlp]
       processors: [batch]
-      exporters: [signozclickhousemetrics]
+      exporters: [clickhousemetricswrite, signozclickhousemetrics]
     metrics/prometheus:
       receivers: [prometheus]
       processors: [batch]
-      exporters: [signozclickhousemetrics]
+      exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
     logs:
       receivers: [otlp]
       processors: [batch]


@@ -42,7 +42,7 @@ x-clickhouse-defaults: &clickhouse-defaults
       hard: 262144
 x-zookeeper-defaults: &zookeeper-defaults
   !!merge <<: *common
-  image: signoz/zookeeper:3.7.1
+  image: bitnami/zookeeper:3.7.1
   user: root
   labels:
     signoz.io/scrape: "true"
@@ -177,7 +177,7 @@ services:
     # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.92.1}
+    image: signoz/signoz:${VERSION:-v0.84.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -197,7 +197,6 @@ services:
       - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
-      - DOT_METRICS_ENABLED=true
     healthcheck:
       test:
         - CMD
@@ -211,7 +210,7 @@ services:
   # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.0}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.41}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -237,7 +236,7 @@ services:
       condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.41}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -248,7 +247,7 @@ services:
       condition: service_healthy
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.41}
     container_name: schema-migrator-async
     command:
       - async


@@ -38,7 +38,7 @@ x-clickhouse-defaults: &clickhouse-defaults
       hard: 262144
 x-zookeeper-defaults: &zookeeper-defaults
   !!merge <<: *common
-  image: signoz/zookeeper:3.7.1
+  image: bitnami/zookeeper:3.7.1
   user: root
   labels:
     signoz.io/scrape: "true"
@@ -110,7 +110,7 @@ services:
     # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.92.1}
+    image: signoz/signoz:${VERSION:-v0.84.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -130,7 +130,6 @@ services:
       - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
-      - DOT_METRICS_ENABLED=true
     healthcheck:
       test:
         - CMD
@@ -143,7 +142,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.0}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.41}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -165,7 +164,7 @@ services:
       condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.41}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -177,7 +176,7 @@ services:
       restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.0}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.41}
     container_name: schema-migrator-async
     command:
       - async


@@ -26,7 +26,7 @@ processors:
     detectors: [env, system]
     timeout: 2s
   signozspanmetrics/delta:
-    metrics_exporter: signozclickhousemetrics
+    metrics_exporter: clickhousemetricswrite, signozclickhousemetrics
     metrics_flush_interval: 60s
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
     dimensions_cache_size: 100000
@@ -60,16 +60,27 @@ exporters:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
     use_new_schema: true
+  clickhousemetricswrite:
+    endpoint: tcp://clickhouse:9000/signoz_metrics
+    disable_v2: true
+    resource_to_telemetry_conversion:
+      enabled: true
+  clickhousemetricswrite/prometheus:
+    endpoint: tcp://clickhouse:9000/signoz_metrics
+    disable_v2: true
   signozclickhousemetrics:
     dsn: tcp://clickhouse:9000/signoz_metrics
   clickhouselogsexporter:
     dsn: tcp://clickhouse:9000/signoz_logs
     timeout: 10s
     use_new_schema: true
+  # debug: {}
 service:
   telemetry:
     logs:
       encoding: json
+    metrics:
+      address: 0.0.0.0:8888
   extensions:
     - health_check
     - pprof
@@ -81,11 +92,11 @@ service:
     metrics:
       receivers: [otlp]
       processors: [batch]
-      exporters: [signozclickhousemetrics]
+      exporters: [clickhousemetricswrite, signozclickhousemetrics]
     metrics/prometheus:
       receivers: [prometheus]
       processors: [batch]
-      exporters: [signozclickhousemetrics]
+      exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
     logs:
       receivers: [otlp]
       processors: [batch]

View File

@@ -44,35 +44,20 @@ Before diving in, make sure you have these tools installed:
SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one. SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one.
### 1. Setting up ClickHouse ### 1. Setting up Clickhouse
First, we need to get ClickHouse running: First, we need to get Clickhouse running:
```bash ```bash
make devenv-clickhouse make devenv-clickhouse
``` ```
This command: This command:
- Starts ClickHouse in a single-shard, single-replica cluster - Starts Clickhouse in a single-shard, single-replica cluster
- Sets up Zookeeper - Sets up Zookeeper
- Runs the latest schema migrations - Runs the latest schema migrations
### 2. Setting up SigNoz OpenTelemetry Collector ### 2. Starting the Backend
Next, start the OpenTelemetry Collector to receive telemetry data:
```bash
make devenv-signoz-otel-collector
```
This command:
- Starts the SigNoz OpenTelemetry Collector
- Listens on port 4317 (gRPC) and 4318 (HTTP) for incoming telemetry data
- Forwards data to ClickHouse for storage
> 💡 **Quick Setup**: Use `make devenv-up` to start both ClickHouse and OTel Collector together
### 3. Starting the Backend
1. Run the backend server: 1. Run the backend server:
```bash ```bash
@@ -88,24 +73,19 @@ This command:
> 💡 **Tip**: The API server runs at `http://localhost:8080/` by default > 💡 **Tip**: The API server runs at `http://localhost:8080/` by default
### 4. Setting up the Frontend ### 3. Setting up the Frontend
1. Navigate to the frontend directory: 1. Install dependencies:
```bash
cd frontend
```
2. Install dependencies:
```bash ```bash
yarn install yarn install
``` ```
3. Create a `.env` file in this directory: 2. Create a `.env` file in the `frontend` directory:
```env ```env
FRONTEND_API_ENDPOINT=http://localhost:8080 FRONTEND_API_ENDPOINT=http://localhost:8080
``` ```
4. Start the development server: 3. Start the development server:
```bash ```bash
yarn dev yarn dev
``` ```
@@ -113,25 +93,3 @@ This command:
> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code > 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code
Now you're all set to start developing! Happy coding! 🎉 Now you're all set to start developing! Happy coding! 🎉
## Verifying Your Setup
To verify everything is working correctly:
1. **Check ClickHouse**: `curl http://localhost:8123/ping` (should return "Ok.")
2. **Check OTel Collector**: `curl http://localhost:13133` (should return health status)
3. **Check Backend**: `curl http://localhost:8080/api/v1/health` (should return `{"status":"ok"}`)
4. **Check Frontend**: Open `http://localhost:3301` in your browser
## How to send test data?
You can now send telemetry data to your local SigNoz instance:
- **OTLP gRPC**: `localhost:4317`
- **OTLP HTTP**: `localhost:4318`
For example, using `curl` to send a test trace:
```bash
curl -X POST http://localhost:4318/v1/traces \
-H "Content-Type: application/json" \
-d '{"resourceSpans":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"test-service"}}]},"scopeSpans":[{"spans":[{"traceId":"12345678901234567890123456789012","spanId":"1234567890123456","name":"test-span","startTimeUnixNano":"1609459200000000000","endTimeUnixNano":"1609459201000000000"}]}]}]}'
```

View File

@@ -1,51 +0,0 @@
# Endpoint
This guide outlines the recommended approach for designing endpoints, with a focus on entity relationships, RESTful structure, and examples from the codebase.
## How do we design an endpoint?
### Understand the core entities and their relationships
Start with understanding the core entities and their relationships. For example:
- **Organization**: an organization can have multiple users
### Structure Endpoints RESTfully
Endpoints should reflect the resource hierarchy and follow RESTful conventions. Use clear, **pluralized resource names** and versioning. For example:
- `POST /v1/organizations` — Create an organization
- `GET /v1/organizations/:id` — Get an organization by id
- `DELETE /v1/organizations/:id` — Delete an organization by id
- `PUT /v1/organizations/:id` — Update an organization by id
- `GET /v1/organizations/:id/users` — Get all users in an organization
- `GET /v1/organizations/me/users` — Get all users in my organization
Think in terms of resource navigation in a file system. For example, to find your organization, you would navigate to the root of the file system and then to the `organizations` directory. To find a user in an organization, you would navigate to the `organizations` directory and then to the `id` directory.
```bash
v1/
├── organizations/
│ └── 123/
│ └── users/
```
`me` endpoints are special. They are used to determine the actual id via some auth/external mechanism. For `me` endpoints, think of the `me` directory being symlinked to your organization directory. For example, if you are a part of the organization `123`, the `me` directory will be symlinked to `/v1/organizations/123`:
```bash
v1/
├── organizations/
│ └── me/ -> symlink to /v1/organizations/123
│ └── users/
│ └── 123/
│ └── users/
```
> 💡 **Note**: There are various ways to structure endpoints. Some prefer to use singular resource names instead of `me`. Others prefer to use singular resource names for all endpoints. We have, however, chosen to standardize our endpoints in the manner described above.
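To make the conventions concrete, here is a minimal sketch of how these routes could be registered with `gorilla/mux` (the router used elsewhere in the codebase). The handler names are hypothetical and shown only to illustrate the route shapes:
```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

// Hypothetical handlers; only the route shapes matter here.
func createOrganization(w http.ResponseWriter, r *http.Request)    {}
func getOrganization(w http.ResponseWriter, r *http.Request)       {}
func listOrganizationUsers(w http.ResponseWriter, r *http.Request) {}

func main() {
	router := mux.NewRouter()
	// Clear, plural resource names with versioned paths.
	router.HandleFunc("/v1/organizations", createOrganization).Methods(http.MethodPost)
	// Register the literal "me" path before the {id} wildcard so it takes
	// precedence; mux matches routes in registration order.
	router.HandleFunc("/v1/organizations/me/users", listOrganizationUsers).Methods(http.MethodGet)
	router.HandleFunc("/v1/organizations/{id}", getOrganization).Methods(http.MethodGet)
	// Nested resource reflecting the organization-to-users hierarchy.
	router.HandleFunc("/v1/organizations/{id}/users", listOrganizationUsers).Methods(http.MethodGet)
	_ = http.ListenAndServe(":8080", router)
}
```
In a real handler, the `me` route would resolve the caller's organization id from the auth context before delegating to the same logic as the `{id}` route.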
## What should I remember?
- Use clear, **plural resource names**
- Use `me` endpoints for determining the actual id via some auth mechanism
> 💡 **Note**: When in doubt, diagram the relationships and walk through the user flows as if navigating a file system. This will help you design endpoints that are both logical and user-friendly.

View File

@@ -1,106 +0,0 @@
# Provider
SigNoz is built on the provider pattern, a design approach where code is organized into providers that handle specific application responsibilities. Providers act as adapter components that integrate with external services and deliver required functionality to the application.
> 💡 **Note**: Coming from a DDD background? Providers are similar (not exactly the same) to adapter/infrastructure services.
## How to create a new provider?
To create a new provider, create a directory in the `pkg/` directory named after your provider. The provider package consists of four key components:
- **Interface** (`pkg/<name>/<name>.go`): Defines the provider's interface. Other packages should import this interface to use the provider.
- **Config** (`pkg/<name>/config.go`): Contains provider configuration, implementing the `factory.Config` interface from [factory/config.go](/pkg/factory/config.go).
- **Implementation** (`pkg/<name>/<implname><name>/provider.go`): Contains the provider implementation, including a `NewProvider` function that returns a `factory.Provider` interface from [factory/provider.go](/pkg/factory/provider.go).
- **Mock** (`pkg/<name>/<name>test.go`): Provides mocks for the provider, typically used by dependent packages for unit testing.
For example, the [prometheus](/pkg/prometheus) provider delivers a prometheus engine to the application:
- `pkg/prometheus/prometheus.go` - Interface definition
- `pkg/prometheus/config.go` - Configuration
- `pkg/prometheus/clickhouseprometheus/provider.go` - Clickhouse-powered implementation
- `pkg/prometheus/prometheustest/provider.go` - Mock implementation
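As a sketch, a new (hypothetical) `cache` provider with an in-memory implementation would be laid out roughly as follows. `cache.Config` is assumed to be defined in `pkg/cache/config.go` as described above, and the factory signatures mirror the ones used in the wiring examples below:
```go
// pkg/cache/cache.go: the interface other packages import (hypothetical example)
package cache

import "context"

type Cache interface {
	Get(ctx context.Context, key string) ([]byte, error)
	Set(ctx context.Context, key string, value []byte) error
}
```
```go
// pkg/cache/memorycache/provider.go: one implementation (hypothetical example)
package memorycache

import (
	"context"

	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/factory"
)

type provider struct {
	store map[string][]byte
}

// NewFactory registers the implementation under the name "memory".
func NewFactory() factory.ProviderFactory[cache.Cache, cache.Config] {
	return factory.NewProviderFactory(factory.MustNewName("memory"), New)
}

// New is the constructor passed to the factory, matching the
// (ctx, settings, config) signature used by other providers.
func New(ctx context.Context, settings factory.ProviderSettings, config cache.Config) (cache.Cache, error) {
	return &provider{store: map[string][]byte{}}, nil
}

func (p *provider) Get(ctx context.Context, key string) ([]byte, error) {
	return p.store[key], nil
}

func (p *provider) Set(ctx context.Context, key string, value []byte) error {
	p.store[key] = value
	return nil
}
```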
## How to wire it up?
The `pkg/signoz` package contains the inversion of control container responsible for wiring providers. It handles instantiation, configuration, and assembly of providers based on configuration metadata.
> 💡 **Note**: Coming from a Java background? Providers are similar to Spring beans.
Wiring up a provider involves three steps:
1. Wiring up the configuration
Add your config from `pkg/<name>/config.go` to the `pkg/signoz/config.Config` struct and in new factories:
```go
type Config struct {
...
MyProvider myprovider.Config `mapstructure:"myprovider"`
...
}
func NewConfig(ctx context.Context, resolverConfig config.ResolverConfig, ....) (Config, error) {
...
configFactories := []factory.ConfigFactory{
myprovider.NewConfigFactory(),
}
...
}
```
2. Wiring up the provider
Add available provider implementations in `pkg/signoz/provider.go`:
```go
func NewMyProviderFactories() factory.NamedMap[factory.ProviderFactory[myprovider.MyProvider, myprovider.Config]] {
return factory.MustNewNamedMap(
myproviderone.NewFactory(),
myprovidertwo.NewFactory(),
)
}
```
3. Instantiate the provider by adding it to the `SigNoz` struct in `pkg/signoz/signoz.go`:
```go
type SigNoz struct {
...
MyProvider myprovider.MyProvider
...
}
func New(...) (*SigNoz, error) {
...
myprovider, err := myproviderone.New(ctx, settings, config.MyProvider, "one/two")
if err != nil {
return nil, err
}
...
}
```
## How to use it?
To use a provider, import its interface. For example, to use the prometheus provider, import `pkg/prometheus/prometheus.go`:
```go
import "github.com/SigNoz/signoz/pkg/prometheus/prometheus"
func CreateSomething(ctx context.Context, prometheus prometheus.Prometheus) {
...
prometheus.DoSomething()
...
}
```
## Why do we need this?
Like any dependency injection framework, providers decouple the codebase from implementation details. This is especially valuable in SigNoz's large codebase, where we need to swap implementations without changing dependent code. The provider pattern offers several benefits apart from the obvious one of decoupling:
- Configuration is **defined with each provider and centralized in one place**, making it easier to understand and manage through various methods (environment variables, config files, etc.)
- Provider mocking is **straightforward for unit testing**, with a consistent pattern for locating mocks
- **Multiple implementations** of the same provider are **supported**, as demonstrated by our sqlstore provider
## What should I remember?
- Use the provider pattern wherever applicable.
- Always create a provider **irrespective of the number of implementations**. This makes it easier to add new implementations in the future.

View File

@@ -16,7 +16,7 @@ __Table of Contents__
- [Prerequisites](#prerequisites-1) - [Prerequisites](#prerequisites-1)
- [Install Helm Repo and Charts](#install-helm-repo-and-charts) - [Install Helm Repo and Charts](#install-helm-repo-and-charts)
- [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app-1) - [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app-1)
- [Monitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes) - [Monitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
- [What's next](#whats-next) - [What's next](#whats-next)

View File

@@ -1,34 +0,0 @@
package anomaly
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type DailyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*DailyProvider)(nil)
func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &dp.BaseSeasonalProvider
}
func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider {
dp := &DailyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(dp)
}
return dp
}
func (p *DailyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
req.Seasonality = SeasonalityDaily
return p.getAnomalies(ctx, orgID, req)
}

View File

@@ -1,35 +0,0 @@
package anomaly
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type HourlyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*HourlyProvider)(nil)
func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &hp.BaseSeasonalProvider
}
// NewHourlyProvider creates an HourlyProvider from the given generic options
func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider {
hp := &HourlyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(hp)
}
return hp
}
func (p *HourlyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
req.Seasonality = SeasonalityHourly
return p.getAnomalies(ctx, orgID, req)
}

View File

@@ -1,223 +0,0 @@
package anomaly
import (
"time"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Seasonality struct{ valuer.String }
var (
SeasonalityHourly = Seasonality{valuer.NewString("hourly")}
SeasonalityDaily = Seasonality{valuer.NewString("daily")}
SeasonalityWeekly = Seasonality{valuer.NewString("weekly")}
)
var (
oneWeekOffset = uint64(24 * 7 * time.Hour.Milliseconds())
oneDayOffset = uint64(24 * time.Hour.Milliseconds())
oneHourOffset = uint64(time.Hour.Milliseconds())
fiveMinOffset = uint64(5 * time.Minute.Milliseconds())
)
func (s Seasonality) IsValid() bool {
switch s {
case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly:
return true
default:
return false
}
}
type AnomaliesRequest struct {
Params qbtypes.QueryRangeRequest
Seasonality Seasonality
}
type AnomaliesResponse struct {
Results []*qbtypes.TimeSeriesData
}
// anomalyParams is the params for anomaly detection
// prediction = avg(past_period_query) + avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query)
//
// i.e. (rounded value for past period) + (seasonal growth)
//
// score = abs(value - prediction) / stddev (current_season_query)
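//
// A worked example with illustrative numbers: if avg(past_period_query) = 100,
// avg(current_season_query) = 110 and mean(past_season_query, past2_season_query,
// past3_season_query) = 105, then prediction = 100 + 110 - 105 = 105; for an
// observed value of 130 with stddev(current_season_query) = 10,
// score = abs(130 - 105) / 10 = 2.5.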
type anomalyQueryParams struct {
// CurrentPeriodQuery is the query range params for period user is looking at or eval window
// Example: (now-5m, now), (now-30m, now), (now-1h, now)
// The results obtained from this query are used to compare with predicted values
// and to detect anomalies
CurrentPeriodQuery qbtypes.QueryRangeRequest
// PastPeriodQuery is the query range params for past period of seasonality
// Example: For weekly seasonality, (now-1w-5m, now-1w)
// : For daily seasonality, (now-1d-5m, now-1d)
// : For hourly seasonality, (now-1h-5m, now-1h)
PastPeriodQuery qbtypes.QueryRangeRequest
// CurrentSeasonQuery is the query range params for current period (seasonal)
// Example: For weekly seasonality, this is the query range params for the (now-1w-5m, now)
// : For daily seasonality, this is the query range params for the (now-1d-5m, now)
// : For hourly seasonality, this is the query range params for the (now-1h-5m, now)
CurrentSeasonQuery qbtypes.QueryRangeRequest
// PastSeasonQuery is the query range params for past seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-2w-5m, now-1w)
// : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
// : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
PastSeasonQuery qbtypes.QueryRangeRequest
// Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
// : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
// : For hourly seasonality, this is the query range params for the (now-3h-5m, now-2h)
Past2SeasonQuery qbtypes.QueryRangeRequest
// Past3SeasonQuery is the query range params for past 3 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-4w-5m, now-3w)
// : For daily seasonality, this is the query range params for the (now-4d-5m, now-3d)
// : For hourly seasonality, this is the query range params for the (now-4h-5m, now-3h)
Past3SeasonQuery qbtypes.QueryRangeRequest
}
func prepareAnomalyQueryParams(req qbtypes.QueryRangeRequest, seasonality Seasonality) *anomalyQueryParams {
start := req.Start
end := req.End
currentPeriodQuery := qbtypes.QueryRangeRequest{
Start: start,
End: end,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
var pastPeriodStart, pastPeriodEnd uint64
switch seasonality {
// for one week period, we fetch the data from the past week with 5 min offset
case SeasonalityWeekly:
pastPeriodStart = start - oneWeekOffset - fiveMinOffset
pastPeriodEnd = end - oneWeekOffset
// for one day period, we fetch the data from the past day with 5 min offset
case SeasonalityDaily:
pastPeriodStart = start - oneDayOffset - fiveMinOffset
pastPeriodEnd = end - oneDayOffset
// for one hour period, we fetch the data from the past hour with 5 min offset
case SeasonalityHourly:
pastPeriodStart = start - oneHourOffset - fiveMinOffset
pastPeriodEnd = end - oneHourOffset
}
pastPeriodQuery := qbtypes.QueryRangeRequest{
Start: pastPeriodStart,
End: pastPeriodEnd,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
// seasonality growth trend
var currentGrowthPeriodStart, currentGrowthPeriodEnd uint64
switch seasonality {
case SeasonalityWeekly:
currentGrowthPeriodStart = start - oneWeekOffset
currentGrowthPeriodEnd = start
case SeasonalityDaily:
currentGrowthPeriodStart = start - oneDayOffset
currentGrowthPeriodEnd = start
case SeasonalityHourly:
currentGrowthPeriodStart = start - oneHourOffset
currentGrowthPeriodEnd = start
}
currentGrowthQuery := qbtypes.QueryRangeRequest{
Start: currentGrowthPeriodStart,
End: currentGrowthPeriodEnd,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
var pastGrowthPeriodStart, pastGrowthPeriodEnd uint64
switch seasonality {
case SeasonalityWeekly:
pastGrowthPeriodStart = start - 2*oneWeekOffset
pastGrowthPeriodEnd = start - 1*oneWeekOffset
case SeasonalityDaily:
pastGrowthPeriodStart = start - 2*oneDayOffset
pastGrowthPeriodEnd = start - 1*oneDayOffset
case SeasonalityHourly:
pastGrowthPeriodStart = start - 2*oneHourOffset
pastGrowthPeriodEnd = start - 1*oneHourOffset
}
pastGrowthQuery := qbtypes.QueryRangeRequest{
Start: pastGrowthPeriodStart,
End: pastGrowthPeriodEnd,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
var past2GrowthPeriodStart, past2GrowthPeriodEnd uint64
switch seasonality {
case SeasonalityWeekly:
past2GrowthPeriodStart = start - 3*oneWeekOffset
past2GrowthPeriodEnd = start - 2*oneWeekOffset
case SeasonalityDaily:
past2GrowthPeriodStart = start - 3*oneDayOffset
past2GrowthPeriodEnd = start - 2*oneDayOffset
case SeasonalityHourly:
past2GrowthPeriodStart = start - 3*oneHourOffset
past2GrowthPeriodEnd = start - 2*oneHourOffset
}
past2GrowthQuery := qbtypes.QueryRangeRequest{
Start: past2GrowthPeriodStart,
End: past2GrowthPeriodEnd,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
var past3GrowthPeriodStart, past3GrowthPeriodEnd uint64
switch seasonality {
case SeasonalityWeekly:
past3GrowthPeriodStart = start - 4*oneWeekOffset
past3GrowthPeriodEnd = start - 3*oneWeekOffset
case SeasonalityDaily:
past3GrowthPeriodStart = start - 4*oneDayOffset
past3GrowthPeriodEnd = start - 3*oneDayOffset
case SeasonalityHourly:
past3GrowthPeriodStart = start - 4*oneHourOffset
past3GrowthPeriodEnd = start - 3*oneHourOffset
}
past3GrowthQuery := qbtypes.QueryRangeRequest{
Start: past3GrowthPeriodStart,
End: past3GrowthPeriodEnd,
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: req.CompositeQuery,
NoCache: false,
}
return &anomalyQueryParams{
CurrentPeriodQuery: currentPeriodQuery,
PastPeriodQuery: pastPeriodQuery,
CurrentSeasonQuery: currentGrowthQuery,
PastSeasonQuery: pastGrowthQuery,
Past2SeasonQuery: past2GrowthQuery,
Past3SeasonQuery: past3GrowthQuery,
}
}
type anomalyQueryResults struct {
CurrentPeriodResults []*qbtypes.TimeSeriesData
PastPeriodResults []*qbtypes.TimeSeriesData
CurrentSeasonResults []*qbtypes.TimeSeriesData
PastSeasonResults []*qbtypes.TimeSeriesData
Past2SeasonResults []*qbtypes.TimeSeriesData
Past3SeasonResults []*qbtypes.TimeSeriesData
}

View File

@@ -1,11 +0,0 @@
package anomaly
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Provider interface {
GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error)
}

View File

@@ -1,463 +0,0 @@
package anomaly
import (
"context"
"log/slog"
"math"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/valuer"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)
var (
// TODO(srikanthccv): make this configurable?
movingAvgWindowSize = 7
)
// BaseProvider is an interface that includes common methods for all provider types
type BaseProvider interface {
GetBaseSeasonalProvider() *BaseSeasonalProvider
}
// GenericProviderOption is a generic type for provider options
type GenericProviderOption[T BaseProvider] func(T)
func WithQuerier[T BaseProvider](querier querier.Querier) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().querier = querier
}
}
func WithLogger[T BaseProvider](logger *slog.Logger) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().logger = logger
}
}
type BaseSeasonalProvider struct {
querier querier.Querier
logger *slog.Logger
}
func (p *BaseSeasonalProvider) getQueryParams(req *AnomaliesRequest) *anomalyQueryParams {
if !req.Seasonality.IsValid() {
req.Seasonality = SeasonalityDaily
}
return prepareAnomalyQueryParams(req.Params, req.Seasonality)
}
func (p *BaseSeasonalProvider) toTSResults(ctx context.Context, resp *qbtypes.QueryRangeResponse) []*qbtypes.TimeSeriesData {
tsData := []*qbtypes.TimeSeriesData{}
if resp == nil {
p.logger.InfoContext(ctx, "nil response from query range")
return tsData
}
for _, item := range resp.Data.Results {
if resultData, ok := item.(*qbtypes.TimeSeriesData); ok {
tsData = append(tsData, resultData)
}
}
return tsData
}
func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID, params *anomalyQueryParams) (*anomalyQueryResults, error) {
// TODO(srikanthccv): parallelize this?
p.logger.InfoContext(ctx, "fetching results for current period", "anomaly_current_period_query", params.CurrentPeriodQuery)
currentPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentPeriodQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past period", "anomaly_past_period_query", params.PastPeriodQuery)
pastPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.PastPeriodQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for current season", "anomaly_current_season_query", params.CurrentSeasonQuery)
currentSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentSeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past season", "anomaly_past_season_query", params.PastSeasonQuery)
pastSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.PastSeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past 2 season", "anomaly_past_2season_query", params.Past2SeasonQuery)
past2SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past2SeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past 3 season", "anomaly_past_3season_query", params.Past3SeasonQuery)
past3SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past3SeasonQuery)
if err != nil {
return nil, err
}
return &anomalyQueryResults{
CurrentPeriodResults: p.toTSResults(ctx, currentPeriodResults),
PastPeriodResults: p.toTSResults(ctx, pastPeriodResults),
CurrentSeasonResults: p.toTSResults(ctx, currentSeasonResults),
PastSeasonResults: p.toTSResults(ctx, pastSeasonResults),
Past2SeasonResults: p.toTSResults(ctx, past2SeasonResults),
Past3SeasonResults: p.toTSResults(ctx, past3SeasonResults),
}, nil
}
// getMatchingSeries gets the matching series from the query result
// for the given series
func (p *BaseSeasonalProvider) getMatchingSeries(_ context.Context, queryResult *qbtypes.TimeSeriesData, series *qbtypes.TimeSeries) *qbtypes.TimeSeries {
if queryResult == nil || len(queryResult.Aggregations) == 0 || len(queryResult.Aggregations[0].Series) == 0 {
return nil
}
for _, curr := range queryResult.Aggregations[0].Series {
currLabelsKey := qbtypes.GetUniqueSeriesKey(curr.Labels)
seriesLabelsKey := qbtypes.GetUniqueSeriesKey(series.Labels)
if currLabelsKey == seriesLabelsKey {
return curr
}
}
return nil
}
func (p *BaseSeasonalProvider) getAvg(series *qbtypes.TimeSeries) float64 {
if series == nil || len(series.Values) == 0 {
return 0
}
var sum float64
for _, smpl := range series.Values {
sum += smpl.Value
}
return sum / float64(len(series.Values))
}
func (p *BaseSeasonalProvider) getStdDev(series *qbtypes.TimeSeries) float64 {
if series == nil || len(series.Values) == 0 {
return 0
}
avg := p.getAvg(series)
var sum float64
for _, smpl := range series.Values {
sum += math.Pow(smpl.Value-avg, 2)
}
return math.Sqrt(sum / float64(len(series.Values)))
}
// getMovingAvg gets the moving average for the given series
// for the given window size and start index
func (p *BaseSeasonalProvider) getMovingAvg(series *qbtypes.TimeSeries, movingAvgWindowSize, startIdx int) float64 {
if series == nil || len(series.Values) == 0 {
return 0
}
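// clamp startIdx so that a full window fits whenever the series has enough
// points; when it does not, the window shrinks to the points that remain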
if startIdx >= len(series.Values)-movingAvgWindowSize {
startIdx = int(math.Max(0, float64(len(series.Values)-movingAvgWindowSize)))
}
var sum float64
points := series.Values[startIdx:]
windowSize := int(math.Min(float64(movingAvgWindowSize), float64(len(points))))
for i := 0; i < windowSize; i++ {
sum += points[i].Value
}
avg := sum / float64(windowSize)
return avg
}
func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
if len(floats) == 0 {
return 0
}
var sum float64
for _, f := range floats {
sum += f
}
return sum / float64(len(floats))
}
func (p *BaseSeasonalProvider) getPredictedSeries(
ctx context.Context,
series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries,
) *qbtypes.TimeSeries {
predictedSeries := &qbtypes.TimeSeries{
Labels: series.Labels,
Values: make([]*qbtypes.TimeSeriesValue, 0),
}
// for each point in the series, get the predicted value
// the predicted value is the moving average (with window size = 7) of the previous period series
// plus the average of the current season series
// minus the mean of the past season series, past2 season series and past3 season series
for idx, curr := range series.Values {
movingAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
avg := p.getAvg(currentSeasonSeries)
mean := p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
predictedValue := movingAvg + avg - mean
if predictedValue < 0 {
// this should not happen (except when the data has extreme outliers)
// we will use the moving avg of the previous period series in this case
p.logger.WarnContext(ctx, "predicted value is less than 0 for series", "anomaly_predicted_value", predictedValue, "anomaly_labels", series.Labels)
predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}
p.logger.DebugContext(ctx, "predicted value for series",
"anomaly_moving_avg", movingAvg,
"anomaly_avg", avg,
"anomaly_mean", mean,
"anomaly_labels", series.Labels,
"anomaly_predicted_value", predictedValue,
"anomaly_curr", curr.Value,
)
predictedSeries.Values = append(predictedSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
Value: predictedValue,
})
}
return predictedSeries
}
// getBounds gets the upper and lower bounds for the given series
// for the given z score threshold
// moving avg of the previous period series + z score threshold * std dev of the series
// moving avg of the previous period series - z score threshold * std dev of the series
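// For example, with a predicted moving avg of 105, a std dev of 10 and a
// z score threshold of 3, the bounds are 105 +/- 30: upper = 135 and
// lower = max(75, 0) = 75.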
func (p *BaseSeasonalProvider) getBounds(
series, predictedSeries *qbtypes.TimeSeries,
zScoreThreshold float64,
) (*qbtypes.TimeSeries, *qbtypes.TimeSeries) {
upperBoundSeries := &qbtypes.TimeSeries{
Labels: series.Labels,
Values: make([]*qbtypes.TimeSeriesValue, 0),
}
lowerBoundSeries := &qbtypes.TimeSeries{
Labels: series.Labels,
Values: make([]*qbtypes.TimeSeriesValue, 0),
}
for idx, curr := range series.Values {
upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
upperBoundSeries.Values = append(upperBoundSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
Value: upperBound,
})
lowerBoundSeries.Values = append(lowerBoundSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
Value: math.Max(lowerBound, 0),
})
}
return upperBoundSeries, lowerBoundSeries
}
// getExpectedValue gets the expected value for the given series
// for the given index
// prevSeriesAvg + currentSeasonSeriesAvg - mean of past season series, past2 season series and past3 season series
func (p *BaseSeasonalProvider) getExpectedValue(
_, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries, idx int,
) float64 {
prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
return prevSeriesAvg + currentSeasonSeriesAvg - p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
}
// getScore gets the anomaly score for the given series
// for the given index
// (value - expectedValue) / std dev of the current season series
func (p *BaseSeasonalProvider) getScore(
series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries, value float64, idx int,
) float64 {
expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
if expectedValue < 0 {
expectedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}
return (value - expectedValue) / p.getStdDev(weekSeries)
}
// getAnomalyScores gets the anomaly scores for the given series
// for each point in the series:
// (value - expectedValue) / std dev of the current season series
func (p *BaseSeasonalProvider) getAnomalyScores(
series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *qbtypes.TimeSeries,
) *qbtypes.TimeSeries {
anomalyScoreSeries := &qbtypes.TimeSeries{
Labels: series.Labels,
Values: make([]*qbtypes.TimeSeriesValue, 0),
}
for idx, curr := range series.Values {
anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
anomalyScoreSeries.Values = append(anomalyScoreSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
Value: anomalyScore,
})
}
return anomalyScoreSeries
}
func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
anomalyParams := p.getQueryParams(req)
anomalyQueryResults, err := p.getResults(ctx, orgID, anomalyParams)
if err != nil {
return nil, err
}
currentPeriodResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.CurrentPeriodResults {
currentPeriodResults[result.QueryName] = result
}
pastPeriodResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.PastPeriodResults {
pastPeriodResults[result.QueryName] = result
}
currentSeasonResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.CurrentSeasonResults {
currentSeasonResults[result.QueryName] = result
}
pastSeasonResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.PastSeasonResults {
pastSeasonResults[result.QueryName] = result
}
past2SeasonResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.Past2SeasonResults {
past2SeasonResults[result.QueryName] = result
}
past3SeasonResults := make(map[string]*qbtypes.TimeSeriesData)
for _, result := range anomalyQueryResults.Past3SeasonResults {
past3SeasonResults[result.QueryName] = result
}
for _, result := range currentPeriodResults {
funcs := req.Params.FuncsForQuery(result.QueryName)
var zScoreThreshold float64
for _, f := range funcs {
if f.Name == qbtypes.FunctionNameAnomaly {
for _, arg := range f.Args {
if arg.Name != "z_score_threshold" {
continue
}
value, ok := arg.Value.(float64)
if ok {
zScoreThreshold = value
} else {
p.logger.InfoContext(ctx, "z_score_threshold is not a float64, defaulting to 3")
zScoreThreshold = 3
}
break
}
}
}
pastPeriodResult, ok := pastPeriodResults[result.QueryName]
if !ok {
continue
}
currentSeasonResult, ok := currentSeasonResults[result.QueryName]
if !ok {
continue
}
pastSeasonResult, ok := pastSeasonResults[result.QueryName]
if !ok {
continue
}
past2SeasonResult, ok := past2SeasonResults[result.QueryName]
if !ok {
continue
}
past3SeasonResult, ok := past3SeasonResults[result.QueryName]
if !ok {
continue
}
// no data
if len(result.Aggregations) == 0 {
continue
}
aggOfInterest := result.Aggregations[0]
for _, series := range aggOfInterest.Series {
stdDev := p.getStdDev(series)
p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)
pastPeriodSeries := p.getMatchingSeries(ctx, pastPeriodResult, series)
currentSeasonSeries := p.getMatchingSeries(ctx, currentSeasonResult, series)
pastSeasonSeries := p.getMatchingSeries(ctx, pastSeasonResult, series)
past2SeasonSeries := p.getMatchingSeries(ctx, past2SeasonResult, series)
past3SeasonSeries := p.getMatchingSeries(ctx, past3SeasonResult, series)
prevSeriesAvg := p.getAvg(pastPeriodSeries)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
p.logger.InfoContext(ctx, "calculated mean for series",
"anomaly_prev_series_avg", prevSeriesAvg,
"anomaly_current_season_series_avg", currentSeasonSeriesAvg,
"anomaly_past_season_series_avg", pastSeasonSeriesAvg,
"anomaly_past_2season_series_avg", past2SeasonSeriesAvg,
"anomaly_past_3season_series_avg", past3SeasonSeriesAvg,
"anomaly_labels", series.Labels,
)
predictedSeries := p.getPredictedSeries(
ctx,
series,
pastPeriodSeries,
currentSeasonSeries,
pastSeasonSeries,
past2SeasonSeries,
past3SeasonSeries,
)
aggOfInterest.PredictedSeries = append(aggOfInterest.PredictedSeries, predictedSeries)
upperBoundSeries, lowerBoundSeries := p.getBounds(
series,
predictedSeries,
zScoreThreshold,
)
aggOfInterest.UpperBoundSeries = append(aggOfInterest.UpperBoundSeries, upperBoundSeries)
aggOfInterest.LowerBoundSeries = append(aggOfInterest.LowerBoundSeries, lowerBoundSeries)
anomalyScoreSeries := p.getAnomalyScores(
series,
pastPeriodSeries,
currentSeasonSeries,
pastSeasonSeries,
past2SeasonSeries,
past3SeasonSeries,
)
aggOfInterest.AnomalyScores = append(aggOfInterest.AnomalyScores, anomalyScoreSeries)
}
}
results := make([]*qbtypes.TimeSeriesData, 0, len(currentPeriodResults))
for _, result := range currentPeriodResults {
results = append(results, result)
}
return &AnomaliesResponse{
Results: results,
}, nil
}

View File

@@ -1,34 +0,0 @@
package anomaly
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type WeeklyProvider struct {
BaseSeasonalProvider
}
var _ BaseProvider = (*WeeklyProvider)(nil)
func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider {
return &wp.BaseSeasonalProvider
}
func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider {
wp := &WeeklyProvider{
BaseSeasonalProvider: BaseSeasonalProvider{},
}
for _, opt := range opts {
opt(wp)
}
return wp
}
func (p *WeeklyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *AnomaliesRequest) (*AnomaliesResponse, error) {
req.Seasonality = SeasonalityWeekly
return p.getAnomalies(ctx, orgID, req)
}

View File

@@ -6,13 +6,11 @@ import (
"time" "time"
"github.com/SigNoz/signoz/ee/licensing/licensingstore/sqllicensingstore" "github.com/SigNoz/signoz/ee/licensing/licensingstore/sqllicensingstore"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/errors" "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory" "github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/licensing" "github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/sqlstore" "github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/analyticstypes" "github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/types/licensetypes" "github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer" "github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/zeus" "github.com/SigNoz/signoz/pkg/zeus"
@@ -24,29 +22,19 @@ type provider struct {
zeus zeus.Zeus zeus zeus.Zeus
config licensing.Config config licensing.Config
settings factory.ScopedProviderSettings settings factory.ScopedProviderSettings
orgGetter organization.Getter
analytics analytics.Analytics
stopChan chan struct{} stopChan chan struct{}
} }
func NewProviderFactory(store sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] { func NewProviderFactory(store sqlstore.SQLStore, zeus zeus.Zeus) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, providerSettings factory.ProviderSettings, config licensing.Config) (licensing.Licensing, error) { return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, providerSettings factory.ProviderSettings, config licensing.Config) (licensing.Licensing, error) {
return New(ctx, providerSettings, config, store, zeus, orgGetter, analytics) return New(ctx, providerSettings, config, store, zeus)
}) })
} }
func New(ctx context.Context, ps factory.ProviderSettings, config licensing.Config, sqlstore sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) (licensing.Licensing, error) { func New(ctx context.Context, ps factory.ProviderSettings, config licensing.Config, sqlstore sqlstore.SQLStore, zeus zeus.Zeus) (licensing.Licensing, error) {
settings := factory.NewScopedProviderSettings(ps, "github.com/SigNoz/signoz/ee/licensing/httplicensing") settings := factory.NewScopedProviderSettings(ps, "github.com/SigNoz/signoz/ee/licensing/httplicensing")
licensestore := sqllicensingstore.New(sqlstore) licensestore := sqllicensingstore.New(sqlstore)
return &provider{ return &provider{store: licensestore, zeus: zeus, config: config, settings: settings, stopChan: make(chan struct{})}, nil
store: licensestore,
zeus: zeus,
config: config,
settings: settings,
orgGetter: orgGetter,
stopChan: make(chan struct{}),
analytics: analytics,
}, nil
} }
func (provider *provider) Start(ctx context.Context) error { func (provider *provider) Start(ctx context.Context) error {
@@ -78,13 +66,20 @@ func (provider *provider) Stop(ctx context.Context) error {
} }
func (provider *provider) Validate(ctx context.Context) error { func (provider *provider) Validate(ctx context.Context) error {
organizations, err := provider.orgGetter.ListByOwnedKeyRange(ctx) organizations, err := provider.store.ListOrganizations(ctx)
if err != nil { if err != nil {
return err return err
} }
for _, organization := range organizations { for _, organizationID := range organizations {
err := provider.Refresh(ctx, organization.ID) err := provider.Refresh(ctx, organizationID)
if err != nil {
return err
}
}
if len(organizations) == 0 {
err = provider.InitFeatures(ctx, licensetypes.BasicPlan)
if err != nil { if err != nil {
return err return err
} }
@@ -110,6 +105,11 @@ func (provider *provider) Activate(ctx context.Context, organizationID valuer.UU
return err return err
} }
err = provider.InitFeatures(ctx, license.Features)
if err != nil {
return err
}
return nil return nil
} }
@@ -129,24 +129,28 @@ func (provider *provider) GetActive(ctx context.Context, organizationID valuer.U
func (provider *provider) Refresh(ctx context.Context, organizationID valuer.UUID) error { func (provider *provider) Refresh(ctx context.Context, organizationID valuer.UUID) error {
activeLicense, err := provider.GetActive(ctx, organizationID) activeLicense, err := provider.GetActive(ctx, organizationID)
if err != nil { if err != nil && !errors.Ast(err, errors.TypeNotFound) {
if errors.Ast(err, errors.TypeNotFound) {
return nil
}
provider.settings.Logger().ErrorContext(ctx, "license validation failed", "org_id", organizationID.StringValue()) provider.settings.Logger().ErrorContext(ctx, "license validation failed", "org_id", organizationID.StringValue())
return err return err
} }
if err != nil && errors.Ast(err, errors.TypeNotFound) {
provider.settings.Logger().DebugContext(ctx, "no active license found, defaulting to basic plan", "org_id", organizationID.StringValue())
err = provider.InitFeatures(ctx, licensetypes.BasicPlan)
if err != nil {
return err
}
return nil
}
data, err := provider.zeus.GetLicense(ctx, activeLicense.Key) data, err := provider.zeus.GetLicense(ctx, activeLicense.Key)
if err != nil { if err != nil {
if time.Since(activeLicense.LastValidatedAt) > time.Duration(provider.config.FailureThreshold)*provider.config.PollInterval { if time.Since(activeLicense.LastValidatedAt) > time.Duration(provider.config.FailureThreshold)*provider.config.PollInterval {
activeLicense.UpdateFeatures(licensetypes.BasicPlan) provider.settings.Logger().ErrorContext(ctx, "license validation failed for consecutive poll intervals, defaulting to basic plan", "failure_threshold", provider.config.FailureThreshold, "license_id", activeLicense.ID.StringValue(), "org_id", organizationID.StringValue())
updatedStorableLicense := licensetypes.NewStorableLicenseFromLicense(activeLicense) err = provider.InitFeatures(ctx, licensetypes.BasicPlan)
err = provider.store.Update(ctx, organizationID, updatedStorableLicense)
if err != nil { if err != nil {
return err return err
} }
return nil return nil
} }
return err return err
@@ -163,25 +167,6 @@ func (provider *provider) Refresh(ctx context.Context, organizationID valuer.UUI
return err return err
} }
stats := licensetypes.NewStatsFromLicense(activeLicense)
provider.analytics.Send(ctx,
analyticstypes.Track{
UserId: "stats_" + organizationID.String(),
Event: "License Updated",
Properties: analyticstypes.NewPropertiesFromMap(stats),
Context: &analyticstypes.Context{
Extra: map[string]interface{}{
analyticstypes.KeyGroupID: organizationID.String(),
},
},
},
analyticstypes.Group{
UserId: "stats_" + organizationID.String(),
GroupId: organizationID.String(),
Traits: analyticstypes.NewTraitsFromMap(stats),
},
)
return nil return nil
} }
@@ -223,27 +208,73 @@ func (provider *provider) Portal(ctx context.Context, organizationID valuer.UUID
return &licensetypes.GettableSubscription{RedirectURL: gjson.GetBytes(response, "url").String()}, nil return &licensetypes.GettableSubscription{RedirectURL: gjson.GetBytes(response, "url").String()}, nil
} }
func (provider *provider) GetFeatureFlags(ctx context.Context, organizationID valuer.UUID) ([]*licensetypes.Feature, error) { // feature surrogate
license, err := provider.GetActive(ctx, organizationID) func (provider *provider) CheckFeature(ctx context.Context, key string) error {
feature, err := provider.store.GetFeature(ctx, key)
if err != nil { if err != nil {
if errors.Ast(err, errors.TypeNotFound) { return err
return licensetypes.BasicPlan, nil
} }
if feature.Active {
return nil
}
return errors.Newf(errors.TypeUnsupported, licensing.ErrCodeFeatureUnavailable, "feature unavailable: %s", key)
}
func (provider *provider) GetFeatureFlag(ctx context.Context, key string) (*featuretypes.GettableFeature, error) {
featureStatus, err := provider.store.GetFeature(ctx, key)
if err != nil {
return nil, err
}
return &featuretypes.GettableFeature{
Name: featureStatus.Name,
Active: featureStatus.Active,
Usage: int64(featureStatus.Usage),
UsageLimit: int64(featureStatus.UsageLimit),
Route: featureStatus.Route,
}, nil
}
func (provider *provider) GetFeatureFlags(ctx context.Context) ([]*featuretypes.GettableFeature, error) {
storableFeatures, err := provider.store.GetAllFeatures(ctx)
if err != nil {
return nil, err return nil, err
} }
return license.Features, nil gettableFeatures := make([]*featuretypes.GettableFeature, len(storableFeatures))
for idx, gettableFeature := range storableFeatures {
gettableFeatures[idx] = &featuretypes.GettableFeature{
Name: gettableFeature.Name,
Active: gettableFeature.Active,
Usage: int64(gettableFeature.Usage),
UsageLimit: int64(gettableFeature.UsageLimit),
Route: gettableFeature.Route,
}
} }
func (provider *provider) Collect(ctx context.Context, orgID valuer.UUID) (map[string]any, error) { return gettableFeatures, nil
activeLicense, err := provider.GetActive(ctx, orgID)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
return map[string]any{}, nil
} }
return nil, err func (provider *provider) InitFeatures(ctx context.Context, features []*featuretypes.GettableFeature) error {
featureStatus := make([]*featuretypes.StorableFeature, len(features))
for i, f := range features {
featureStatus[i] = &featuretypes.StorableFeature{
Name: f.Name,
Active: f.Active,
Usage: int(f.Usage),
UsageLimit: int(f.UsageLimit),
Route: f.Route,
}
} }
return licensetypes.NewStatsFromLicense(activeLicense), nil return provider.store.InitFeatures(ctx, featureStatus)
}
func (provider *provider) UpdateFeatureFlag(ctx context.Context, feature *featuretypes.GettableFeature) error {
return provider.store.UpdateFeature(ctx, &featuretypes.StorableFeature{
Name: feature.Name,
Active: feature.Active,
Usage: int(feature.Usage),
UsageLimit: int(feature.UsageLimit),
Route: feature.Route,
})
} }

View File

@@ -5,6 +5,8 @@ import (
"github.com/SigNoz/signoz/pkg/errors" "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore" "github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/types/licensetypes" "github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer" "github.com/SigNoz/signoz/pkg/valuer"
) )
@@ -79,3 +81,106 @@ func (store *store) Update(ctx context.Context, organizationID valuer.UUID, stor
return nil return nil
} }
func (store *store) ListOrganizations(ctx context.Context) ([]valuer.UUID, error) {
orgIDStrs := make([]string, 0)
err := store.sqlstore.
BunDB().
NewSelect().
Model(new(types.Organization)).
Column("id").
Scan(ctx, &orgIDStrs)
if err != nil {
return nil, err
}
orgIDs := make([]valuer.UUID, len(orgIDStrs))
for idx, orgIDStr := range orgIDStrs {
orgID, err := valuer.NewUUID(orgIDStr)
if err != nil {
return nil, err
}
orgIDs[idx] = orgID
}
return orgIDs, nil
}
func (store *store) CreateFeature(ctx context.Context, storableFeature *featuretypes.StorableFeature) error {
_, err := store.
sqlstore.
BunDB().
NewInsert().
Model(storableFeature).
Exec(ctx)
if err != nil {
return store.sqlstore.WrapAlreadyExistsErrf(err, errors.CodeAlreadyExists, "feature with name:%s already exists", storableFeature.Name)
}
return nil
}
func (store *store) GetFeature(ctx context.Context, key string) (*featuretypes.StorableFeature, error) {
storableFeature := new(featuretypes.StorableFeature)
err := store.
sqlstore.
BunDB().
NewSelect().
Model(storableFeature).
Where("name = ?", key).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "feature with name:%s does not exist", key)
}
return storableFeature, nil
}
func (store *store) GetAllFeatures(ctx context.Context) ([]*featuretypes.StorableFeature, error) {
storableFeatures := make([]*featuretypes.StorableFeature, 0)
err := store.
sqlstore.
BunDB().
NewSelect().
Model(&storableFeatures).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "features do not exist")
}
return storableFeatures, nil
}
func (store *store) InitFeatures(ctx context.Context, storableFeatures []*featuretypes.StorableFeature) error {
_, err := store.
sqlstore.
BunDB().
NewInsert().
Model(&storableFeatures).
On("CONFLICT (name) DO UPDATE").
Set("active = EXCLUDED.active").
Set("usage = EXCLUDED.usage").
Set("usage_limit = EXCLUDED.usage_limit").
Set("route = EXCLUDED.route").
Exec(ctx)
if err != nil {
return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "unable to initialise features")
}
return nil
}
func (store *store) UpdateFeature(ctx context.Context, storableFeature *featuretypes.StorableFeature) error {
_, err := store.
sqlstore.
BunDB().
NewUpdate().
Model(storableFeature).
Exec(ctx)
if err != nil {
return errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "unable to update feature with key: %s", storableFeature.Name)
}
return nil
}

View File

@@ -0,0 +1,4 @@
.vscode
README.md
signoz.db
bin

View File

@@ -11,7 +11,7 @@ before:
builds: builds:
- id: signoz - id: signoz
binary: bin/signoz binary: bin/signoz
main: ./cmd/enterprise main: ee/query-service/main.go
env: env:
- CGO_ENABLED=1 - CGO_ENABLED=1
- >- - >-
@@ -39,7 +39,6 @@ builds:
- -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io - -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
- -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud - -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
- -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1 - -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
- -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr
- >- - >-
{{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }} {{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
mod_timestamp: "{{ .CommitTimestamp }}" mod_timestamp: "{{ .CommitTimestamp }}"

View File

@@ -11,9 +11,11 @@ RUN apk update && \
COPY ./target/${OS}-${TARGETARCH}/signoz /root/signoz COPY ./target/${OS}-${TARGETARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/ COPY frontend/build/ /etc/signoz/web/
RUN chmod 755 /root /root/signoz RUN chmod 755 /root /root/signoz
ENTRYPOINT ["./signoz", "server"] ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]

View File

@@ -1,10 +1,3 @@
FROM node:18-bullseye AS build
WORKDIR /opt/
COPY ./frontend/ ./
RUN CI=1 yarn install
RUN CI=1 yarn build
FROM golang:1.23-bullseye FROM golang:1.23-bullseye
ARG OS="linux" ARG OS="linux"
@@ -30,7 +23,6 @@ COPY go.mod go.sum ./
RUN go mod download RUN go mod download
COPY ./cmd/ ./cmd/
COPY ./ee/ ./ee/ COPY ./ee/ ./ee/
COPY ./pkg/ ./pkg/ COPY ./pkg/ ./pkg/
COPY ./templates/email /root/templates COPY ./templates/email /root/templates
@@ -39,8 +31,6 @@ COPY Makefile Makefile
RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz
COPY --from=build /opt/build ./web/
RUN chmod 755 /root /root/signoz RUN chmod 755 /root /root/signoz
ENTRYPOINT ["/root/signoz", "server"] ENTRYPOINT ["/root/signoz"]

View File

@@ -12,9 +12,11 @@ RUN apk update && \
rm -rf /var/cache/apk/* rm -rf /var/cache/apk/*
COPY ./target/${OS}-${ARCH}/signoz /root/signoz COPY ./target/${OS}-${ARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/ COPY frontend/build/ /etc/signoz/web/
RUN chmod 755 /root /root/signoz RUN chmod 755 /root /root/signoz
ENTRYPOINT ["./signoz", "server"] ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]

View File

@@ -1,22 +1,22 @@
package api package api
import ( import (
"context"
"net/http" "net/http"
"net/http/httputil" "net/http/httputil"
"time" "time"
"github.com/SigNoz/signoz/ee/licensing/httplicensing" "github.com/SigNoz/signoz/ee/licensing/httplicensing"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway" "github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/ee/query-service/interfaces"
"github.com/SigNoz/signoz/ee/query-service/usage" "github.com/SigNoz/signoz/ee/query-service/usage"
"github.com/SigNoz/signoz/pkg/alertmanager" "github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/apis/fields" "github.com/SigNoz/signoz/pkg/apis/fields"
"github.com/SigNoz/signoz/pkg/http/middleware" "github.com/SigNoz/signoz/pkg/http/middleware"
querierAPI "github.com/SigNoz/signoz/pkg/querier"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app" baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations" "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations" "github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline" "github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model" basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
rules "github.com/SigNoz/signoz/pkg/query-service/rules" rules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/signoz" "github.com/SigNoz/signoz/pkg/signoz"
@@ -26,7 +26,8 @@ import (
) )
type APIHandlerOptions struct { type APIHandlerOptions struct {
DataConnector interfaces.Reader DataConnector interfaces.DataConnector
PreferSpanMetrics bool
RulesManager *rules.Manager RulesManager *rules.Manager
UsageManager *usage.Manager UsageManager *usage.Manager
IntegrationsController *integrations.Controller IntegrationsController *integrations.Controller
@@ -50,6 +51,7 @@ type APIHandler struct {
func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) { func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{ baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector, Reader: opts.DataConnector,
PreferSpanMetrics: opts.PreferSpanMetrics,
RuleManager: opts.RulesManager, RuleManager: opts.RulesManager,
IntegrationsController: opts.IntegrationsController, IntegrationsController: opts.IntegrationsController,
CloudIntegrationsController: opts.CloudIntegrationsController, CloudIntegrationsController: opts.CloudIntegrationsController,
@@ -57,9 +59,8 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
FluxInterval: opts.FluxInterval, FluxInterval: opts.FluxInterval,
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager), AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
LicensingAPI: httplicensing.NewLicensingAPI(signoz.Licensing), LicensingAPI: httplicensing.NewLicensingAPI(signoz.Licensing),
FieldsAPI: fields.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.TelemetryStore), FieldsAPI: fields.NewAPI(signoz.TelemetryStore, signoz.Instrumentation.Logger()),
Signoz: signoz, Signoz: signoz,
QuerierAPI: querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
}) })
if err != nil { if err != nil {
@@ -85,12 +86,23 @@ func (ah *APIHandler) Gateway() *httputil.ReverseProxy {
return ah.opts.Gateway return ah.opts.Gateway
} }
func (ah *APIHandler) CheckFeature(ctx context.Context, key string) bool {
err := ah.Signoz.Licensing.CheckFeature(ctx, key)
return err == nil
}
// RegisterRoutes registers routes for this handler on the given router // RegisterRoutes registers routes for this handler on the given router
func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) { func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
// note: add ee override methods first // note: add ee override methods first
// routes available only in ee version // routes available only in ee version
router.HandleFunc("/api/v1/features", am.ViewAccess(ah.getFeatureFlags)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/featureFlags", am.OpenAccess(ah.getFeatureFlags)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/loginPrecheck", am.OpenAccess(ah.Signoz.Handlers.User.LoginPrecheck)).Methods(http.MethodGet)
// invite
router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.Signoz.Handlers.User.GetInvite)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/invite/accept", am.OpenAccess(ah.Signoz.Handlers.User.AcceptInvite)).Methods(http.MethodPost)
// paid plans specific routes // paid plans specific routes
router.HandleFunc("/api/v1/complete/saml", am.OpenAccess(ah.receiveSAML)).Methods(http.MethodPost) router.HandleFunc("/api/v1/complete/saml", am.OpenAccess(ah.receiveSAML)).Methods(http.MethodPost)
@@ -102,6 +114,9 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet) router.HandleFunc("/api/v1/billing", am.AdminAccess(ah.getBilling)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.LicensingAPI.Portal)).Methods(http.MethodPost) router.HandleFunc("/api/v1/portal", am.AdminAccess(ah.LicensingAPI.Portal)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
// v3 // v3
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Activate)).Methods(http.MethodPost) router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Activate)).Methods(http.MethodPost)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Refresh)).Methods(http.MethodPut) router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.LicensingAPI.Refresh)).Methods(http.MethodPut)
@@ -110,11 +125,6 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
// v4 // v4
router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost) router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
// v5
router.HandleFunc("/api/v5/query_range", am.ViewAccess(ah.queryRangeV5)).Methods(http.MethodPost)
router.HandleFunc("/api/v5/substitute_vars", am.ViewAccess(ah.QuerierAPI.ReplaceVariables)).Methods(http.MethodPost)
// Gateway // Gateway
router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP)) router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP))
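The added CheckFeature helper collapses the licensing lookup into a boolean, which makes per-route feature gating straightforward. A minimal sketch of how a handler could wrap a route with it (the wrapper and the feature key are assumptions for illustration, not code from this change):

func (ah *APIHandler) requireFeature(key string, next http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        // CheckFeature returns false whenever licensing reports the feature as unavailable.
        if !ah.CheckFeature(r.Context(), key) {
            http.Error(w, "feature unavailable on this plan", http.StatusForbidden)
            return
        }
        next(w, r)
    }
}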


@@ -9,7 +9,9 @@ import (
    "go.uber.org/zap"

+   "github.com/SigNoz/signoz/pkg/http/render"
    "github.com/SigNoz/signoz/pkg/query-service/constants"
+   "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
)
@@ -23,11 +25,29 @@ func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string)

// receiveSAML completes a SAML request and gets user logged in
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
+   claims, err := authtypes.ClaimsFromContext(r.Context())
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+
+   orgID, err := valuer.NewUUID(claims.OrgID)
+   if err != nil {
+       render.Error(w, err)
+       return
+   }
+
    // this is the source url that initiated the login request
    redirectUri := constants.GetDefaultSiteURL()
    ctx := context.Background()

-   err := r.ParseForm()
+   _, err = ah.Signoz.Licensing.GetActive(ctx, orgID)
+   if err != nil {
+       zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
+       http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
+       return
+   }
+
+   err = r.ParseForm()
    if err != nil {
        zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
        handleSsoError(w, r, redirectUri)
@@ -56,19 +76,6 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
        return
    }

-   orgID, err := valuer.NewUUID(domain.OrgID)
-   if err != nil {
-       handleSsoError(w, r, redirectUri)
-       return
-   }
-
-   _, err = ah.Signoz.Licensing.GetActive(ctx, orgID)
-   if err != nil {
-       zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
-       http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
-       return
-   }
-
    sp, err := domain.PrepareSamlRequest(parsedState)
    if err != nil {
        zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err))
@@ -96,7 +103,7 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
        return
    }

-   nextPage, err := ah.Signoz.Modules.User.PrepareSsoRedirect(ctx, redirectUri, email)
+   nextPage, err := ah.Signoz.Modules.User.PrepareSsoRedirect(ctx, redirectUri, email, ah.opts.JWT)
    if err != nil {
        zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
        handleSsoError(w, r, redirectUri)
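One detail worth noting in the redirect-on-error path above: the error message is interpolated into the ssoerror query parameter without URL encoding. A hedged sketch of an escaped variant (the helper name is hypothetical; assumes fmt, net/http, and net/url imports):

func redirectWithSsoError(w http.ResponseWriter, r *http.Request, siteURL, msg string) {
    // url.QueryEscape keeps spaces and punctuation in the message from corrupting the query string.
    http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", siteURL, url.QueryEscape(msg)), http.StatusMovedPermanently)
}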


@@ -0,0 +1,62 @@
package api

import (
    "net/http"
    "strings"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/http/render"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/gorilla/mux"
)

func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
    ah.lockUnlockDashboard(w, r, true)
}

func (ah *APIHandler) unlockDashboard(w http.ResponseWriter, r *http.Request) {
    ah.lockUnlockDashboard(w, r, false)
}

func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request, lock bool) {
    // Locking can only be done by the owner of the dashboard or an admin:
    // - Fetch the dashboard
    // - Check if the user is the owner or an admin
    // - If yes, lock/unlock the dashboard
    // - If no, return 403

    // Get the dashboard UUID from the request
    uuid := mux.Vars(r)["uuid"]
    if strings.HasPrefix(uuid, "integration") {
        render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "dashboards created by integrations cannot be modified"))
        return
    }

    claims, err := authtypes.ClaimsFromContext(r.Context())
    if err != nil {
        render.Error(w, errors.Newf(errors.TypeUnauthenticated, errors.CodeUnauthenticated, "unauthenticated"))
        return
    }

    dashboard, err := ah.Signoz.Modules.Dashboard.Get(r.Context(), claims.OrgID, uuid)
    if err != nil {
        render.Error(w, err)
        return
    }

    if err := claims.IsAdmin(); err != nil && (dashboard.CreatedBy != claims.Email) {
        render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "You are not authorized to lock/unlock this dashboard"))
        return
    }

    // Lock/Unlock the dashboard
    err = ah.Signoz.Modules.Dashboard.LockUnlock(r.Context(), claims.OrgID, uuid, lock)
    if err != nil {
        render.Error(w, err)
        return
    }

    ah.Respond(w, "Dashboard updated successfully")
}
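The authorization rule embedded in the handler above is: an org admin may always lock or unlock, and a non-admin may only do so for dashboards they created. Extracted as a standalone predicate for clarity (a sketch; the Claims type name is assumed from the ClaimsFromContext call above):

func canLockUnlock(claims authtypes.Claims, createdBy string) bool {
    if err := claims.IsAdmin(); err == nil {
        return true // admins may always lock/unlock
    }
    return createdBy == claims.Email // otherwise only the dashboard's creator
}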


@@ -12,7 +12,7 @@ import (
    pkgError "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/http/render"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
-   "github.com/SigNoz/signoz/pkg/types/licensetypes"
+   "github.com/SigNoz/signoz/pkg/types/featuretypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "go.uber.org/zap"
)
@@ -31,7 +31,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
        return
    }

-   featureSet, err := ah.Signoz.Licensing.GetFeatureFlags(r.Context(), orgID)
+   featureSet, err := ah.Signoz.Licensing.GetFeatureFlags(r.Context())
    if err != nil {
        ah.HandleError(w, err, http.StatusInternalServerError)
        return
@@ -59,17 +59,9 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
        }
    }

-   if constants.IsPreferSpanMetrics {
-       for idx, feature := range featureSet {
-           if feature.Name == licensetypes.UseSpanMetrics {
-               featureSet[idx].Active = true
-           }
-       }
-   }
-
-   if constants.IsDotMetricsEnabled {
-       for idx, feature := range featureSet {
-           if feature.Name == licensetypes.DotMetricsEnabled {
+   if ah.opts.PreferSpanMetrics {
+       for idx, feature := range featureSet {
+           if feature.Name == featuretypes.UseSpanMetrics {
                featureSet[idx].Active = true
            }
        }
@@ -80,7 +72,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
// fetchZeusFeatures makes an HTTP GET request to the /zeusFeatures endpoint
// and returns the FeatureSet.
-func fetchZeusFeatures(url, licenseKey string) ([]*licensetypes.Feature, error) {
+func fetchZeusFeatures(url, licenseKey string) ([]*featuretypes.GettableFeature, error) {
    // Check if the URL is empty
    if url == "" {
        return nil, fmt.Errorf("url is empty")
@@ -140,27 +132,27 @@ func fetchZeusFeatures(url, licenseKey string) ([]*licensetypes.Feature, error)
type ZeusFeaturesResponse struct {
    Status string                            `json:"status"`
-   Data   []*licensetypes.Feature           `json:"data"`
+   Data   []*featuretypes.GettableFeature   `json:"data"`
}

// MergeFeatureSets merges two FeatureSet arrays with precedence to zeusFeatures.
-func MergeFeatureSets(zeusFeatures, internalFeatures []*licensetypes.Feature) []*licensetypes.Feature {
+func MergeFeatureSets(zeusFeatures, internalFeatures []*featuretypes.GettableFeature) []*featuretypes.GettableFeature {
    // Create a map to store the merged features
-   featureMap := make(map[string]*licensetypes.Feature)
+   featureMap := make(map[string]*featuretypes.GettableFeature)

    // Add all features from the otherFeatures set to the map
    for _, feature := range internalFeatures {
-       featureMap[feature.Name.StringValue()] = feature
+       featureMap[feature.Name] = feature
    }

    // Add all features from the zeusFeatures set to the map
    // If a feature already exists (i.e., same name), the zeusFeature will overwrite it
    for _, feature := range zeusFeatures {
-       featureMap[feature.Name.StringValue()] = feature
+       featureMap[feature.Name] = feature
    }

    // Convert the map back to a FeatureSet slice
-   var mergedFeatures []*licensetypes.Feature
+   var mergedFeatures []*featuretypes.GettableFeature
    for _, feature := range featureMap {
        mergedFeatures = append(mergedFeatures, feature)
    }
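A worked example of the precedence rule above (the feature names are invented for illustration):

zeus := []*featuretypes.GettableFeature{{Name: "SSO", Active: true}}
internal := []*featuretypes.GettableFeature{
    {Name: "SSO", Active: false},
    {Name: "CUSTOM_DOMAIN", Active: true},
}

// "SSO" appears in both inputs, so the zeus entry (Active: true) wins;
// "CUSTOM_DOMAIN" exists only internally and is carried over unchanged.
merged := MergeFeatureSets(zeus, internal)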


@@ -3,79 +3,78 @@ package api
import (
    "testing"

-   "github.com/SigNoz/signoz/pkg/types/licensetypes"
-   "github.com/SigNoz/signoz/pkg/valuer"
+   "github.com/SigNoz/signoz/pkg/types/featuretypes"
    "github.com/stretchr/testify/assert"
)

func TestMergeFeatureSets(t *testing.T) {
    tests := []struct {
        name             string
-       zeusFeatures     []*licensetypes.Feature
-       internalFeatures []*licensetypes.Feature
-       expected         []*licensetypes.Feature
+       zeusFeatures     []*featuretypes.GettableFeature
+       internalFeatures []*featuretypes.GettableFeature
+       expected         []*featuretypes.GettableFeature
    }{
        {
            name:             "empty zeusFeatures and internalFeatures",
-           zeusFeatures:     []*licensetypes.Feature{},
-           internalFeatures: []*licensetypes.Feature{},
-           expected:         []*licensetypes.Feature{},
+           zeusFeatures:     []*featuretypes.GettableFeature{},
+           internalFeatures: []*featuretypes.GettableFeature{},
+           expected:         []*featuretypes.GettableFeature{},
        },
        {
            name: "non-empty zeusFeatures and empty internalFeatures",
-           zeusFeatures: []*licensetypes.Feature{
-               {Name: valuer.NewString("Feature1"), Active: true},
-               {Name: valuer.NewString("Feature2"), Active: false},
+           zeusFeatures: []*featuretypes.GettableFeature{
+               {Name: "Feature1", Active: true},
+               {Name: "Feature2", Active: false},
            },
-           internalFeatures: []*licensetypes.Feature{},
-           expected: []*licensetypes.Feature{
-               {Name: valuer.NewString("Feature1"), Active: true},
-               {Name: valuer.NewString("Feature2"), Active: false},
+           internalFeatures: []*featuretypes.GettableFeature{},
+           expected: []*featuretypes.GettableFeature{
+               {Name: "Feature1", Active: true},
+               {Name: "Feature2", Active: false},
            },
        },
        {
            name: "empty zeusFeatures and non-empty internalFeatures",
-           zeusFeatures: []*licensetypes.Feature{},
-           internalFeatures: []*licensetypes.Feature{
-               {Name: valuer.NewString("Feature1"), Active: true},
-               {Name: valuer.NewString("Feature2"), Active: false},
+           zeusFeatures: []*featuretypes.GettableFeature{},
+           internalFeatures: []*featuretypes.GettableFeature{
+               {Name: "Feature1", Active: true},
+               {Name: "Feature2", Active: false},
            },
-           expected: []*licensetypes.Feature{
-               {Name: valuer.NewString("Feature1"), Active: true},
-               {Name: valuer.NewString("Feature2"), Active: false},
+           expected: []*featuretypes.GettableFeature{
+               {Name: "Feature1", Active: true},
+               {Name: "Feature2", Active: false},
            },
        },
        {
            name: "non-empty zeusFeatures and non-empty internalFeatures with no conflicts",
-           zeusFeatures: []*licensetypes.Feature{
-               {Name: valuer.NewString("Feature1"), Active: true},
-               {Name: valuer.NewString("Feature3"), Active: false},
+           zeusFeatures: []*featuretypes.GettableFeature{
+               {Name: "Feature1", Active: true},
+               {Name: "Feature3", Active: false},
            },
-           internalFeatures: []*licensetypes.Feature{
-               {Name: valuer.NewString("Feature2"), Active: true},
-               {Name: valuer.NewString("Feature4"), Active: false},
+           internalFeatures: []*featuretypes.GettableFeature{
+               {Name: "Feature2", Active: true},
+               {Name: "Feature4", Active: false},
            },
-           expected: []*licensetypes.Feature{
-               {Name: valuer.NewString("Feature1"), Active: true},
-               {Name: valuer.NewString("Feature2"), Active: true},
-               {Name: valuer.NewString("Feature3"), Active: false},
-               {Name: valuer.NewString("Feature4"), Active: false},
+           expected: []*featuretypes.GettableFeature{
+               {Name: "Feature1", Active: true},
+               {Name: "Feature2", Active: true},
+               {Name: "Feature3", Active: false},
+               {Name: "Feature4", Active: false},
            },
        },
        {
            name: "non-empty zeusFeatures and non-empty internalFeatures with conflicts",
-           zeusFeatures: []*licensetypes.Feature{
-               {Name: valuer.NewString("Feature1"), Active: true},
-               {Name: valuer.NewString("Feature2"), Active: false},
+           zeusFeatures: []*featuretypes.GettableFeature{
+               {Name: "Feature1", Active: true},
+               {Name: "Feature2", Active: false},
            },
-           internalFeatures: []*licensetypes.Feature{
-               {Name: valuer.NewString("Feature1"), Active: false},
-               {Name: valuer.NewString("Feature3"), Active: true},
+           internalFeatures: []*featuretypes.GettableFeature{
+               {Name: "Feature1", Active: false},
+               {Name: "Feature3", Active: true},
            },
-           expected: []*licensetypes.Feature{
-               {Name: valuer.NewString("Feature1"), Active: true},
-               {Name: valuer.NewString("Feature2"), Active: false},
-               {Name: valuer.NewString("Feature3"), Active: true},
+           expected: []*featuretypes.GettableFeature{
+               {Name: "Feature1", Active: true},
+               {Name: "Feature2", Active: false},
+               {Name: "Feature3", Active: true},
            },
        },
    }
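Because MergeFeatureSets assembles its result by iterating a map, the order of the returned slice is not deterministic. The test body that runs these cases is not shown here, but an order-insensitive loop would look like the following sketch (testify's assert.ElementsMatch ignores ordering):

for _, tt := range tests {
    t.Run(tt.name, func(t *testing.T) {
        got := MergeFeatureSets(tt.zeusFeatures, tt.internalFeatures)
        // Passes as long as both slices contain the same elements, in any order.
        assert.ElementsMatch(t, tt.expected, got)
    })
}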


@@ -2,16 +2,11 @@ package api
import (
    "bytes"
-   "context"
-   "encoding/json"
    "fmt"
    "io"
    "net/http"
-   "runtime/debug"

-   anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"
    "github.com/SigNoz/signoz/ee/query-service/anomaly"
-   "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/http/render"
    baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
    "github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
@@ -20,8 +15,6 @@ import (
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "go.uber.org/zap"
-
-   qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
@@ -143,139 +136,3 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
        aH.QueryRangeV4(w, r)
    }
}
-
-func extractSeasonality(anomalyQuery *qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) anomalyV2.Seasonality {
-   for _, fn := range anomalyQuery.Functions {
-       if fn.Name == qbtypes.FunctionNameAnomaly {
-           for _, arg := range fn.Args {
-               if arg.Name == "seasonality" {
-                   if seasonalityStr, ok := arg.Value.(string); ok {
-                       switch seasonalityStr {
-                       case "weekly":
-                           return anomalyV2.SeasonalityWeekly
-                       case "hourly":
-                           return anomalyV2.SeasonalityHourly
-                       }
-                   }
-               }
-           }
-       }
-   }
-   return anomalyV2.SeasonalityDaily // default
-}
-
-func createAnomalyProvider(aH *APIHandler, seasonality anomalyV2.Seasonality) anomalyV2.Provider {
-   switch seasonality {
-   case anomalyV2.SeasonalityWeekly:
-       return anomalyV2.NewWeeklyProvider(
-           anomalyV2.WithQuerier[*anomalyV2.WeeklyProvider](aH.Signoz.Querier),
-           anomalyV2.WithLogger[*anomalyV2.WeeklyProvider](aH.Signoz.Instrumentation.Logger()),
-       )
-   case anomalyV2.SeasonalityHourly:
-       return anomalyV2.NewHourlyProvider(
-           anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](aH.Signoz.Querier),
-           anomalyV2.WithLogger[*anomalyV2.HourlyProvider](aH.Signoz.Instrumentation.Logger()),
-       )
-   default:
-       return anomalyV2.NewDailyProvider(
-           anomalyV2.WithQuerier[*anomalyV2.DailyProvider](aH.Signoz.Querier),
-           anomalyV2.WithLogger[*anomalyV2.DailyProvider](aH.Signoz.Instrumentation.Logger()),
-       )
-   }
-}
-
-func (aH *APIHandler) handleAnomalyQuery(ctx context.Context, orgID valuer.UUID, anomalyQuery *qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation], queryRangeRequest qbtypes.QueryRangeRequest) (*anomalyV2.AnomaliesResponse, error) {
-   seasonality := extractSeasonality(anomalyQuery)
-   provider := createAnomalyProvider(aH, seasonality)
-   return provider.GetAnomalies(ctx, orgID, &anomalyV2.AnomaliesRequest{Params: queryRangeRequest})
-}
-
-func (aH *APIHandler) queryRangeV5(rw http.ResponseWriter, req *http.Request) {
-   bodyBytes, err := io.ReadAll(req.Body)
-   if err != nil {
-       render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to read request body: %v", err))
-       return
-   }
-   req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
-
-   ctx := req.Context()
-   claims, err := authtypes.ClaimsFromContext(ctx)
-   if err != nil {
-       render.Error(rw, err)
-       return
-   }
-
-   var queryRangeRequest qbtypes.QueryRangeRequest
-   if err := json.NewDecoder(req.Body).Decode(&queryRangeRequest); err != nil {
-       render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "failed to decode request body: %v", err))
-       return
-   }
-
-   defer func() {
-       if r := recover(); r != nil {
-           stackTrace := string(debug.Stack())
-           queryJSON, _ := json.Marshal(queryRangeRequest)
-           aH.Signoz.Instrumentation.Logger().ErrorContext(ctx, "panic in QueryRange",
-               "error", r,
-               "user", claims.UserID,
-               "payload", string(queryJSON),
-               "stacktrace", stackTrace,
-           )
-           render.Error(rw, errors.NewInternalf(
-               errors.CodeInternal,
-               "Something went wrong on our end. It's not you, it's us. Our team is notified about it. Reach out to support if issue persists.",
-           ))
-       }
-   }()
-
-   if err := queryRangeRequest.Validate(); err != nil {
-       render.Error(rw, err)
-       return
-   }
-
-   orgID, err := valuer.NewUUID(claims.OrgID)
-   if err != nil {
-       render.Error(rw, err)
-       return
-   }
-
-   if anomalyQuery, ok := queryRangeRequest.IsAnomalyRequest(); ok {
-       anomalies, err := aH.handleAnomalyQuery(ctx, orgID, anomalyQuery, queryRangeRequest)
-       if err != nil {
-           render.Error(rw, errors.NewInternalf(errors.CodeInternal, "failed to get anomalies: %v", err))
-           return
-       }
-       results := []any{}
-       for _, item := range anomalies.Results {
-           results = append(results, item)
-       }
-       finalResp := &qbtypes.QueryRangeResponse{
-           Type: queryRangeRequest.RequestType,
-           Data: struct {
-               Results []any `json:"results"`
-           }{
-               Results: results,
-           },
-           Meta: struct {
-               RowsScanned  uint64 `json:"rowsScanned"`
-               BytesScanned uint64 `json:"bytesScanned"`
-               DurationMS   uint64 `json:"durationMs"`
-           }{},
-       }
-       render.Success(rw, http.StatusOK, finalResp)
-       return
-   } else {
-       // regular query range request, let the querier handle it
-       req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
-       aH.QuerierAPI.QueryRange(rw, req)
-   }
-}
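For reference, the removed extractSeasonality walked the anomaly function's arguments looking for a "seasonality" value. A hypothetical query fragment it would have mapped to weekly seasonality (the literal struct names here are assumptions; the deleted code only shows the field accesses fn.Name, fn.Args, arg.Name, and arg.Value):

// Matches the accesses made by extractSeasonality above.
fn := qbtypes.Function{
    Name: qbtypes.FunctionNameAnomaly,
    Args: []qbtypes.FunctionArg{{Name: "seasonality", Value: "weekly"}},
}

Any other value, or no seasonality argument at all, fell through to the daily default.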


@@ -0,0 +1,39 @@
package db

import (
    "time"

    "github.com/ClickHouse/clickhouse-go/v2"
    "github.com/SigNoz/signoz/pkg/cache"
    "github.com/SigNoz/signoz/pkg/prometheus"
    basechr "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/telemetrystore"
)

type ClickhouseReader struct {
    conn  clickhouse.Conn
    appdb sqlstore.SQLStore
    *basechr.ClickHouseReader
}

func NewDataConnector(
    sqlDB sqlstore.SQLStore,
    telemetryStore telemetrystore.TelemetryStore,
    prometheus prometheus.Prometheus,
    cluster string,
    fluxIntervalForTraceDetail time.Duration,
    cache cache.Cache,
) *ClickhouseReader {
    chReader := basechr.NewReader(sqlDB, telemetryStore, prometheus, cluster, fluxIntervalForTraceDetail, cache)
    return &ClickhouseReader{
        conn:             telemetryStore.ClickhouseDB(),
        appdb:            sqlDB,
        ClickHouseReader: chReader,
    }
}

func (r *ClickhouseReader) GetSQLStore() sqlstore.SQLStore {
    return r.appdb
}
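A cheap way to document that this reader is meant to back the ee DataConnector interface introduced later in this change is a compile-time assertion (a sketch, not present in the diff; it assumes the ee interfaces package is imported under the name interfaces):

// Fails to compile if *ClickhouseReader ever stops satisfying the interface.
var _ interfaces.DataConnector = (*ClickhouseReader)(nil)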


@@ -3,23 +3,24 @@ package app
import (
    "context"
    "fmt"
-   "log/slog"
    "net"
    "net/http"
    _ "net/http/pprof" // http profiler
+   "time"

    "github.com/gorilla/handlers"
+   "github.com/jmoiron/sqlx"

    "github.com/SigNoz/signoz/ee/query-service/app/api"
+   "github.com/SigNoz/signoz/ee/query-service/app/db"
+   "github.com/SigNoz/signoz/ee/query-service/constants"
    "github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
    "github.com/SigNoz/signoz/ee/query-service/rules"
    "github.com/SigNoz/signoz/ee/query-service/usage"
    "github.com/SigNoz/signoz/pkg/alertmanager"
    "github.com/SigNoz/signoz/pkg/cache"
    "github.com/SigNoz/signoz/pkg/http/middleware"
-   "github.com/SigNoz/signoz/pkg/modules/organization"
    "github.com/SigNoz/signoz/pkg/prometheus"
-   "github.com/SigNoz/signoz/pkg/querier"
    "github.com/SigNoz/signoz/pkg/signoz"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/telemetrystore"
@@ -30,7 +31,6 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/agentConf"
    baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
-   "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
    "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
    "github.com/SigNoz/signoz/pkg/query-service/app/integrations"
    "github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
@@ -40,6 +40,7 @@ import (
    "github.com/SigNoz/signoz/pkg/query-service/healthcheck"
    baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
    baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
+   "github.com/SigNoz/signoz/pkg/query-service/telemetry"
    "github.com/SigNoz/signoz/pkg/query-service/utils"
    "go.uber.org/zap"
)
@@ -57,57 +58,61 @@ type ServerOptions struct {
    Jwt *authtypes.JWT
}

-// Server runs HTTP, Mux and a grpc server
+// Server runs HTTP api service
type Server struct {
-   config signoz.Config
-   signoz *signoz.SigNoz
-   jwt    *authtypes.JWT
+   serverOptions *ServerOptions
    ruleManager   *baserules.Manager

    // public http router
    httpConn   net.Listener
    httpServer *http.Server
-   httpHostPort string

    // private http
    privateConn net.Listener
    privateHTTP *http.Server
-   privateHostPort string
-
-   opampServer *opamp.Server

    // Usage manager
    usageManager *usage.Manager
+
+   opampServer *opamp.Server

    unavailableChannel chan healthcheck.Status
}

-// HealthCheckStatus returns health check status channel a client can subscribe to
-func (s Server) HealthCheckStatus() chan healthcheck.Status {
-   return s.unavailableChannel
-}
-
// NewServer creates and initializes Server
-func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT) (*Server, error) {
-   gatewayProxy, err := gateway.NewProxy(config.Gateway.URL.String(), gateway.RoutePrefix)
+func NewServer(serverOptions *ServerOptions) (*Server, error) {
+   gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
    if err != nil {
        return nil, err
    }

-   reader := clickhouseReader.NewReader(
-       signoz.SQLStore,
-       signoz.TelemetryStore,
-       signoz.Prometheus,
-       signoz.TelemetryStore.Cluster(),
-       config.Querier.FluxInterval,
-       signoz.Cache,
+   fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
+   if err != nil {
+       return nil, err
+   }
+
+   reader := db.NewDataConnector(
+       serverOptions.SigNoz.SQLStore,
+       serverOptions.SigNoz.TelemetryStore,
+       serverOptions.SigNoz.Prometheus,
+       serverOptions.Cluster,
+       fluxIntervalForTraceDetail,
+       serverOptions.SigNoz.Cache,
    )

    rm, err := makeRulesManager(
+       serverOptions.SigNoz.SQLStore.SQLxDB(),
        reader,
-       signoz.Cache,
-       signoz.Alertmanager,
-       signoz.SQLStore,
-       signoz.TelemetryStore,
-       signoz.Prometheus,
-       signoz.Modules.OrgGetter,
-       signoz.Querier,
-       signoz.Instrumentation.Logger(),
+       serverOptions.SigNoz.Cache,
+       serverOptions.SigNoz.Alertmanager,
+       serverOptions.SigNoz.SQLStore,
+       serverOptions.SigNoz.TelemetryStore,
+       serverOptions.SigNoz.Prometheus,
    )
    if err != nil {
@@ -115,16 +120,19 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
    }

    // initiate opamp
-   opAmpModel.Init(signoz.SQLStore, signoz.Instrumentation.Logger(), signoz.Modules.OrgGetter)
+   _, err = opAmpModel.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB())
+   if err != nil {
+       return nil, err
+   }

-   integrationsController, err := integrations.NewController(signoz.SQLStore)
+   integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore)
    if err != nil {
        return nil, fmt.Errorf(
            "couldn't create integrations controller: %w", err,
        )
    }

-   cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
+   cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore)
    if err != nil {
        return nil, fmt.Errorf(
            "couldn't create cloud provider integrations controller: %w", err,
@@ -133,8 +141,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
    // ingestion pipelines manager
    logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
-       signoz.SQLStore,
-       integrationsController.GetPipelinesForInstalledIntegrations,
+       serverOptions.SigNoz.SQLStore, integrationsController.GetPipelinesForInstalledIntegrations,
    )
    if err != nil {
        return nil, err
@@ -142,7 +149,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
    // initiate agent config handler
    agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
-       Store:         signoz.SQLStore,
+       DB:            serverOptions.SigNoz.SQLStore.SQLxDB(),
        AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
    })
    if err != nil {
@@ -150,7 +157,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
    }

    // start the usagemanager
-   usageManager, err := usage.New(signoz.Licensing, signoz.TelemetryStore.ClickhouseDB(), signoz.Zeus, signoz.Modules.OrgGetter)
+   usageManager, err := usage.New(serverOptions.SigNoz.Licensing, serverOptions.SigNoz.TelemetryStore.ClickhouseDB(), serverOptions.SigNoz.Zeus, serverOptions.SigNoz.Modules.Organization)
    if err != nil {
        return nil, err
    }
@@ -159,36 +166,47 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
        return nil, err
    }

+   telemetry.GetInstance().SetReader(reader)
+   telemetry.GetInstance().SetSqlStore(serverOptions.SigNoz.SQLStore)
+   telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
+   telemetry.GetInstance().SetSavedViewsInfoCallback(telemetry.GetSavedViewsInfo)
+   telemetry.GetInstance().SetAlertsInfoCallback(telemetry.GetAlertsInfo)
+   telemetry.GetInstance().SetGetUsersCallback(telemetry.GetUsers)
+   telemetry.GetInstance().SetUserCountCallback(telemetry.GetUserCount)
+   telemetry.GetInstance().SetDashboardsInfoCallback(telemetry.GetDashboardsInfo)
+
+   fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
+   if err != nil {
+       return nil, err
+   }
+
    apiOpts := api.APIHandlerOptions{
        DataConnector:                 reader,
+       PreferSpanMetrics:             serverOptions.PreferSpanMetrics,
        RulesManager:                  rm,
        UsageManager:                  usageManager,
        IntegrationsController:        integrationsController,
        CloudIntegrationsController:   cloudIntegrationsController,
        LogsParsingPipelineController: logParsingPipelineController,
-       FluxInterval:                  config.Querier.FluxInterval,
+       FluxInterval:                  fluxInterval,
        Gateway:                       gatewayProxy,
-       GatewayUrl:                    config.Gateway.URL.String(),
-       JWT:                           jwt,
+       GatewayUrl:                    serverOptions.GatewayUrl,
+       JWT:                           serverOptions.Jwt,
    }

-   apiHandler, err := api.NewAPIHandler(apiOpts, signoz)
+   apiHandler, err := api.NewAPIHandler(apiOpts, serverOptions.SigNoz)
    if err != nil {
        return nil, err
    }

    s := &Server{
-       config:             config,
-       signoz:             signoz,
-       jwt:                jwt,
        ruleManager:        rm,
-       httpHostPort:       baseconst.HTTPHostPort,
-       privateHostPort:    baseconst.PrivateHostPort,
+       serverOptions:      serverOptions,
        unavailableChannel: make(chan healthcheck.Status),
        usageManager:       usageManager,
    }

-   httpServer, err := s.createPublicServer(apiHandler, signoz.Web)
+   httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web)
    if err != nil {
        return nil, err
@@ -204,28 +222,36 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
    s.privateHTTP = privateServer

    s.opampServer = opamp.InitializeServer(
-       &opAmpModel.AllAgents, agentConfMgr, signoz.Instrumentation,
+       &opAmpModel.AllAgents, agentConfMgr,
    )

+   orgs, err := apiHandler.Signoz.Modules.Organization.GetAll(context.Background())
+   if err != nil {
+       return nil, err
+   }
+   for _, org := range orgs {
+       errorList := reader.PreloadMetricsMetadata(context.Background(), org.ID)
+       for _, er := range errorList {
+           zap.L().Error("failed to preload metrics metadata", zap.Error(er))
+       }
+   }
+
    return s, nil
}

+// HealthCheckStatus returns health check status channel a client can subscribe to
+func (s Server) HealthCheckStatus() chan healthcheck.Status {
+   return s.unavailableChannel
+}
+
func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
    r := baseapp.NewRouter()

-   r.Use(middleware.NewAuth(s.jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
-   r.Use(middleware.NewAPIKey(s.signoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.signoz.Instrumentation.Logger(), s.signoz.Sharder).Wrap)
-   r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
-       s.config.APIServer.Timeout.ExcludedRoutes,
-       s.config.APIServer.Timeout.Default,
-       s.config.APIServer.Timeout.Max,
+   r.Use(middleware.NewAuth(s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
+   r.Use(middleware.NewAPIKey(s.serverOptions.SigNoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.serverOptions.SigNoz.Instrumentation.Logger()).Wrap)
+   r.Use(middleware.NewTimeout(s.serverOptions.SigNoz.Instrumentation.Logger(),
+       s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
+       s.serverOptions.Config.APIServer.Timeout.Default,
+       s.serverOptions.Config.APIServer.Timeout.Max,
    ).Wrap)
-   r.Use(middleware.NewLogging(s.signoz.Instrumentation.Logger(), s.config.APIServer.Logging.ExcludedRoutes).Wrap)
+   r.Use(middleware.NewAnalytics().Wrap)
+   r.Use(middleware.NewLogging(s.serverOptions.SigNoz.Instrumentation.Logger(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)

    apiHandler.RegisterPrivateRoutes(r)
@@ -247,17 +273,17 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
    r := baseapp.NewRouter()

-   am := middleware.NewAuthZ(s.signoz.Instrumentation.Logger())
+   am := middleware.NewAuthZ(s.serverOptions.SigNoz.Instrumentation.Logger())

-   r.Use(middleware.NewAuth(s.jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
-   r.Use(middleware.NewAPIKey(s.signoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.signoz.Instrumentation.Logger(), s.signoz.Sharder).Wrap)
-   r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
-       s.config.APIServer.Timeout.ExcludedRoutes,
-       s.config.APIServer.Timeout.Default,
-       s.config.APIServer.Timeout.Max,
+   r.Use(middleware.NewAuth(s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
+   r.Use(middleware.NewAPIKey(s.serverOptions.SigNoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.serverOptions.SigNoz.Instrumentation.Logger()).Wrap)
+   r.Use(middleware.NewTimeout(s.serverOptions.SigNoz.Instrumentation.Logger(),
+       s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
+       s.serverOptions.Config.APIServer.Timeout.Default,
+       s.serverOptions.Config.APIServer.Timeout.Max,
    ).Wrap)
-   r.Use(middleware.NewLogging(s.signoz.Instrumentation.Logger(), s.config.APIServer.Logging.ExcludedRoutes).Wrap)
-   r.Use(middleware.NewComment().Wrap)
+   r.Use(middleware.NewAnalytics().Wrap)
+   r.Use(middleware.NewLogging(s.serverOptions.SigNoz.Instrumentation.Logger(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)

    apiHandler.RegisterRoutes(r, am)
    apiHandler.RegisterLogsRoutes(r, am)
@@ -267,12 +293,10 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
    apiHandler.RegisterQueryRangeV3Routes(r, am)
    apiHandler.RegisterInfraMetricsRoutes(r, am)
    apiHandler.RegisterQueryRangeV4Routes(r, am)
-   apiHandler.RegisterQueryRangeV5Routes(r, am)
    apiHandler.RegisterWebSocketPaths(r, am)
    apiHandler.RegisterMessagingQueuesRoutes(r, am)
    apiHandler.RegisterThirdPartyApiRoutes(r, am)
    apiHandler.MetricExplorerRoutes(r, am)
-   apiHandler.RegisterTraceFunnelsRoutes(r, am)

    c := cors.New(cors.Options{
        AllowedOrigins: []string{"*"},
@@ -298,7 +322,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
func (s *Server) initListeners() error {
    // listen on public port
    var err error
-   publicHostPort := s.httpHostPort
+   publicHostPort := s.serverOptions.HTTPHostPort
    if publicHostPort == "" {
        return fmt.Errorf("baseconst.HTTPHostPort is required")
    }
@@ -308,10 +332,10 @@
        return err
    }

-   zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.httpHostPort))
+   zap.L().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))

    // listen on private port to support internal services
-   privateHostPort := s.privateHostPort
+   privateHostPort := s.serverOptions.PrivateHostPort

    if privateHostPort == "" {
        return fmt.Errorf("baseconst.PrivateHostPort is required")
@@ -321,7 +345,7 @@
    if err != nil {
        return err
    }
-   zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.privateHostPort))
+   zap.L().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))

    return nil
}
@@ -341,7 +365,7 @@ func (s *Server) Start(ctx context.Context) error {
    }

    go func() {
-       zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.httpHostPort))
+       zap.L().Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.serverOptions.HTTPHostPort))

        switch err := s.httpServer.Serve(s.httpConn); err {
        case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
@@ -367,7 +391,7 @@
    }

    go func() {
-       zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.privateHostPort))
+       zap.L().Info("Starting Private HTTP server", zap.Int("port", privatePort), zap.String("addr", s.serverOptions.PrivateHostPort))

        switch err := s.privateHTTP.Serve(s.privateConn); err {
        case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
@@ -419,32 +443,28 @@ func (s *Server) Stop(ctx context.Context) error {
}

func makeRulesManager(
+   db *sqlx.DB,
    ch baseint.Reader,
    cache cache.Cache,
    alertmanager alertmanager.Alertmanager,
    sqlstore sqlstore.SQLStore,
    telemetryStore telemetrystore.TelemetryStore,
    prometheus prometheus.Prometheus,
-   orgGetter organization.Getter,
-   querier querier.Querier,
-   logger *slog.Logger,
) (*baserules.Manager, error) {
    // create manager opts
    managerOpts := &baserules.ManagerOptions{
        TelemetryStore: telemetryStore,
        Prometheus:     prometheus,
+       DBConn:         db,
        Context:        context.Background(),
        Logger:         zap.L(),
        Reader:         ch,
-       Querier:        querier,
-       SLogger:        logger,
        Cache:          cache,
        EvalDelay:      baseconst.GetEvalDelay(),

        PrepareTaskFunc:     rules.PrepareTaskFunc,
        PrepareTestRuleFunc: rules.TestNotification,
        Alertmanager:        alertmanager,
        SQLStore:            sqlstore,
-       OrgGetter:           orgGetter,
    }

    // create Manager
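The unavailableChannel surfaced through HealthCheckStatus above is a plain Go channel, so a caller subscribes simply by ranging over it. A minimal consumption sketch (the logging is illustrative, not from this change):

go func() {
    for status := range server.HealthCheckStatus() {
        // Each received value signals a health transition reported by the server.
        zap.L().Warn("healthcheck status changed", zap.Any("status", status))
    }
}()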


@@ -4,10 +4,6 @@ import (
    "os"
)

-const (
-   DefaultSiteURL = "https://localhost:8080"
-)
-
var LicenseSignozIo = "https://license.signoz.io/api/v1"
var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
@@ -24,27 +20,3 @@ func GetOrDefaultEnv(key string, fallback string) string {
    }
    return v
}
-
-// constant functions that override env vars
-
-// GetDefaultSiteURL returns default site url, primarily
-// used to send saml request and allowing backend to
-// handle http redirect
-func GetDefaultSiteURL() string {
-   return GetOrDefaultEnv("SIGNOZ_SITE_URL", DefaultSiteURL)
-}
-
-const DotMetricsEnabled = "DOT_METRICS_ENABLED"
-
-var IsDotMetricsEnabled = false
-
-var IsPreferSpanMetrics = false
-
-func init() {
-   if GetOrDefaultEnv(DotMetricsEnabled, "false") == "true" {
-       IsDotMetricsEnabled = true
-   }
-   if GetOrDefaultEnv("USE_SPAN_METRICS", "false") == "true" {
-       IsPreferSpanMetrics = true
-   }
-}
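GetOrDefaultEnv is a plain env-with-fallback lookup, so the removed GetDefaultSiteURL was equivalent to an inline call. An example using the values from the deleted code:

// Resolves to the env var when set, otherwise the hard-coded default.
siteURL := GetOrDefaultEnv("SIGNOZ_SITE_URL", "https://localhost:8080")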


@@ -0,0 +1,11 @@
package interfaces

import (
    baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
)

// Connector defines methods for interaction
// with o11y data. for example - clickhouse
type DataConnector interface {
    baseint.Reader
}
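Embedding the base Reader keeps the ee surface a strict superset of the open-source one: ee-only methods can be declared next to the embed without touching the base interface. For example, a sketch of how this interface could grow to cover the GetSQLStore method that db.ClickhouseReader already exposes (an assumption for illustration, not part of this change):

type DataConnector interface {
    baseint.Reader
    GetSQLStore() sqlstore.SQLStore // provided by db.ClickhouseReader above
}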

ee/query-service/main.go (new file, 186 lines)

@@ -0,0 +1,186 @@
package main

import (
    "context"
    "flag"
    "os"
    "time"

    "github.com/SigNoz/signoz/ee/licensing"
    "github.com/SigNoz/signoz/ee/licensing/httplicensing"
    "github.com/SigNoz/signoz/ee/query-service/app"
    "github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
    "github.com/SigNoz/signoz/ee/zeus"
    "github.com/SigNoz/signoz/ee/zeus/httpzeus"
    "github.com/SigNoz/signoz/pkg/config"
    "github.com/SigNoz/signoz/pkg/config/envprovider"
    "github.com/SigNoz/signoz/pkg/config/fileprovider"
    "github.com/SigNoz/signoz/pkg/factory"
    pkglicensing "github.com/SigNoz/signoz/pkg/licensing"
    baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
    "github.com/SigNoz/signoz/pkg/signoz"
    "github.com/SigNoz/signoz/pkg/sqlstore"
    "github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/version"
    pkgzeus "github.com/SigNoz/signoz/pkg/zeus"
    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

// Deprecated: Please use the logger from pkg/instrumentation.
func initZapLog() *zap.Logger {
    config := zap.NewProductionConfig()
    config.EncoderConfig.TimeKey = "timestamp"
    config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
    logger, _ := config.Build()
    return logger
}

func main() {
    var promConfigPath, skipTopLvlOpsPath string

    // disables rule execution but allows change to the rule definition
    var disableRules bool

    // the url used to build link in the alert messages in slack and other systems
    var ruleRepoURL string
    var cluster string

    var useLogsNewSchema bool
    var useTraceNewSchema bool
    var cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string
    var preferSpanMetrics bool

    var maxIdleConns int
    var maxOpenConns int
    var dialTimeout time.Duration

    var gatewayUrl string
    var useLicensesV3 bool

    // Deprecated
    flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
    // Deprecated
    flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
    // Deprecated
    flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
    // Deprecated
    flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
    // Deprecated
    flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
    flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)")
    // Deprecated
    flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)")
    // Deprecated
    flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)")
    // Deprecated
    flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)")
    // Deprecated
    flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
    // Deprecated
    flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
    flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
    flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
    flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
    flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
    // Deprecated
    flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
    flag.Parse()

    loggerMgr := initZapLog()
    zap.ReplaceGlobals(loggerMgr)
    defer loggerMgr.Sync() // flushes buffer, if any

    ctx := context.Background()

    config, err := signoz.NewConfig(ctx, config.ResolverConfig{
        Uris: []string{"env:"},
        ProviderFactories: []config.ProviderFactory{
            envprovider.NewFactory(),
            fileprovider.NewFactory(),
        },
    }, signoz.DeprecatedFlags{
        MaxIdleConns: maxIdleConns,
        MaxOpenConns: maxOpenConns,
        DialTimeout:  dialTimeout,
        Config:       promConfigPath,
    })
    if err != nil {
        zap.L().Fatal("Failed to create config", zap.Error(err))
    }

    version.Info.PrettyPrint(config.Version)

    sqlStoreFactories := signoz.NewSQLStoreProviderFactories()
    if err := sqlStoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
        zap.L().Fatal("Failed to add postgressqlstore factory", zap.Error(err))
    }

    jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")
    if len(jwtSecret) == 0 {
        zap.L().Warn("No JWT secret key is specified.")
    } else {
        zap.L().Info("JWT secret key set successfully.")
    }

    jwt := authtypes.NewJWT(jwtSecret, 30*time.Minute, 30*24*time.Hour)

    signoz, err := signoz.New(
        context.Background(),
        config,
        jwt,
        zeus.Config(),
        httpzeus.NewProviderFactory(),
        licensing.Config(24*time.Hour, 3),
        func(sqlstore sqlstore.SQLStore, zeus pkgzeus.Zeus) factory.ProviderFactory[pkglicensing.Licensing, pkglicensing.Config] {
            return httplicensing.NewProviderFactory(sqlstore, zeus)
        },
        signoz.NewEmailingProviderFactories(),
        signoz.NewCacheProviderFactories(),
        signoz.NewWebProviderFactories(),
        sqlStoreFactories,
        signoz.NewTelemetryStoreProviderFactories(),
    )
    if err != nil {
        zap.L().Fatal("Failed to create signoz", zap.Error(err))
    }

    serverOptions := &app.ServerOptions{
        Config:                     config,
        SigNoz:                     signoz,
        HTTPHostPort:               baseconst.HTTPHostPort,
        PreferSpanMetrics:          preferSpanMetrics,
        PrivateHostPort:            baseconst.PrivateHostPort,
        FluxInterval:               fluxInterval,
        FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
        Cluster:                    cluster,
        GatewayUrl:                 gatewayUrl,
        Jwt:                        jwt,
    }

    server, err := app.NewServer(serverOptions)
    if err != nil {
        zap.L().Fatal("Failed to create server", zap.Error(err))
    }

    if err := server.Start(ctx); err != nil {
        zap.L().Fatal("Could not start server", zap.Error(err))
    }

    signoz.Start(ctx)

    if err := signoz.Wait(ctx); err != nil {
        zap.L().Fatal("Failed to start signoz", zap.Error(err))
    }

    err = server.Stop(ctx)
    if err != nil {
        zap.L().Fatal("Failed to stop server", zap.Error(err))
    }

    err = signoz.Stop(ctx)
    if err != nil {
        zap.L().Fatal("Failed to stop signoz", zap.Error(err))
    }
}
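The JWT wiring above passes the secret plus two durations to authtypes.NewJWT; the positional meaning is presumably access-token and refresh-token lifetimes, though that is inferred rather than documented here. A hedged example of shortening both:

// 15-minute access tokens, 7-day refresh tokens (values are illustrative).
jwt := authtypes.NewJWT(os.Getenv("SIGNOZ_JWT_SECRET"), 15*time.Minute, 7*24*time.Hour)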


@@ -4,17 +4,17 @@ import (
    "context"
    "encoding/json"
    "fmt"
-   "log/slog"
    "math"
    "strings"
    "sync"
    "time"

+   "go.uber.org/zap"
+
    "github.com/SigNoz/signoz/ee/query-service/anomaly"
    "github.com/SigNoz/signoz/pkg/cache"
    "github.com/SigNoz/signoz/pkg/query-service/common"
    "github.com/SigNoz/signoz/pkg/query-service/model"
-   "github.com/SigNoz/signoz/pkg/transition"
    ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
    "github.com/SigNoz/signoz/pkg/valuer"
@@ -30,11 +30,6 @@ import (
    baserules "github.com/SigNoz/signoz/pkg/query-service/rules"

-   querierV5 "github.com/SigNoz/signoz/pkg/querier"
-
-   anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"
-   qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
-
    yaml "gopkg.in/yaml.v2"
)
@@ -52,14 +47,7 @@ type AnomalyRule struct {
    // querierV2 is used for alerts created after the introduction of new metrics query builder
    querierV2 interfaces.Querier

-   // querierV5 is used for alerts migrated after the introduction of new query builder
-   querierV5 querierV5.Querier
-
    provider anomaly.Provider
-   providerV2 anomalyV2.Provider
-
-   version string
-   logger  *slog.Logger

    seasonality anomaly.Seasonality
}
@@ -69,15 +57,11 @@ func NewAnomalyRule(
    orgID valuer.UUID,
    p *ruletypes.PostableRule,
    reader interfaces.Reader,
-   querierV5 querierV5.Querier,
-   logger *slog.Logger,
    cache cache.Cache,
    opts ...baserules.RuleOption,
) (*AnomalyRule, error) {

-   logger.Info("creating new AnomalyRule", "rule_id", id)
-   opts = append(opts, baserules.WithLogger(logger))
+   zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))

    if p.RuleCondition.CompareOp == ruletypes.ValueIsBelow {
        target := -1 * *p.RuleCondition.Target
@@ -104,7 +88,7 @@ func NewAnomalyRule(
        t.seasonality = anomaly.SeasonalityDaily
    }

-   logger.Info("using seasonality", "seasonality", t.seasonality.String())
+   zap.L().Info("using seasonality", zap.String("seasonality", t.seasonality.String()))

    querierOptsV2 := querierV2.QuerierOptions{
        Reader: reader,
@@ -133,27 +117,6 @@ func NewAnomalyRule(
            anomaly.WithReader[*anomaly.WeeklyProvider](reader),
        )
    }
-
-   if t.seasonality == anomaly.SeasonalityHourly {
-       t.providerV2 = anomalyV2.NewHourlyProvider(
-           anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](querierV5),
-           anomalyV2.WithLogger[*anomalyV2.HourlyProvider](logger),
-       )
-   } else if t.seasonality == anomaly.SeasonalityDaily {
-       t.providerV2 = anomalyV2.NewDailyProvider(
-           anomalyV2.WithQuerier[*anomalyV2.DailyProvider](querierV5),
-           anomalyV2.WithLogger[*anomalyV2.DailyProvider](logger),
-       )
-   } else if t.seasonality == anomaly.SeasonalityWeekly {
-       t.providerV2 = anomalyV2.NewWeeklyProvider(
-           anomalyV2.WithQuerier[*anomalyV2.WeeklyProvider](querierV5),
-           anomalyV2.WithLogger[*anomalyV2.WeeklyProvider](logger),
-       )
-   }
-
-   t.querierV5 = querierV5
-   t.version = p.Version
-   t.logger = logger

    return &t, nil
}
@@ -161,11 +124,9 @@ func (r *AnomalyRule) Type() ruletypes.RuleType {
    return RuleTypeAnomaly
}

-func (r *AnomalyRule) prepareQueryRange(ctx context.Context, ts time.Time) (*v3.QueryRangeParamsV3, error) {
-   r.logger.InfoContext(
-       ctx, "prepare query range request v4", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds(),
-   )
+func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) {
+   zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.EvalWindow().Milliseconds()), zap.Int64("evalDelay", r.EvalDelay().Milliseconds()))

    start := ts.Add(-time.Duration(r.EvalWindow())).UnixMilli()
    end := ts.UnixMilli()
@@ -195,34 +156,13 @@ func (r *AnomalyRule) prepareQueryRange(ctx context.Context, ts time.Time) (*v3.
    }, nil
}

-func (r *AnomalyRule) prepareQueryRangeV5(ctx context.Context, ts time.Time) (*qbtypes.QueryRangeRequest, error) {
-   r.logger.InfoContext(ctx, "prepare query range request v5", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds())
-
-   startTs, endTs := r.Timestamps(ts)
-   start, end := startTs.UnixMilli(), endTs.UnixMilli()
-
-   req := &qbtypes.QueryRangeRequest{
-       Start:       uint64(start),
-       End:         uint64(end),
-       RequestType: qbtypes.RequestTypeTimeSeries,
-       CompositeQuery: qbtypes.CompositeQuery{
-           Queries: make([]qbtypes.QueryEnvelope, 0),
-       },
-       NoCache: true,
-   }
-
-   req.CompositeQuery.Queries = make([]qbtypes.QueryEnvelope, len(r.Condition().CompositeQuery.Queries))
-   copy(req.CompositeQuery.Queries, r.Condition().CompositeQuery.Queries)
-
-   return req, nil
-}
-
func (r *AnomalyRule) GetSelectedQuery() string {
    return r.Condition().GetSelectedQueryName()
}

func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
-   params, err := r.prepareQueryRange(ctx, ts)
+   params, err := r.prepareQueryRange(ts)
    if err != nil {
        return nil, err
    }
@@ -250,50 +190,7 @@ func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, t
    var resultVector ruletypes.Vector

    scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
-   r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))
-
-   for _, series := range queryResult.AnomalyScores {
-       smpl, shouldAlert := r.ShouldAlert(*series)
-       if shouldAlert {
-           resultVector = append(resultVector, smpl)
-       }
-   }
-   return resultVector, nil
-}
-
-func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
-   params, err := r.prepareQueryRangeV5(ctx, ts)
-   if err != nil {
-       return nil, err
-   }
-
-   anomalies, err := r.providerV2.GetAnomalies(ctx, orgID, &anomalyV2.AnomaliesRequest{
-       Params:      *params,
-       Seasonality: anomalyV2.Seasonality{String: valuer.NewString(r.seasonality.String())},
-   })
-   if err != nil {
-       return nil, err
-   }
-
-   var qbResult *qbtypes.TimeSeriesData
-   for _, result := range anomalies.Results {
-       if result.QueryName == r.GetSelectedQuery() {
-           qbResult = result
-           break
-       }
-   }
-
-   if qbResult == nil {
-       r.logger.WarnContext(ctx, "nil qb result", "ts", ts.UnixMilli())
-   }
-
-   queryResult := transition.ConvertV5TimeSeriesDataToV4Result(qbResult)
-
-   var resultVector ruletypes.Vector
-
-   scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
-   r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))
+   zap.L().Info("anomaly scores", zap.String("scores", string(scoresJSON)))

    for _, series := range queryResult.AnomalyScores {
        smpl, shouldAlert := r.ShouldAlert(*series)
@@ -309,17 +206,8 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
    prevState := r.State()
    valueFormatter := formatter.FromUnit(r.Unit())

-   var res ruletypes.Vector
-   var err error
-
-   if r.version == "v5" {
-       r.logger.InfoContext(ctx, "running v5 query")
-       res, err = r.buildAndRunQueryV5(ctx, r.OrgID(), ts)
-   } else {
-       r.logger.InfoContext(ctx, "running v4 query")
-       res, err = r.buildAndRunQuery(ctx, r.OrgID(), ts)
-   }
+   res, err := r.buildAndRunQuery(ctx, r.OrgID(), ts)
    if err != nil {
        return nil, err
    }
@@ -338,7 +226,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
    value := valueFormatter.Format(smpl.V, r.Unit())
    threshold := valueFormatter.Format(r.TargetVal(), r.Unit())

-   r.logger.DebugContext(ctx, "Alert template data for rule", "rule_name", r.Name(), "formatter", valueFormatter.Name(), "value", value, "threshold", threshold)
+   zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))

    tmplData := ruletypes.AlertTemplateData(l, value, threshold)
    // Inject some convenience variables that are easier to remember for users
@@ -359,7 +247,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
    result, err := tmpl.Expand()
if err != nil { if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err) result = fmt.Sprintf("<error expanding template: %s>", err)
r.logger.ErrorContext(ctx, "Expanding alert template failed", "error", err, "data", tmplData, "rule_name", r.Name()) zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
} }
return result return result
} }
@@ -388,7 +276,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
resultFPs[h] = struct{}{} resultFPs[h] = struct{}{}
if _, ok := alerts[h]; ok { if _, ok := alerts[h]; ok {
r.logger.ErrorContext(ctx, "the alert query returns duplicate records", "rule_id", r.ID(), "alert", alerts[h]) zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels") err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
return nil, err return nil, err
} }
@@ -406,7 +294,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
} }
} }
r.logger.InfoContext(ctx, "number of alerts found", "rule_name", r.Name(), "alerts_count", len(alerts)) zap.L().Info("number of alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))
// alerts[h] is ready, add or update active list now // alerts[h] is ready, add or update active list now
for h, a := range alerts { for h, a := range alerts {
@@ -429,7 +317,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
for fp, a := range r.Active { for fp, a := range r.Active {
labelsJSON, err := json.Marshal(a.QueryResultLables) labelsJSON, err := json.Marshal(a.QueryResultLables)
if err != nil { if err != nil {
r.logger.ErrorContext(ctx, "error marshaling labels", "error", err, "labels", a.Labels) zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels))
} }
if _, ok := resultFPs[fp]; !ok { if _, ok := resultFPs[fp]; !ok {
// If the alert was previously firing, keep it around for a given // If the alert was previously firing, keep it around for a given


@@ -27,8 +27,6 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.OrgID,
opts.Rule,
opts.Reader,
-opts.Querier,
-opts.SLogger,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
baserules.WithSQLStore(opts.SQLStore),
)
@@ -49,7 +47,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
ruleId,
opts.OrgID,
opts.Rule,
-opts.SLogger,
+opts.Logger,
opts.Reader,
opts.ManagerOpts.Prometheus,
baserules.WithSQLStore(opts.SQLStore),
@@ -71,8 +69,6 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.OrgID,
opts.Rule,
opts.Reader,
-opts.Querier,
-opts.SLogger,
opts.Cache,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
baserules.WithSQLStore(opts.SQLStore),
@@ -130,8 +126,6 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
opts.OrgID,
parsedRule,
opts.Reader,
-opts.Querier,
-opts.SLogger,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
baserules.WithSQLStore(opts.SQLStore),
@@ -149,7 +143,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
alertname,
opts.OrgID,
parsedRule,
-opts.SLogger,
+opts.Logger,
opts.Reader,
opts.ManagerOpts.Prometheus,
baserules.WithSendAlways(),
@@ -168,8 +162,6 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
opts.OrgID,
parsedRule,
opts.Reader,
-opts.Querier,
-opts.SLogger,
opts.Cache,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),


@@ -41,16 +41,16 @@ type Manager struct {
zeus zeus.Zeus
-orgGetter organization.Getter
+organizationModule organization.Module
}
-func New(licenseService licensing.Licensing, clickhouseConn clickhouse.Conn, zeus zeus.Zeus, orgGetter organization.Getter) (*Manager, error) {
+func New(licenseService licensing.Licensing, clickhouseConn clickhouse.Conn, zeus zeus.Zeus, organizationModule organization.Module) (*Manager, error) {
m := &Manager{
clickhouseConn: clickhouseConn,
licenseService: licenseService,
scheduler: gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every at 00:00 UTC
zeus: zeus,
-orgGetter: orgGetter,
+organizationModule: organizationModule,
}
return m, nil
}
@@ -74,7 +74,8 @@ func (lm *Manager) Start(ctx context.Context) error {
return nil
}
func (lm *Manager) UploadUsage(ctx context.Context) {
-organizations, err := lm.orgGetter.ListByOwnedKeyRange(ctx)
+organizations, err := lm.organizationModule.GetAll(context.Background())
if err != nil {
zap.L().Error("failed to get organizations", zap.Error(err))
return


@@ -1,36 +0,0 @@
package postgressqlschema
import (
"strings"
"github.com/SigNoz/signoz/pkg/sqlschema"
)
type Formatter struct {
sqlschema.Formatter
}
func (formatter Formatter) SQLDataTypeOf(dataType sqlschema.DataType) string {
if dataType == sqlschema.DataTypeTimestamp {
return "TIMESTAMPTZ"
}
return strings.ToUpper(dataType.String())
}
func (formatter Formatter) DataTypeOf(dataType string) sqlschema.DataType {
switch strings.ToUpper(dataType) {
case "TIMESTAMPTZ", "TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE", "TIMESTAMP WITH TIME ZONE":
return sqlschema.DataTypeTimestamp
case "INT8":
return sqlschema.DataTypeBigInt
case "INT2", "INT4", "SMALLINT", "INTEGER":
return sqlschema.DataTypeInteger
case "BOOL", "BOOLEAN":
return sqlschema.DataTypeBoolean
case "VARCHAR", "CHARACTER VARYING", "CHARACTER":
return sqlschema.DataTypeText
}
return formatter.Formatter.DataTypeOf(dataType)
}


@@ -1,285 +0,0 @@
package postgressqlschema
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
)
type provider struct {
settings factory.ScopedProviderSettings
fmter sqlschema.SQLFormatter
sqlstore sqlstore.SQLStore
operator sqlschema.SQLOperator
}
func NewFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config] {
return factory.NewProviderFactory(factory.MustNewName("postgres"), func(ctx context.Context, providerSettings factory.ProviderSettings, config sqlschema.Config) (sqlschema.SQLSchema, error) {
return New(ctx, providerSettings, config, sqlstore)
})
}
func New(ctx context.Context, providerSettings factory.ProviderSettings, config sqlschema.Config, sqlstore sqlstore.SQLStore) (sqlschema.SQLSchema, error) {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/sqlschema/postgressqlschema")
fmter := Formatter{Formatter: sqlschema.NewFormatter(sqlstore.BunDB().Dialect())}
return &provider{
sqlstore: sqlstore,
fmter: fmter,
settings: settings,
operator: sqlschema.NewOperator(fmter, sqlschema.OperatorSupport{
DropConstraint: true,
ColumnIfNotExistsExists: true,
AlterColumnSetNotNull: true,
}),
}, nil
}
func (provider *provider) Formatter() sqlschema.SQLFormatter {
return provider.fmter
}
func (provider *provider) Operator() sqlschema.SQLOperator {
return provider.operator
}
func (provider *provider) GetTable(ctx context.Context, tableName sqlschema.TableName) (*sqlschema.Table, []*sqlschema.UniqueConstraint, error) {
rows, err := provider.
sqlstore.
BunDB().
QueryContext(ctx, `
SELECT
c.column_name,
c.is_nullable = 'YES',
c.udt_name,
c.column_default
FROM
information_schema.columns AS c
WHERE
c.table_name = ?`, string(tableName))
if err != nil {
return nil, nil, err
}
defer func() {
if err := rows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
columns := make([]*sqlschema.Column, 0)
for rows.Next() {
var (
name string
sqlDataType string
nullable bool
defaultVal *string
)
if err := rows.Scan(&name, &nullable, &sqlDataType, &defaultVal); err != nil {
return nil, nil, err
}
columnDefault := ""
if defaultVal != nil {
columnDefault = *defaultVal
}
columns = append(columns, &sqlschema.Column{
Name: sqlschema.ColumnName(name),
Nullable: nullable,
DataType: provider.fmter.DataTypeOf(sqlDataType),
Default: columnDefault,
})
}
constraintsRows, err := provider.
sqlstore.
BunDB().
QueryContext(ctx, `
SELECT
c.column_name,
constraint_name,
constraint_type
FROM
information_schema.table_constraints tc
JOIN information_schema.constraint_column_usage AS ccu USING (constraint_schema, constraint_catalog, table_name, constraint_name)
JOIN information_schema.columns AS c ON c.table_schema = tc.constraint_schema AND tc.table_name = c.table_name AND ccu.column_name = c.column_name
WHERE
c.table_name = ?`, string(tableName))
if err != nil {
return nil, nil, err
}
defer func() {
if err := constraintsRows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
var primaryKeyConstraint *sqlschema.PrimaryKeyConstraint
uniqueConstraintsMap := make(map[string]*sqlschema.UniqueConstraint)
for constraintsRows.Next() {
var (
name string
constraintName string
constraintType string
)
if err := constraintsRows.Scan(&name, &constraintName, &constraintType); err != nil {
return nil, nil, err
}
if constraintType == "PRIMARY KEY" {
if primaryKeyConstraint == nil {
primaryKeyConstraint = (&sqlschema.PrimaryKeyConstraint{
ColumnNames: []sqlschema.ColumnName{sqlschema.ColumnName(name)},
}).Named(constraintName).(*sqlschema.PrimaryKeyConstraint)
} else {
primaryKeyConstraint.ColumnNames = append(primaryKeyConstraint.ColumnNames, sqlschema.ColumnName(name))
}
}
if constraintType == "UNIQUE" {
if _, ok := uniqueConstraintsMap[constraintName]; !ok {
uniqueConstraintsMap[constraintName] = (&sqlschema.UniqueConstraint{
ColumnNames: []sqlschema.ColumnName{sqlschema.ColumnName(name)},
}).Named(constraintName).(*sqlschema.UniqueConstraint)
} else {
uniqueConstraintsMap[constraintName].ColumnNames = append(uniqueConstraintsMap[constraintName].ColumnNames, sqlschema.ColumnName(name))
}
}
}
foreignKeyConstraintsRows, err := provider.
sqlstore.
BunDB().
QueryContext(ctx, `
SELECT
tc.constraint_name,
kcu.table_name AS referencing_table,
kcu.column_name AS referencing_column,
ccu.table_name AS referenced_table,
ccu.column_name AS referenced_column
FROM
information_schema.key_column_usage kcu
JOIN information_schema.table_constraints tc ON kcu.constraint_name = tc.constraint_name AND kcu.table_schema = tc.table_schema
JOIN information_schema.constraint_column_usage ccu ON ccu.constraint_name = tc.constraint_name AND ccu.table_schema = tc.table_schema
WHERE
tc.constraint_type = ?
AND kcu.table_name = ?`, "FOREIGN KEY", string(tableName))
if err != nil {
return nil, nil, err
}
defer func() {
if err := foreignKeyConstraintsRows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
foreignKeyConstraints := make([]*sqlschema.ForeignKeyConstraint, 0)
for foreignKeyConstraintsRows.Next() {
var (
constraintName string
referencingTable string
referencingColumn string
referencedTable string
referencedColumn string
)
if err := foreignKeyConstraintsRows.Scan(&constraintName, &referencingTable, &referencingColumn, &referencedTable, &referencedColumn); err != nil {
return nil, nil, err
}
foreignKeyConstraints = append(foreignKeyConstraints, (&sqlschema.ForeignKeyConstraint{
ReferencingColumnName: sqlschema.ColumnName(referencingColumn),
ReferencedTableName: sqlschema.TableName(referencedTable),
ReferencedColumnName: sqlschema.ColumnName(referencedColumn),
}).Named(constraintName).(*sqlschema.ForeignKeyConstraint))
}
uniqueConstraints := make([]*sqlschema.UniqueConstraint, 0)
for _, uniqueConstraint := range uniqueConstraintsMap {
uniqueConstraints = append(uniqueConstraints, uniqueConstraint)
}
return &sqlschema.Table{
Name: tableName,
Columns: columns,
PrimaryKeyConstraint: primaryKeyConstraint,
ForeignKeyConstraints: foreignKeyConstraints,
}, uniqueConstraints, nil
}
func (provider *provider) GetIndices(ctx context.Context, name sqlschema.TableName) ([]sqlschema.Index, error) {
rows, err := provider.
sqlstore.
BunDB().
QueryContext(ctx, `
SELECT
ct.relname AS table_name,
ci.relname AS index_name,
i.indisunique AS unique,
i.indisprimary AS primary,
a.attname AS column_name
FROM
pg_index i
LEFT JOIN pg_class ct ON ct.oid = i.indrelid
LEFT JOIN pg_class ci ON ci.oid = i.indexrelid
LEFT JOIN pg_attribute a ON a.attrelid = ct.oid
LEFT JOIN pg_constraint con ON con.conindid = i.indexrelid
WHERE
a.attnum = ANY(i.indkey)
AND con.oid IS NULL
AND ct.relkind = 'r'
AND ct.relname = ?`, string(name))
if err != nil {
return nil, err
}
defer func() {
if err := rows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
uniqueIndicesMap := make(map[string]*sqlschema.UniqueIndex)
for rows.Next() {
var (
tableName string
indexName string
unique bool
primary bool
columnName string
)
if err := rows.Scan(&tableName, &indexName, &unique, &primary, &columnName); err != nil {
return nil, err
}
if unique {
if _, ok := uniqueIndicesMap[indexName]; !ok {
uniqueIndicesMap[indexName] = &sqlschema.UniqueIndex{
TableName: name,
ColumnNames: []sqlschema.ColumnName{sqlschema.ColumnName(columnName)},
}
} else {
uniqueIndicesMap[indexName].ColumnNames = append(uniqueIndicesMap[indexName].ColumnNames, sqlschema.ColumnName(columnName))
}
}
}
indices := make([]sqlschema.Index, 0)
for _, index := range uniqueIndicesMap {
indices = append(indices, index)
}
return indices, nil
}
func (provider *provider) ToggleFKEnforcement(_ context.Context, _ bun.IDB, _ bool) error {
return nil
}


@@ -22,7 +22,6 @@ var (
UserNoCascade = "user_no_cascade"
FactorPassword = "factor_password"
CloudIntegration = "cloud_integration"
-AgentConfigVersion = "agent_config_version"
)
var (
@@ -31,7 +30,6 @@ var (
UserReferenceNoCascade = `("user_id") REFERENCES "users" ("id")`
FactorPasswordReference = `("password_id") REFERENCES "factor_password" ("id")`
CloudIntegrationReference = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
-AgentConfigVersionReference = `("version_id") REFERENCES "agent_config_version" ("id")`
)
type dialect struct{}
@@ -276,8 +274,6 @@ func (dialect *dialect) RenameTableAndModifyModel(ctx context.Context, bun bun.I
fkReferences = append(fkReferences, FactorPasswordReference)
} else if reference == CloudIntegration && !slices.Contains(fkReferences, CloudIntegrationReference) {
fkReferences = append(fkReferences, CloudIntegrationReference)
-} else if reference == AgentConfigVersion && !slices.Contains(fkReferences, AgentConfigVersionReference) {
-fkReferences = append(fkReferences, AgentConfigVersionReference)
}
}


@@ -10,6 +10,7 @@ import (
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/jackc/pgx/v5/stdlib"
+"github.com/jmoiron/sqlx"
"github.com/uptrace/bun"
"github.com/uptrace/bun/dialect/pgdialect"
)
@@ -18,6 +19,7 @@ type provider struct {
settings factory.ScopedProviderSettings
sqldb *sql.DB
bundb *sqlstore.BunDB
+sqlxdb *sqlx.DB
dialect *dialect
}
@@ -59,6 +61,7 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
settings: settings,
sqldb: sqldb,
bundb: sqlstore.NewBunDB(settings, sqldb, pgdialect.New(), hooks),
+sqlxdb: sqlx.NewDb(sqldb, "postgres"),
dialect: new(dialect),
}, nil
}
@@ -71,6 +74,10 @@ func (provider *provider) SQLDB() *sql.DB {
return provider.sqldb
}
+func (provider *provider) SQLxDB() *sqlx.DB {
+return provider.sqlxdb
+}
func (provider *provider) Dialect() sqlstore.SQLDialect {
return provider.dialect
}


@@ -1,5 +1,4 @@
module.exports = {
-ignorePatterns: ['src/parser/*.ts'],
env: {
browser: true,
es2021: true,

frontend/.gitignore

@@ -2,30 +2,3 @@
# Sentry Config File
.env.sentry-build-plugin
.qodo
-# Playwright
-node_modules/
-/test-results/
-/playwright-report/
-/blob-report/
-/playwright/.cache/
-/playwright/test-results/
-/playwright/blob-report/
-/playwright/playwright-report/
-e2e/test-plan/alerts/
-e2e/test-plan/dashboards/
-e2e/test-plan/exceptions/
-e2e/test-plan/external-apis/
-e2e/test-plan/help-support/
-e2e/test-plan/infrastructure/
-e2e/test-plan/logs/
-e2e/test-plan/messaging-queues/
-e2e/test-plan/metrics/
-e2e/test-plan/navigation/
-e2e/test-plan/onboarding/
-e2e/test-plan/saved-views/
-e2e/test-plan/service-map/
-e2e/test-plan/services/
-e2e/test-plan/traces/
-e2e/test-plan/user-preferences/


@@ -8,6 +8,3 @@ public/
# Ignore all JSON files:
**/*.json
-# Ignore all files in parser folder:
-src/parser/**


@@ -1,29 +0,0 @@
# SigNoz E2E Test Plan
This directory contains the structured test plan for the SigNoz application. Each subfolder corresponds to a main module or feature area, and contains scenario files for all user journeys, edge cases, and cross-module flows. These documents serve as the basis for generating Playwright MCP-driven E2E tests.
## Structure
- Each main module (e.g., logs, traces, dashboards, alerts, settings, etc.) has its own folder or markdown file.
- Each file contains detailed scenario templates, including preconditions, step-by-step actions, and expected outcomes.
- Use these documents to write, review, and update test cases as the application evolves.
## Folders & Files
- `logs/` — Logs module scenarios
- `traces/` — Traces module scenarios
- `metrics/` — Metrics module scenarios
- `dashboards/` — Dashboards module scenarios
- `alerts/` — Alerts module scenarios
- `services/` — Services module scenarios
- `settings/` — Settings and all sub-settings scenarios
- `onboarding/` — Onboarding and signup flows
- `navigation/` — Navigation, sidebar, and cross-module flows
- `exceptions/` — Exception and error handling scenarios
- `external-apis/` — External API monitoring scenarios
- `messaging-queues/` — Messaging queue scenarios
- `infrastructure/` — Infrastructure monitoring scenarios
- `help-support/` — Help & support scenarios
- `user-preferences/` — User preferences and personalization scenarios
- `service-map/` — Service map scenarios
- `saved-views/` — Saved views scenarios


@@ -1,16 +0,0 @@
# Settings Module Test Plan
This folder contains E2E test scenarios for the Settings module and all sub-settings.
## Scenario Categories
- General settings (org/workspace, branding, version info)
- Billing settings
- Members & SSO
- Custom domain
- Integrations
- Notification channels
- API keys
- Ingestion
- Account settings (profile, password, preferences)
- Keyboard shortcuts


@@ -1,43 +0,0 @@
# Account Settings E2E Scenarios (Updated)
## 1. Update Name
- **Precondition:** User is logged in
- **Steps:**
1. Click 'Update name' button
2. Edit name field in the modal/dialog
3. Save changes
- **Expected:** Name is updated in the UI
## 2. Update Email
- **Note:** The email field is not editable in the current UI.
## 3. Reset Password
- **Precondition:** User is logged in
- **Steps:**
1. Click 'Reset password' button
2. Complete reset flow (modal/dialog or external flow)
- **Expected:** Password is reset
## 4. Toggle 'Adapt to my timezone'
- **Precondition:** User is logged in
- **Steps:**
1. Toggle 'Adapt to my timezone' switch
- **Expected:** Timezone adapts accordingly (UI feedback/confirmation should be checked)
## 5. Toggle Theme (Dark/Light)
- **Precondition:** User is logged in
- **Steps:**
1. Toggle theme radio buttons ('Dark', 'Light Beta')
- **Expected:** Theme changes
## 6. Toggle Sidebar Always Open
- **Precondition:** User is logged in
- **Steps:**
1. Toggle 'Keep the primary sidebar always open' switch
- **Expected:** Sidebar remains open/closed as per toggle
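A minimal Playwright sketch of Scenario 1 (Update Name). The navigation steps and the 'Update name' button match the account-settings spec later in this diff; the dialog's textbox, 'Save' button, and the new display name are assumptions, since the modal's markup is not captured here.

```ts
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';

test('Account Settings - Update name', async ({ page }) => {
	await ensureLoggedIn(page);
	// Navigation mirrors the account-settings spec in this diff.
	await page.getByTestId('settings-nav-item').click();
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();
	// 'Update name' is confirmed by the spec; the dialog's textbox and
	// Save button below are assumptions about the modal's markup.
	await page.getByRole('button', { name: 'Update name' }).click();
	await page.getByRole('dialog').getByRole('textbox').fill('New Display Name');
	await page.getByRole('button', { name: 'Save' }).click();
	await expect(page.getByText('New Display Name')).toBeVisible();
});
```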


@@ -1,26 +0,0 @@
# API Keys E2E Scenarios (Updated)
## 1. Create a New API Key
- **Precondition:** User is admin
- **Steps:**
1. Click 'New Key' button
2. Enter details in the modal/dialog
3. Click 'Save'
- **Expected:** API key is created and listed in the table
## 2. Revoke an API Key
- **Precondition:** API key exists
- **Steps:**
1. In the table, locate the API key row
2. Click the revoke/delete button (icon button in the Action column)
3. Confirm if prompted
- **Expected:** API key is revoked/removed from the table
## 3. View API Key Usage
- **Precondition:** API key exists
- **Steps:**
1. View the 'Last used' and 'Expired' columns in the table
- **Expected:** Usage data is displayed for each API key
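A hedged sketch of Scenario 1 (Create a New API Key). The tab navigation and 'New Key' button come from the api-keys spec later in this diff; the dialog's 'Name' field, 'Save' button, and the key name are assumed.

```ts
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';

test('API Keys - Create a new key', async ({ page }) => {
	await ensureLoggedIn(page);
	await page.getByTestId('settings-nav-item').click();
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();
	await page.getByTestId('api-keys').click();
	// 'New Key' is confirmed by the api-keys spec in this diff; the
	// dialog's 'Name' field and 'Save' button are assumed markup.
	await page.getByRole('button', { name: 'New Key' }).click();
	await page.getByLabel('Name').fill('e2e-smoke-key');
	await page.getByRole('button', { name: 'Save' }).click();
	await expect(page.getByText('e2e-smoke-key')).toBeVisible();
});
```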


@@ -1,17 +0,0 @@
# Billing Settings E2E Scenarios (Updated)
## 1. View Billing Information
- **Precondition:** User is admin
- **Steps:**
1. Navigate to Billing Settings
2. Wait for the billing chart/data to finish loading
- **Expected:**
- Billing heading and subheading are displayed
- Usage/cost table is visible with columns: Unit, Data Ingested, Price per Unit, Cost (Billing period to date)
- "Download CSV" and "Manage Billing" buttons are present and enabled after loading
- Test clicking "Download CSV" and "Manage Billing" for expected behavior (e.g., file download, navigation, or modal)
> Note: If these features are expected to trigger specific flows, document the observed behavior for each button.
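A sketch of the 'Download CSV' behavior, using the selectors and download assertion from the billing spec later in this diff; only the exact filename prefix is an assumption carried over from that spec.

```ts
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';

test('Billing - Download CSV', async ({ page }) => {
	await ensureLoggedIn(page);
	await page.getByTestId('settings-nav-item').click();
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();
	await page.getByTestId('billing').click();
	await page.getByText('loading').first().waitFor({ state: 'hidden' });
	// Capture the file download triggered by the CSV button.
	const [download] = await Promise.all([
		page.waitForEvent('download'),
		page.getByRole('button', { name: 'cloud-download Download CSV' }).click(),
	]);
	expect(download.suggestedFilename()).toContain('billing_usage');
});
```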


@@ -1,18 +0,0 @@
# Custom Domain E2E Scenarios (Updated)
## 1. Add or Update Custom Domain
- **Precondition:** User is admin
- **Steps:**
1. Click 'Customize teams URL' button
2. In the 'Customize your teams URL' dialog, enter the preferred subdomain
3. Click 'Apply Changes'
- **Expected:** Domain is set/updated for the team (UI feedback/confirmation should be checked)
## 2. Verify Domain Ownership
- **Note:** No explicit 'Verify' button or flow is present in the current UI. If verification is required, it may be handled automatically or via support.
## 3. Remove a Custom Domain
- **Note:** No explicit 'Remove' button or flow is present in the current UI. The only available action is to update the subdomain.
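A sketch of Scenario 1 (Add or Update Custom Domain). The dialog title, subdomain field, and 'Apply Changes' button are confirmed by the custom-domain spec later in this diff; the subdomain value itself is arbitrary.

```ts
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';

test('Custom Domain - Update teams URL', async ({ page }) => {
	await ensureLoggedIn(page);
	await page.getByTestId('settings-nav-item').click();
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();
	await page.getByTestId('custom-domain').click();
	await page.getByRole('button', { name: 'Customize teams URL' }).click();
	// Dialog title, subdomain field, and Apply Changes button match the
	// custom-domain spec in this diff; the subdomain value is arbitrary.
	const dialog = page.getByRole('dialog', { name: 'Customize your teams URL' });
	await expect(dialog).toBeVisible();
	await page.getByLabel('Teams URL subdomain').fill('acme-observability');
	await page.getByRole('button', { name: 'Apply Changes' }).click();
});
```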


@@ -1,31 +0,0 @@
# General Settings E2E Scenarios
## 1. View General Settings
- **Precondition:** User is logged in
- **Steps:**
1. Navigate to General Settings
- **Expected:** General settings are displayed
## 2. Update Organization/Workspace Name
- **Precondition:** User is admin
- **Steps:**
1. Edit organization/workspace name
2. Save changes
- **Expected:** Name is updated and visible
## 3. Update Logo or Branding
- **Precondition:** User is admin
- **Steps:**
1. Upload new logo/branding
2. Save changes
- **Expected:** Branding is updated
## 4. View Version/Build Info
- **Precondition:** User is logged in
- **Steps:**
1. View version/build info section
- **Expected:** Version/build info is displayed


@@ -1,20 +0,0 @@
# Ingestion E2E Scenarios (Updated)
## 1. View Ingestion Sources
- **Precondition:** User is admin
- **Steps:**
1. Navigate to the Integrations page
- **Expected:** List of available data sources/integrations is displayed
## 2. Configure Ingestion Sources
- **Precondition:** User is admin
- **Steps:**
1. Click 'Configure' for a data source/integration
2. Complete the configuration flow (modal or page, as available)
- **Expected:** Source is configured (UI feedback/confirmation should be checked)
## 3. Disable/Enable Ingestion
- **Note:** No visible enable/disable toggle for ingestion sources in the current UI. Ingestion is managed via the Integrations configuration flows.


@@ -1,51 +0,0 @@
# Integrations E2E Scenarios (Updated)
## 1. View List of Available Integrations
- **Precondition:** User is logged in
- **Steps:**
1. Navigate to Integrations
- **Expected:** List of integrations is displayed, each with a name, description, and 'Configure' button
## 2. Search Integrations by Name/Type
- **Precondition:** Integrations exist
- **Steps:**
1. Enter search/filter criteria in the 'Search for an integration...' box
- **Expected:** Only matching integrations are shown
## 3. Connect a New Integration
- **Precondition:** User is admin
- **Steps:**
1. Click 'Configure' for an integration
2. Complete the configuration flow (modal or page, as available)
- **Expected:** Integration is connected/configured (UI feedback/confirmation should be checked)
## 4. Disconnect an Integration
- **Note:** No visible 'Disconnect' button in the main list. This may be available in the configuration flow for a connected integration.
## 5. Configure Integration Settings
- **Note:** Configuration is handled in the flow after clicking 'Configure' for an integration.
## 6. Test Integration Connection
- **Note:** No visible 'Test Connection' button in the main list. This may be available in the configuration flow.
## 7. View Integration Status/Logs
- **Note:** No visible status/logs section in the main list. This may be available in the configuration flow.
## 8. Filter Integrations by Category
- **Note:** No explicit category filter in the current UI, only a search box.
## 9. View Integration Documentation/Help
- **Note:** No visible 'Help/Docs' button in the main list. This may be available in the configuration flow.
## 10. Update Integration Configuration
- **Note:** Configuration is handled in the flow after clicking 'Configure' for an integration.
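A sketch of Scenario 2 (Search Integrations). The search placeholder and 'Configure' button are confirmed by the integrations spec later in this diff; 'AWS' as a matching integration is an assumption. The same flow applies to the Ingestion scenarios above, since that tab lands on the same Integrations page.

```ts
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';

test('Integrations - Search filters the list', async ({ page }) => {
	await ensureLoggedIn(page);
	await page.getByTestId('settings-nav-item').click();
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();
	await page.getByTestId('integrations').click();
	// The search placeholder is confirmed by the integrations spec;
	// 'AWS' as a match is an assumption about available integrations.
	await page.getByPlaceholder('Search for an integration...').fill('AWS');
	await expect(
		page.getByRole('button', { name: 'Configure' }).first(),
	).toBeVisible();
});
```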


@@ -1,19 +0,0 @@
# Keyboard Shortcuts E2E Scenarios (Updated)
## 1. View Keyboard Shortcuts
- **Precondition:** User is logged in
- **Steps:**
1. Navigate to Keyboard Shortcuts
- **Expected:** Shortcuts are displayed in categorized tables (Global, Logs Explorer, Query Builder, Dashboard)
## 2. Customize Keyboard Shortcuts (if supported)
- **Note:** Customization is not available in the current UI. Shortcuts are view-only.
## 3. Use Keyboard Shortcuts for Navigation/Actions
- **Precondition:** User is logged in
- **Steps:**
1. Use shortcut for navigation/action (e.g., shift+s for Services, cmd+enter for running query)
- **Expected:** Navigation/action is performed as per shortcut
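A sketch of Scenario 3. The shift+s binding comes from the scenario above; the `/services` route it should land on is an assumption about the app's URL scheme.

```ts
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';

test('Keyboard Shortcuts - Navigate with shift+s', async ({ page }) => {
	await ensureLoggedIn(page);
	// shift+s is listed in the shortcuts table; the target URL is assumed.
	await page.keyboard.press('Shift+S');
	await expect(page).toHaveURL(/services/);
});
```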


@@ -1,49 +0,0 @@
# Members & SSO E2E Scenarios (Updated)
## 1. Invite a New Member
- **Precondition:** User is admin
- **Steps:**
1. Click 'Invite Members' button
2. In the 'Invite team members' dialog, enter email address, name (optional), and select role
3. (Optional) Click 'Add another team member' to invite more
4. Click 'Invite team members' to send invite(s)
- **Expected:** Pending invite appears in the 'Pending Invites' table
## 2. Remove a Member
- **Precondition:** User is admin, member exists
- **Steps:**
1. In the 'Members' table, locate the member row
2. Click 'Delete' in the Action column
3. Confirm removal if prompted
- **Expected:** Member is removed from the table
## 3. Update Member Roles
- **Precondition:** User is admin, member exists
- **Steps:**
1. In the 'Members' table, locate the member row
2. Click 'Edit' in the Action column
3. Change role in the edit dialog/modal
4. Save changes
- **Expected:** Member role is updated in the table
## 4. Configure SSO
- **Precondition:** User is admin
- **Steps:**
1. In the 'Authenticated Domains' section, locate the domain row
2. Click 'Configure SSO' or 'Edit Google Auth' as available
3. Complete SSO provider configuration in the modal/dialog
4. Save settings
- **Expected:** SSO is configured for the domain
## 5. Login via SSO
- **Precondition:** SSO is configured
- **Steps:**
1. Log out from the app
2. On the login page, click 'Login with SSO'
3. Complete SSO login flow
- **Expected:** User is logged in via SSO
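A sketch of Scenario 1 (Invite a New Member). The tab testid, 'Invite Members' button, and 'Invite team members' modal title are confirmed by the members-sso spec later in this diff; the email placeholder and the dialog's submit button label are assumptions.

```ts
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';

test('Members & SSO - Invite a member', async ({ page }) => {
	await ensureLoggedIn(page);
	await page.getByTestId('settings-nav-item').click();
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();
	await page.getByTestId('members-sso').click();
	await page.getByRole('button', { name: /Invite Members/ }).click();
	await expect(page.getByText('Invite team members').first()).toBeVisible();
	// The email placeholder and submit label below are assumptions;
	// only the modal title is confirmed by the spec.
	await page.getByPlaceholder('email').fill('new.member@example.com');
	await page.getByRole('button', { name: 'Invite team members' }).click();
	await expect(
		page.getByRole('heading', { name: /Pending Invites/ }),
	).toBeVisible();
});
```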


@@ -1,39 +0,0 @@
# Notification Channels E2E Scenarios (Updated)
## 1. Add a New Notification Channel
- **Precondition:** User is admin
- **Steps:**
1. Click 'New Alert Channel' button
2. In the 'New Notification Channel' form, fill in required fields (Name, Type, Webhook URL, etc.)
3. (Optional) Toggle 'Send resolved alerts'
4. (Optional) Click 'Test' to send a test notification
5. Click 'Save' to add the channel
- **Expected:** Channel is added and listed in the table
## 2. Test Notification Channel
- **Precondition:** Channel is being created or edited
- **Steps:**
1. In the 'New Notification Channel' or 'Edit Notification Channel' form, click 'Test'
- **Expected:** Test notification is sent (UI feedback/confirmation should be checked)
## 3. Remove a Notification Channel
- **Precondition:** Channel is added
- **Steps:**
1. In the table, locate the channel row
2. Click 'Delete' in the Action column
3. Confirm removal if prompted
- **Expected:** Channel is removed from the table
## 4. Update Notification Channel Settings
- **Precondition:** Channel is added
- **Steps:**
1. In the table, locate the channel row
2. Click 'Edit' in the Action column
3. In the 'Edit Notification Channel' form, update fields as needed
4. (Optional) Click 'Test' to send a test notification
5. Click 'Save' to update the channel
- **Expected:** Settings are updated
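A sketch of Scenario 1 (Add a New Notification Channel). The button, field labels, and 'Test'/'Save' buttons are confirmed by the notification-channels spec later in this diff; the sketch assumes webhook is the default channel type and uses a placeholder URL.

```ts
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';

test('Notification Channels - Create a webhook channel', async ({ page }) => {
	await ensureLoggedIn(page);
	await page.getByTestId('settings-nav-item').click();
	await page.getByRole('menuitem', { name: 'Account Settings' }).click();
	await page.getByTestId('notification-channels').click();
	await page.getByText('loading').first().waitFor({ state: 'hidden' });
	await page.getByRole('button', { name: /New Alert Channel/ }).click();
	// Field labels and buttons match the notification-channels spec;
	// assuming webhook is the default type, and the URL is a placeholder.
	await page.getByLabel('Name').fill('e2e-webhook');
	await page.getByLabel('Webhook URL').fill('https://example.com/hook');
	await page.getByRole('button', { name: 'Test' }).click();
	await page.getByRole('button', { name: 'Save' }).click();
});
```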


@@ -1,199 +0,0 @@
# SigNoz Test Plan Validation Report
This report documents the validation of the E2E test plan against the current live application using Playwright MCP. Each module is reviewed for coverage, gaps, and required updates.
---
## Home Module
- **Coverage:**
- Widgets for logs, traces, metrics, dashboards, alerts, services, saved views, onboarding checklist
- Quick access buttons: Explore Logs, Create dashboard, Create an alert
- **Gaps/Updates:**
- Add scenarios for checklist interactions (e.g., “Ill do this later”, progress tracking)
- Add scenarios for Saved Views and cross-module links
- Add scenario for onboarding checklist completion
---
## Logs Module
- **Coverage:**
- Explorer, Pipelines, Views tabs
- Filtering by service, environment, severity, host, k8s, etc.
- Search, save view, create alert, add to dashboard, export, view mode switching
- **Gaps/Updates:**
- Add scenario for quick filter customization
- Add scenario for “Old Explorer” button
- Add scenario for frequency chart toggle
- Add scenario for “Stage & Run Query” workflow
---
## Traces Module
- **Coverage:**
- Tabs: Explorer, Funnels, Views
- Filtering by name, error status, duration, environment, function, service, RPC, status code, HTTP, trace ID, etc.
- Search, save view, create alert, add to dashboard, export, view mode switching (List, Traces, Time Series, Table)
- Pagination, quick filter customization, group by, aggregation
- **Gaps/Updates:**
- Add scenario for quick filter customization
- Add scenario for “Stage & Run Query” workflow
- Add scenario for all view modes (List, Traces, Time Series, Table)
- Add scenario for group by/aggregation
- Add scenario for trace detail navigation (clicking on trace row)
- Add scenario for Funnels tab (create/edit/delete funnel)
- Add scenario for Views tab (manage saved views)
---
## Metrics Module
- **Coverage:**
- Tabs: Summary, Explorer, Views
- Filtering by metric, type, unit, etc.
- Search, save view, add to dashboard, export, view mode switching (chart, table, proportion view)
- Pagination, group by, aggregation, custom queries
- **Gaps/Updates:**
- Add scenario for Proportion View in Summary
- Add scenario for all view modes (chart, table, proportion)
- Add scenario for group by/aggregation
- Add scenario for custom queries in Explorer
- Add scenario for Views tab (manage saved views)
---
## Dashboards Module
- **Coverage:**
- List, search, and filter dashboards
- Create new dashboard (button and template link)
- Edit, delete, and view dashboard details
- Add/edit/delete widgets (implied by dashboard detail)
- Pagination through dashboards
- **Gaps/Updates:**
- Add scenario for browsing dashboard templates (external link)
- Add scenario for requesting new template
- Add scenario for dashboard owner and creation info
- Add scenario for dashboard tags and filtering by tags
- Add scenario for dashboard sharing (if available)
- Add scenario for dashboard image/preview
---
## Messaging Queues Module
- **Coverage:**
- Overview tab: queue metrics, filters (Service Name, Span Name, Msg System, Destination, Kind)
- Search across all columns
- Pagination of queue data
- Sync and Share buttons
- Tabs for Kafka and Celery
- **Gaps/Updates:**
- Add scenario for Kafka tab (detailed metrics, actions)
- Add scenario for Celery tab (detailed metrics, actions)
- Add scenario for filter combinations and edge cases
- Add scenario for sharing queue data
- Add scenario for time range selection
---
## External APIs Module
- **Coverage:**
- Accessed via side navigation under MORE
- Explorer tab: domain, endpoints, last used, rate, error %, avg. latency
- Filters: Deployment Environment, Service Name, Rpc Method, Show IP addresses
- Table pagination
- Share and Stage & Run Query buttons
- **Gaps/Updates:**
- Add scenario for customizing quick filters
- Add scenario for running and staging queries
- Add scenario for sharing API data
- Add scenario for edge cases in filters and table data
---
## Alerts Module
- **Coverage:**
- Alert Rules tab: list, search, create (New Alert), edit, delete, enable/disable, severity, labels, actions
- Triggered Alerts tab (visible in tablist)
- Configuration tab (visible in tablist)
- Table pagination
- **Gaps/Updates:**
- Add scenario for triggered alerts (view, acknowledge, resolve)
- Add scenario for alert configuration (settings, integrations)
- Add scenario for edge cases in alert creation and management
- Add scenario for searching and filtering alerts
---
## Integrations Module
- **Coverage:**
- Integrations tab: list, search, configure (e.g., AWS), request new integration
- One-click setup for AWS monitoring
- Request more integrations (form)
- **Gaps/Updates:**
- Add scenario for configuring integrations (step-by-step)
- Add scenario for searching and filtering integrations
- Add scenario for requesting new integrations
- Add scenario for edge cases (e.g., failed configuration)
---
## Exceptions Module
- **Coverage:**
- All Exceptions: list, search, filter (Deployment Environment, Service Name, Host Name, K8s Cluster/Deployment/Namespace, Net Peer Name)
- Table: Exception Type, Error Message, Count, Last Seen, First Seen, Application
- Pagination
- Exception detail links
- Share and Stage & Run Query buttons
- **Gaps/Updates:**
- Add scenario for exception detail view
- Add scenario for advanced filtering and edge cases
- Add scenario for sharing and running queries
- Add scenario for error grouping and navigation
---
## Service Map Module
- **Coverage:**
- Service Map visualization (main graph)
- Filters: environment, resource attributes
- Time range selection
- Sync and Share buttons
- **Gaps/Updates:**
- Add scenario for interacting with the map (zoom, pan, select service)
- Add scenario for filtering and edge cases
- Add scenario for sharing the map
- Add scenario for time range and environment combinations
---
## Billing Module
- **Coverage:**
- Billing overview: cost monitoring, invoices, CSV download (disabled), manage billing (disabled)
- Teams Cloud section
- Billing table: Unit, Data Ingested, Price per Unit, Cost (Billing period to date)
- **Gaps/Updates:**
- Add scenario for invoice download and management (when enabled)
- Add scenario for cost monitoring and edge cases
- Add scenario for billing table data validation
- Add scenario for permissions and access control
---
## Usage Explorer Module
- **Status:**
- Not accessible in the current environment. Removing from test plan flows.
---
## [Next modules will be filled as validation proceeds]


@@ -1,42 +0,0 @@
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
test('Account Settings - View and Assert Static Controls', async ({ page }) => {
await ensureLoggedIn(page);
// 1. Open the sidebar settings menu using data-testid
await page.getByTestId('settings-nav-item').click();
// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
await page.getByRole('menuitem', { name: 'Account Settings' }).click();
// Assert the main tabpanel/heading (confirmed by DOM)
await expect(page.getByTestId('settings-page-title')).toBeVisible();
// Assert General section and controls (confirmed by DOM)
await expect(
page.getByLabel('My Settings').getByText('General'),
).toBeVisible();
await expect(page.getByText('Manage your account settings.')).toBeVisible();
await expect(page.getByRole('button', { name: 'Update name' })).toBeVisible();
await expect(
page.getByRole('button', { name: 'Reset password' }),
).toBeVisible();
// Assert User Preferences section and controls (confirmed by DOM)
await expect(page.getByText('User Preferences')).toBeVisible();
await expect(
page.getByText('Tailor the SigNoz console to work according to your needs.'),
).toBeVisible();
await expect(page.getByText('Select your theme')).toBeVisible();
const themeSelector = page.getByTestId('theme-selector');
await expect(themeSelector.getByText('Dark')).toBeVisible();
await expect(themeSelector.getByText('Light')).toBeVisible();
await expect(themeSelector.getByText('System')).toBeVisible();
await expect(page.getByTestId('timezone-adaptation-switch')).toBeVisible();
await expect(page.getByTestId('side-nav-pinned-switch')).toBeVisible();
});


@@ -1,42 +0,0 @@
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
test('API Keys Settings - View and Interact', async ({ page }) => {
await ensureLoggedIn(page);
// 1. Open the sidebar settings menu using data-testid
await page.getByTestId('settings-nav-item').click();
// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
await page.getByRole('menuitem', { name: 'Account Settings' }).click();
// Assert the main tabpanel/heading (confirmed by DOM)
await expect(page.getByTestId('settings-page-title')).toBeVisible();
// Focus on the settings page sidenav
await page.getByTestId('settings-page-sidenav').focus();
// Click API Keys tab in the settings sidebar (by data-testid)
await page.getByTestId('api-keys').click();
// Assert heading and subheading
await expect(page.getByRole('heading', { name: 'API Keys' })).toBeVisible();
await expect(
page.getByText('Create and manage API keys for the SigNoz API'),
).toBeVisible();
// Assert presence of New Key button
const newKeyBtn = page.getByRole('button', { name: 'New Key' });
await expect(newKeyBtn).toBeVisible();
// Assert table columns
await expect(page.getByText('Last used').first()).toBeVisible();
await expect(page.getByText('Expired').first()).toBeVisible();
// Assert at least one API key row with action buttons
// Select the first action cell's first button (icon button)
const firstActionCell = page.locator('table tr').nth(1).locator('td').last();
const deleteBtn = firstActionCell.locator('button').first();
await expect(deleteBtn).toBeVisible();
});


@@ -1,71 +0,0 @@
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
// E2E: Billing Settings - View Billing Information and Button Actions
test('View Billing Information and Button Actions', async ({
page,
context,
}) => {
// Ensure user is logged in
await ensureLoggedIn(page);
// 1. Open the sidebar settings menu using data-testid
await page.getByTestId('settings-nav-item').click();
// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
await page.getByRole('menuitem', { name: 'Account Settings' }).click();
// Assert the main tabpanel/heading (confirmed by DOM)
await expect(page.getByTestId('settings-page-title')).toBeVisible();
// Focus on the settings page sidenav
await page.getByTestId('settings-page-sidenav').focus();
// Click Billing tab in the settings sidebar (by data-testid)
await page.getByTestId('billing').click();
// Wait for billing chart/data to finish loading
await page.getByText('loading').first().waitFor({ state: 'hidden' });
// Assert visibility of subheading (unique)
await expect(
page.getByText(
'Manage your billing information, invoices, and monitor costs.',
),
).toBeVisible();
// Assert visibility of Teams Cloud heading
await expect(page.getByRole('heading', { name: 'Teams Cloud' })).toBeVisible();
// Assert presence of summary and detailed tables
await expect(page.getByText('TOTAL SPENT')).toBeVisible();
await expect(page.getByText('Data Ingested')).toBeVisible();
await expect(page.getByText('Price per Unit')).toBeVisible();
await expect(page.getByText('Cost (Billing period to date)')).toBeVisible();
// Assert presence of alert and note
await expect(
page.getByText('Your current billing period is from', { exact: false }),
).toBeVisible();
await expect(
page.getByText('Billing metrics are updated once every 24 hours.'),
).toBeVisible();
// Test Download CSV button
const [download] = await Promise.all([
page.waitForEvent('download'),
page.getByRole('button', { name: 'cloud-download Download CSV' }).click(),
]);
// Optionally, check download file name
expect(download.suggestedFilename()).toContain('billing_usage');
// Test Manage Billing button (opens Stripe in new tab)
const [newPage] = await Promise.all([
context.waitForEvent('page'),
page.getByTestId('header-billing-button').click(),
]);
await newPage.waitForLoadState();
expect(newPage.url()).toContain('stripe.com');
await newPage.close();
});


@@ -1,52 +0,0 @@
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
test('Custom Domain Settings - View and Interact', async ({ page }) => {
await ensureLoggedIn(page);
// 1. Open the sidebar settings menu using data-testid
await page.getByTestId('settings-nav-item').click();
// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
await page.getByRole('menuitem', { name: 'Account Settings' }).click();
// Assert the main tabpanel/heading (confirmed by DOM)
await expect(page.getByTestId('settings-page-title')).toBeVisible();
// Focus on the settings page sidenav
await page.getByTestId('settings-page-sidenav').focus();
// Click Custom Domain tab in the settings sidebar (by data-testid)
await page.getByTestId('custom-domain').click();
// Wait for custom domain chart/data to finish loading
await page.getByText('loading').first().waitFor({ state: 'hidden' });
// Assert heading and subheading
await expect(
page.getByRole('heading', { name: 'Custom Domain Settings' }),
).toBeVisible();
await expect(
page.getByText('Personalize your workspace domain effortlessly.'),
).toBeVisible();
// Assert presence of Customize teams URL button
const customizeBtn = page.getByRole('button', {
name: 'Customize teams URL',
});
await expect(customizeBtn).toBeVisible();
await customizeBtn.click();
// Assert modal/dialog fields and buttons
await expect(
page.getByRole('dialog', { name: 'Customize your teams URL' }),
).toBeVisible();
await expect(page.getByLabel('Teams URL subdomain')).toBeVisible();
await expect(
page.getByRole('button', { name: 'Apply Changes' }),
).toBeVisible();
await expect(page.getByRole('button', { name: 'Close' })).toBeVisible();
// Close the modal
await page.getByRole('button', { name: 'Close' }).click();
});


@@ -1,32 +0,0 @@
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
test('View General Settings', async ({ page }) => {
await ensureLoggedIn(page);
// 1. Open the sidebar settings menu using data-testid
await page.getByTestId('settings-nav-item').click();
// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
await page.getByRole('menuitem', { name: 'Account Settings' }).click();
// Assert the main tabpanel/heading (confirmed by DOM)
await expect(page.getByTestId('settings-page-title')).toBeVisible();
// Focus on the settings page sidenav
await page.getByTestId('settings-page-sidenav').focus();
// Click General tab in the settings sidebar (by data-testid)
await page.getByTestId('general').click();
// Wait for General tab to be visible
await page.getByRole('tabpanel', { name: 'General' }).waitFor();
// Assert visibility of definitive/static elements
await expect(page.getByRole('heading', { name: 'Metrics' })).toBeVisible();
await expect(page.getByRole('heading', { name: 'Traces' })).toBeVisible();
await expect(page.getByRole('heading', { name: 'Logs' })).toBeVisible();
await expect(page.getByText('Please')).toBeVisible();
await expect(page.getByRole('link', { name: 'email us' })).toBeVisible();
});


@@ -1,48 +0,0 @@
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
test('Ingestion Settings - View and Interact', async ({ page }) => {
await ensureLoggedIn(page);
// 1. Open the sidebar settings menu using data-testid
await page.getByTestId('settings-nav-item').click();
// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
await page.getByRole('menuitem', { name: 'Account Settings' }).click();
// Assert the main tabpanel/heading (confirmed by DOM)
await expect(page.getByTestId('settings-page-title')).toBeVisible();
// Focus on the settings page sidenav
await page.getByTestId('settings-page-sidenav').focus();
// Click Ingestion tab in the settings sidebar (by data-testid)
await page.getByTestId('ingestion').click();
// Assert heading and subheading (Integrations page)
await expect(
page.getByRole('heading', { name: 'Integrations' }),
).toBeVisible();
await expect(
page.getByText('Manage Integrations for this workspace'),
).toBeVisible();
// Assert presence of search box
await expect(
page.getByPlaceholder('Search for an integration...'),
).toBeVisible();
// Assert at least one data source with Configure button
const configureBtn = page.getByRole('button', { name: 'Configure' }).first();
await expect(configureBtn).toBeVisible();
// Assert Request more integrations section
await expect(
page.getByText(
"Can't find what youre looking for? Request more integrations",
),
).toBeVisible();
await expect(page.getByPlaceholder('Enter integration name...')).toBeVisible();
await expect(page.getByRole('button', { name: 'Submit' })).toBeVisible();
});


@@ -1,48 +0,0 @@
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
test('Integrations Settings - View and Interact', async ({ page }) => {
await ensureLoggedIn(page);
// 1. Open the sidebar settings menu using data-testid
await page.getByTestId('settings-nav-item').click();
// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
await page.getByRole('menuitem', { name: 'Account Settings' }).click();
// Assert the main tabpanel/heading (confirmed by DOM)
await expect(page.getByTestId('settings-page-title')).toBeVisible();
// Focus on the settings page sidenav
await page.getByTestId('settings-page-sidenav').focus();
// Click Integrations tab in the settings sidebar (by data-testid)
await page.getByTestId('integrations').click();
// Assert heading and subheading
await expect(
page.getByRole('heading', { name: 'Integrations' }),
).toBeVisible();
await expect(
page.getByText('Manage Integrations for this workspace'),
).toBeVisible();
// Assert presence of search box
await expect(
page.getByPlaceholder('Search for an integration...'),
).toBeVisible();
// Assert at least one integration with Configure button
const configureBtn = page.getByRole('button', { name: 'Configure' }).first();
await expect(configureBtn).toBeVisible();
// Assert Request more integrations section
await expect(
page.getByText(
"Can't find what youre looking for? Request more integrations",
),
).toBeVisible();
await expect(page.getByPlaceholder('Enter integration name...')).toBeVisible();
await expect(page.getByRole('button', { name: 'Submit' })).toBeVisible();
});


@@ -1,56 +0,0 @@
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
test('Members & SSO Settings - View and Interact', async ({ page }) => {
await ensureLoggedIn(page);
// 1. Open the sidebar settings menu using data-testid
await page.getByTestId('settings-nav-item').click();
// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
await page.getByRole('menuitem', { name: 'Account Settings' }).click();
// Assert the main tabpanel/heading (confirmed by DOM)
await expect(page.getByTestId('settings-page-title')).toBeVisible();
// Focus on the settings page sidenav
await page.getByTestId('settings-page-sidenav').focus();
// Click Members & SSO tab in the settings sidebar (by data-testid)
await page.getByTestId('members-sso').click();
// Assert headings and tables
await expect(
page.getByRole('heading', { name: /Members \(\d+\)/ }),
).toBeVisible();
await expect(
page.getByRole('heading', { name: /Pending Invites \(\d+\)/ }),
).toBeVisible();
await expect(
page.getByRole('heading', { name: 'Authenticated Domains' }),
).toBeVisible();
// Assert Invite Members button is visible and clickable
const inviteBtn = page.getByRole('button', { name: /Invite Members/ });
await expect(inviteBtn).toBeVisible();
await inviteBtn.click();
// Assert Invite Members modal/dialog appears (modal title is unique)
await expect(page.getByText('Invite team members').first()).toBeVisible();
// Close the modal (use unique 'Close' button)
await page.getByRole('button', { name: 'Close' }).click();
// Assert Edit and Delete buttons are present for at least one member
const editBtn = page.getByRole('button', { name: /Edit/ }).first();
const deleteBtn = page.getByRole('button', { name: /Delete/ }).first();
await expect(editBtn).toBeVisible();
await expect(deleteBtn).toBeVisible();
// Assert Add Domains button is visible
await expect(page.getByRole('button', { name: /Add Domains/ })).toBeVisible();
// Assert Configure SSO or Edit Google Auth button is visible for at least one domain
const ssoBtn = page
.getByRole('button', { name: /Configure SSO|Edit Google Auth/ })
.first();
await expect(ssoBtn).toBeVisible();
});


@@ -1,57 +0,0 @@
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
test('Notification Channels Settings - View and Interact', async ({ page }) => {
await ensureLoggedIn(page);
// 1. Open the sidebar settings menu using data-testid
await page.getByTestId('settings-nav-item').click();
// 2. Click Account Settings in the dropdown (by role/name or data-testid if available)
await page.getByRole('menuitem', { name: 'Account Settings' }).click();
// Assert the main tabpanel/heading (confirmed by DOM)
await expect(page.getByTestId('settings-page-title')).toBeVisible();
// Focus on the settings page sidenav
await page.getByTestId('settings-page-sidenav').focus();
// Click Notification Channels tab in the settings sidebar (by data-testid)
await page.getByTestId('notification-channels').click();
// Wait for loading to finish
await page.getByText('loading').first().waitFor({ state: 'hidden' });
// Assert presence of New Alert Channel button
const newChannelBtn = page.getByRole('button', { name: /New Alert Channel/ });
await expect(newChannelBtn).toBeVisible();
// Assert table columns
await expect(page.getByText('Name')).toBeVisible();
await expect(page.getByText('Type')).toBeVisible();
await expect(page.getByText('Action')).toBeVisible();
// Click New Alert Channel and assert modal fields/buttons
await newChannelBtn.click();
await expect(
page.getByRole('heading', { name: 'New Notification Channel' }),
).toBeVisible();
await expect(page.getByLabel('Name')).toBeVisible();
await expect(page.getByLabel('Type')).toBeVisible();
await expect(page.getByLabel('Webhook URL')).toBeVisible();
await expect(
page.getByRole('switch', { name: 'Send resolved alerts' }),
).toBeVisible();
await expect(page.getByRole('button', { name: 'Save' })).toBeVisible();
await expect(page.getByRole('button', { name: 'Test' })).toBeVisible();
await expect(page.getByRole('button', { name: 'Back' })).toBeVisible();
// Close modal
await page.getByRole('button', { name: 'Back' }).click();
// Assert Edit and Delete buttons for at least one channel
const editBtn = page.getByRole('button', { name: 'Edit' }).first();
const deleteBtn = page.getByRole('button', { name: 'Delete' }).first();
await expect(editBtn).toBeVisible();
await expect(deleteBtn).toBeVisible();
});

View File

@@ -1,35 +0,0 @@
import { Page } from '@playwright/test';
// Read credentials from environment variables
const username = process.env.LOGIN_USERNAME;
const password = process.env.LOGIN_PASSWORD;
const baseURL = process.env.BASE_URL;
/**
* Ensures the user is logged in. If not, performs the login steps.
* Follows the MCP process step-by-step.
*/
export async function ensureLoggedIn(page: Page): Promise<void> {
// If already on the home page, skip login (page.url() is synchronous, so no await is needed)
if (page.url().includes('/home')) {
return;
}
if (!username || !password || !baseURL) {
throw new Error(
'LOGIN_USERNAME, LOGIN_PASSWORD and BASE_URL environment variables must be set.',
);
}
await page.goto(`${baseURL}/login`);
await page.getByTestId('email').click();
await page.getByTestId('email').fill(username);
await page.getByTestId('initiate_login').click();
await page.getByTestId('password').click();
await page.getByTestId('password').fill(password);
await page.getByRole('button', { name: 'Login' }).click();
await page
.getByText('Hello there, Welcome to your')
.waitFor({ state: 'visible' });
}
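A quick usage sketch, assuming a spec file under e2e/tests as in this repo, with LOGIN_USERNAME, LOGIN_PASSWORD, and BASE_URL exported in the environment (the variable names are taken from the reads above; the beforeEach placement is one reasonable pattern, not prescribed by this util):
import { test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
// Log in once before each test in this spec file
test.beforeEach(async ({ page }) => {
await ensureLoggedIn(page);
});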

View File

@@ -1,7 +1,6 @@
NODE_ENV="development" NODE_ENV="development"
BUNDLE_ANALYSER="true" BUNDLE_ANALYSER="true"
FRONTEND_API_ENDPOINT="http://localhost:8080/" FRONTEND_API_ENDPOINT="http://localhost:8080/"
PYLON_APP_ID="pylon-app-id" INTERCOM_APP_ID="intercom-app-id"
APPCUES_APP_ID="appcess-app-id"
CI="1" CI="1"

View File

@@ -25,7 +25,7 @@ const config: Config.InitialOptions = {
'^.+\\.(js|jsx)$': 'babel-jest', '^.+\\.(js|jsx)$': 'babel-jest',
}, },
transformIgnorePatterns: [ transformIgnorePatterns: [
'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color|api|@codemirror|@lezer|@marijn)/)', 'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color|api)/)',
], ],
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'], setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
testPathIgnorePatterns: ['/node_modules/', '/public/'], testPathIgnorePatterns: ['/node_modules/', '/public/'],

View File

@@ -28,8 +28,6 @@
"dependencies": { "dependencies": {
"@ant-design/colors": "6.0.0", "@ant-design/colors": "6.0.0",
"@ant-design/icons": "4.8.0", "@ant-design/icons": "4.8.0",
"@codemirror/autocomplete": "6.18.6",
"@codemirror/lang-javascript": "6.2.3",
"@dnd-kit/core": "6.1.0", "@dnd-kit/core": "6.1.0",
"@dnd-kit/modifiers": "7.0.0", "@dnd-kit/modifiers": "7.0.0",
"@dnd-kit/sortable": "8.0.0", "@dnd-kit/sortable": "8.0.0",
@@ -38,7 +36,6 @@
"@mdx-js/loader": "2.3.0", "@mdx-js/loader": "2.3.0",
"@mdx-js/react": "2.3.0", "@mdx-js/react": "2.3.0",
"@monaco-editor/react": "^4.3.1", "@monaco-editor/react": "^4.3.1",
"@playwright/test": "1.54.1",
"@radix-ui/react-tabs": "1.0.4", "@radix-ui/react-tabs": "1.0.4",
"@radix-ui/react-tooltip": "1.0.7", "@radix-ui/react-tooltip": "1.0.7",
"@sentry/react": "8.41.0", "@sentry/react": "8.41.0",
@@ -46,9 +43,6 @@
"@signozhq/design-tokens": "1.1.4", "@signozhq/design-tokens": "1.1.4",
"@tanstack/react-table": "8.20.6", "@tanstack/react-table": "8.20.6",
"@tanstack/react-virtual": "3.11.2", "@tanstack/react-virtual": "3.11.2",
"@uiw/codemirror-theme-github": "4.24.1",
"@uiw/codemirror-theme-copilot": "4.23.11",
"@uiw/react-codemirror": "4.23.10",
"@uiw/react-md-editor": "3.23.5", "@uiw/react-md-editor": "3.23.5",
"@visx/group": "3.3.0", "@visx/group": "3.3.0",
"@visx/hierarchy": "3.12.0", "@visx/hierarchy": "3.12.0",
@@ -58,7 +52,6 @@
"ansi-to-html": "0.7.2", "ansi-to-html": "0.7.2",
"antd": "5.11.0", "antd": "5.11.0",
"antd-table-saveas-excel": "2.2.1", "antd-table-saveas-excel": "2.2.1",
"antlr4": "4.13.2",
"axios": "1.8.2", "axios": "1.8.2",
"babel-eslint": "^10.1.0", "babel-eslint": "^10.1.0",
"babel-jest": "^29.6.4", "babel-jest": "^29.6.4",
@@ -85,7 +78,7 @@
"fontfaceobserver": "2.3.0", "fontfaceobserver": "2.3.0",
"history": "4.10.1", "history": "4.10.1",
"html-webpack-plugin": "5.5.0", "html-webpack-plugin": "5.5.0",
"http-proxy-middleware": "3.0.5", "http-proxy-middleware": "3.0.3",
"http-status-codes": "2.3.0", "http-status-codes": "2.3.0",
"i18next": "^21.6.12", "i18next": "^21.6.12",
"i18next-browser-languagedetector": "^6.1.3", "i18next-browser-languagedetector": "^6.1.3",
@@ -141,7 +134,7 @@
"uuid": "^8.3.2", "uuid": "^8.3.2",
"web-vitals": "^0.2.4", "web-vitals": "^0.2.4",
"webpack": "5.94.0", "webpack": "5.94.0",
"webpack-dev-server": "^5.2.1", "webpack-dev-server": "^4.15.2",
"webpack-retry-chunk-load-plugin": "3.1.1", "webpack-retry-chunk-load-plugin": "3.1.1",
"xstate": "^4.31.0" "xstate": "^4.31.0"
}, },
@@ -204,6 +197,7 @@
"babel-plugin-styled-components": "^1.12.0", "babel-plugin-styled-components": "^1.12.0",
"compression-webpack-plugin": "9.0.0", "compression-webpack-plugin": "9.0.0",
"copy-webpack-plugin": "^11.0.0", "copy-webpack-plugin": "^11.0.0",
"critters-webpack-plugin": "^3.0.1",
"eslint": "^7.32.0", "eslint": "^7.32.0",
"eslint-config-airbnb": "^19.0.4", "eslint-config-airbnb": "^19.0.4",
"eslint-config-airbnb-typescript": "^16.1.4", "eslint-config-airbnb-typescript": "^16.1.4",
@@ -220,9 +214,7 @@
"eslint-plugin-simple-import-sort": "^7.0.0", "eslint-plugin-simple-import-sort": "^7.0.0",
"eslint-plugin-sonarjs": "^0.12.0", "eslint-plugin-sonarjs": "^0.12.0",
"husky": "^7.0.4", "husky": "^7.0.4",
"image-minimizer-webpack-plugin": "^4.0.0", "image-webpack-loader": "8.1.0",
"imagemin": "^8.0.1",
"imagemin-svgo": "^10.0.1",
"is-ci": "^3.0.1", "is-ci": "^3.0.1",
"jest-styled-components": "^7.0.8", "jest-styled-components": "^7.0.8",
"lint-staged": "^12.5.0", "lint-staged": "^12.5.0",
@@ -239,12 +231,11 @@
"redux-mock-store": "1.5.4", "redux-mock-store": "1.5.4",
"sass": "1.66.1", "sass": "1.66.1",
"sass-loader": "13.3.2", "sass-loader": "13.3.2",
"sharp": "^0.33.4",
"ts-jest": "^27.1.5", "ts-jest": "^27.1.5",
"ts-node": "^10.2.1", "ts-node": "^10.2.1",
"typescript-plugin-css-modules": "5.2.0", "typescript-plugin-css-modules": "5.0.1",
"webpack-bundle-analyzer": "^4.5.0", "webpack-bundle-analyzer": "^4.5.0",
"webpack-cli": "^5.1.4" "webpack-cli": "^4.9.2"
}, },
"lint-staged": { "lint-staged": {
"*.(js|jsx|ts|tsx)": [ "*.(js|jsx|ts|tsx)": [
@@ -260,12 +251,10 @@
"xml2js": "0.5.0", "xml2js": "0.5.0",
"phin": "^3.7.1", "phin": "^3.7.1",
"body-parser": "1.20.3", "body-parser": "1.20.3",
"http-proxy-middleware": "3.0.5", "http-proxy-middleware": "3.0.3",
"cross-spawn": "7.0.5", "cross-spawn": "7.0.5",
"cookie": "^0.7.1", "cookie": "^0.7.1",
"serialize-javascript": "6.0.2", "serialize-javascript": "6.0.2",
"prismjs": "1.30.0", "prismjs": "1.30.0"
"got": "11.8.5",
"form-data": "4.0.4"
} }
} }

View File

@@ -1,95 +0,0 @@
import { defineConfig, devices } from '@playwright/test';
import dotenv from 'dotenv';
import path from 'path';
// Read from ".env" file.
dotenv.config({ path: path.resolve(__dirname, '.env') });
/**
* See https://playwright.dev/docs/test-configuration.
*/
export default defineConfig({
testDir: './e2e/tests',
/* Run tests in files in parallel */
fullyParallel: true,
/* Fail the build on CI if you accidentally left test.only in the source code. */
forbidOnly: !!process.env.CI,
/* Retry on CI only */
retries: process.env.CI ? 2 : 0,
/* Run tests in parallel even in CI - optimized for GitHub Actions free tier */
workers: process.env.CI ? 2 : undefined,
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
reporter: 'html',
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
use: {
/* Base URL to use in actions like `await page.goto('/')`. */
baseURL:
process.env.SIGNOZ_E2E_BASE_URL || 'https://app.us.staging.signoz.cloud',
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
trace: 'on-first-retry',
colorScheme: 'dark',
locale: 'en-US',
viewport: { width: 1280, height: 720 },
},
/* Configure projects for major browsers */
projects: [
{
name: 'chromium',
use: {
launchOptions: { args: ['--start-maximized'] },
viewport: null,
colorScheme: 'dark',
locale: 'en-US',
baseURL:
process.env.SIGNOZ_E2E_BASE_URL || 'https://app.us.staging.signoz.cloud',
trace: 'on-first-retry',
},
},
{
name: 'firefox',
use: { ...devices['Desktop Firefox'] },
},
{
name: 'webkit',
use: { ...devices['Desktop Safari'] },
},
/* Test against mobile viewports. */
// {
// name: 'Mobile Chrome',
// use: { ...devices['Pixel 5'] },
// },
// {
// name: 'Mobile Safari',
// use: { ...devices['iPhone 12'] },
// },
/* Test against branded browsers. */
// {
// name: 'Microsoft Edge',
// use: { ...devices['Desktop Edge'], channel: 'msedge' },
// },
// {
// name: 'Google Chrome',
// use: { ...devices['Desktop Chrome'], channel: 'chrome' },
// },
],
/* Run your local dev server before starting the tests */
// webServer: {
// command: 'npm run start',
// url: 'http://localhost:3000',
// reuseExistingServer: !process.env.CI,
// },
});

View File

@@ -1,16 +0,0 @@
RULE: All test code for this repo must be generated by following the step-by-step Playwright MCP process as described below.
- You are a playwright test generator.
- You are given a scenario and you need to generate a playwright test for it.
- Use login util if not logged in.
- DO NOT generate test code based on the scenario alone.
- DO run steps one by one using the tools provided by the Playwright MCP.
- Only after all steps are completed, emit a Playwright TypeScript test that uses @playwright/test based on message history
- Gather correct selectors before writing the test
- DO NOT validate dynamic content in the tests; only validate correctness against stable metadata
- Always inspect the DOM at each navigation or interaction step to determine the correct selector for the next action. Do not assume selectors; confirm via inspection before proceeding.
- Assert visibility of definitive/static elements in the UI (such as labels, headings, or section titles) rather than dynamic values or content that may change between runs.
- Save generated test file in the tests directory
- Execute the test file and iterate until the test passes
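For illustration, a minimal sketch of the kind of test this process is expected to produce, condensed from the specs above; per the rules, each selector would first be confirmed by DOM inspection during the MCP run, and only static elements are asserted:
import { expect, test } from '@playwright/test';
import { ensureLoggedIn } from '../../../utils/login.util';
test('Example Settings Tab - View (illustrative)', async ({ page }) => {
await ensureLoggedIn(page);
// Selectors confirmed via inspection in the recorded MCP session
await page.getByTestId('settings-nav-item').click();
await page.getByRole('menuitem', { name: 'Account Settings' }).click();
// Assert only definitive/static elements, never dynamic values
await expect(page.getByTestId('settings-page-title')).toBeVisible();
});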

File diff suppressed because one or more lines are too long

(Binary image file changed; diff not rendered. Before: 6.1 KiB)

Some files were not shown because too many files have changed in this diff.