Compare commits


9 Commits

1385 changed files with 18488 additions and 102763 deletions


@@ -1,73 +0,0 @@
services:
clickhouse:
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: clickhouse
volumes:
- ${PWD}/fs/etc/clickhouse-server/config.d/config.xml:/etc/clickhouse-server/config.d/config.xml
- ${PWD}/fs/etc/clickhouse-server/users.d/users.xml:/etc/clickhouse-server/users.d/users.xml
- ${PWD}/fs/tmp/var/lib/clickhouse/:/var/lib/clickhouse/
- ${PWD}/fs/tmp/var/lib/clickhouse/user_scripts/:/var/lib/clickhouse/user_scripts/
ports:
- '127.0.0.1:8123:8123'
- '127.0.0.1:9000:9000'
tty: true
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
depends_on:
- zookeeper
zookeeper:
image: bitnami/zookeeper:3.7.1
container_name: zookeeper
volumes:
- ${PWD}/fs/tmp/zookeeper:/bitnami/zookeeper
ports:
- '127.0.0.1:2181:2181'
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
schema-migrator-sync:
image: signoz/signoz-schema-migrator:0.111.29
container_name: schema-migrator-sync
command:
- sync
- --cluster-name=cluster
- --dsn=tcp://clickhouse:9000
- --replication=true
- --up=
depends_on:
clickhouse:
condition: service_healthy
restart: on-failure
schema-migrator-async:
image: signoz/signoz-schema-migrator:0.111.29
container_name: schema-migrator-async
command:
- async
- --cluster-name=cluster
- --dsn=tcp://clickhouse:9000
- --replication=true
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
restart: on-failure


@@ -1,47 +0,0 @@
<clickhouse replace="true">
<logger>
<level>information</level>
<formatting>
<type>json</type>
</formatting>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>3</count>
</logger>
<display_name>cluster</display_name>
<listen_host>0.0.0.0</listen_host>
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<user_directories>
<users_xml>
<path>users.xml</path>
</users_xml>
<local_directory>
<path>/var/lib/clickhouse/access/</path>
</local_directory>
</user_directories>
<distributed_ddl>
<path>/clickhouse/task_queue/ddl</path>
</distributed_ddl>
<remote_servers>
<cluster>
<shard>
<replica>
<host>clickhouse</host>
<port>9000</port>
</replica>
</shard>
</cluster>
</remote_servers>
<zookeeper>
<node>
<host>zookeeper</host>
<port>2181</port>
</node>
</zookeeper>
<macros>
<shard>01</shard>
<replica>01</replica>
</macros>
</clickhouse>


@@ -1,36 +0,0 @@
<?xml version="1.0"?>
<clickhouse replace="true">
<profiles>
<default>
<max_memory_usage>10000000000</max_memory_usage>
<use_uncompressed_cache>0</use_uncompressed_cache>
<load_balancing>in_order</load_balancing>
<log_queries>1</log_queries>
</default>
</profiles>
<users>
<default>
<profile>default</profile>
<networks>
<ip>::/0</ip>
</networks>
<quota>default</quota>
<access_management>1</access_management>
<named_collection_control>1</named_collection_control>
<show_named_collections>1</show_named_collections>
<show_named_collections_secrets>1</show_named_collections_secrets>
</default>
</users>
<quotas>
<default>
<interval>
<duration>3600</duration>
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</clickhouse>


@@ -1,27 +0,0 @@
services:
postgres:
image: postgres:15
container_name: postgres
environment:
POSTGRES_DB: signoz
POSTGRES_USER: postgres
POSTGRES_PASSWORD: password
healthcheck:
test:
[
"CMD",
"pg_isready",
"-d",
"signoz",
"-U",
"postgres"
]
interval: 30s
timeout: 30s
retries: 3
restart: on-failure
ports:
- "127.0.0.1:5432:5432/tcp"
volumes:
- ${PWD}/fs/tmp/var/lib/postgresql/data/:/var/lib/postgresql/data/


@@ -1,7 +1,6 @@
.git
.github
.vscode
.devenv
README.md
deploy
sample-apps

.github/CODEOWNERS

@@ -6,8 +6,5 @@
/frontend/src/container/MetricsApplication @srikanthccv
/frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
/deploy/ @SigNoz/devops
/sample-apps/ @SigNoz/devops
.github @SigNoz/devops
/pkg/config/ @grandwizard28
/pkg/errors/ @grandwizard28
/pkg/factory/ @grandwizard28
/pkg/types/ @grandwizard28

.github/workflows/README.md

@@ -0,0 +1,42 @@
# GitHub Actions
## Testing the UI manually on each PR
First, we need to make sure the UI is ready:
* Check the `Start tunnel` step in the `e2e-k8s/deploy-on-k3s-cluster` job and make sure you see `your url is: https://pull-<number>-signoz.loca.lt`
* This job keeps running until the PR is merged or closed, so that the local tunnel stays alive
- GitHub will cancel this job if the PR hasn't been merged after 6h
- if the job was cancelled, go to the action and press `Re-run all jobs`
Now you can open your browser at https://pull-<number>-signoz.loca.lt and check the UI; a quick reachability check is sketched below.
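For example, assuming `curl` is available locally, the tunnel can be checked before opening the browser (the PR number below is a placeholder):
```bash
# Hypothetical PR number; use the one printed by the `Start tunnel` step.
curl -sSf https://pull-1234-signoz.loca.lt > /dev/null && echo "tunnel is up"
```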
## Environment Variables
To run the GitHub workflows, a few environment variables need to be added to the GitHub secrets. The table below lists them; an example of setting them with the GitHub CLI follows the table.
<table>
<tr>
<th> Variables </th>
<th> Description </th>
<th> Example </th>
</tr>
<tr>
<td> REPONAME </td>
<td> Provide the DockerHub user/organisation name of the image. </td>
<td> signoz</td>
</tr>
<tr>
<td> DOCKERHUB_USERNAME </td>
<td> Docker hub username </td>
<td> signoz</td>
</tr>
<tr>
<td> DOCKERHUB_TOKEN </td>
<td> Docker hub password/token with push permission </td>
<td> **** </td>
</tr>
<tr>
<td> SONAR_TOKEN </td>
<td> <a href="https://sonarcloud.io">SonarCloud</a> token </td>
<td> **** </td>
</tr>
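For instance, assuming the GitHub CLI (`gh`) is authenticated against the repository, the secrets could be added as follows (all values below are placeholders):
```bash
# Placeholder values; replace them with your own credentials/tokens.
gh secret set REPONAME --body "signoz"
gh secret set DOCKERHUB_USERNAME --body "signoz"
gh secret set DOCKERHUB_TOKEN --body "<dockerhub-token-with-push-permission>"
gh secret set SONAR_TOKEN --body "<sonarcloud-token>"
```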


@@ -1,81 +0,0 @@
name: build-community
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
defaults:
run:
shell: bash
env:
PRIMUS_HOME: .primus
MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
jobs:
prepare:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.build-info.outputs.version }}
hash: ${{ steps.build-info.outputs.hash }}
time: ${{ steps.build-info.outputs.time }}
branch: ${{ steps.build-info.outputs.branch }}
steps:
- name: self-checkout
uses: actions/checkout@v4
- id: token
name: github-token-gen
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PRIMUS_APP_ID }}
private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
- name: primus-checkout
uses: actions/checkout@v4
with:
repository: signoz/primus
ref: main
path: .primus
token: ${{ steps.token.outputs.token }}
- name: build-info
id: build-info
run: |
echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
js-build:
uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
needs: prepare
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend
JS_OUTPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
JS_OUTPUT_ARTIFACT_PATH: frontend/build
DOCKER_BUILD: false
DOCKER_MANIFEST: false
go-build:
uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
needs: [prepare, js-build]
secrets: inherit
with:
PRIMUS_REF: main
GO_NAME: signoz-community
GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
GO_INPUT_ARTIFACT_PATH: frontend/build
GO_BUILD_CONTEXT: ./pkg/query-service
GO_BUILD_FLAGS: >-
-tags timetzdata
-ldflags='-linkmode external -extldflags \"-static\" -s -w
-X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
-X github.com/SigNoz/signoz/pkg/version.variant=community
-X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
-X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
-X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}'
GO_CGO_ENABLED: 1
DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
DOCKER_MANIFEST: true
DOCKER_PROVIDERS: dockerhub


@@ -1,113 +0,0 @@
name: build-enterprise
on:
push:
tags:
- v*
defaults:
run:
shell: bash
env:
PRIMUS_HOME: .primus
MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
jobs:
prepare:
runs-on: ubuntu-latest
outputs:
docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
version: ${{ steps.build-info.outputs.version }}
hash: ${{ steps.build-info.outputs.hash }}
time: ${{ steps.build-info.outputs.time }}
branch: ${{ steps.build-info.outputs.branch }}
steps:
- name: self-checkout
uses: actions/checkout@v4
- id: token
name: github-token-gen
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PRIMUS_APP_ID }}
private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
- name: primus-checkout
uses: actions/checkout@v4
with:
repository: signoz/primus
ref: main
path: .primus
token: ${{ steps.token.outputs.token }}
- name: build-info
id: build-info
run: |
echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
- name: set-docker-providers
id: set-docker-providers
run: |
if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ || ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
echo "providers=dockerhub gcp" >> $GITHUB_OUTPUT
else
echo "providers=gcp" >> $GITHUB_OUTPUT
fi
- name: create-dotenv
run: |
mkdir -p frontend
echo 'CI=1' > frontend/.env
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
- name: cache-dotenv
uses: actions/cache@v4
with:
path: frontend/.env
key: enterprise-dotenv-${{ github.sha }}
js-build:
uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
needs: prepare
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend
JS_INPUT_ARTIFACT_CACHE_KEY: enterprise-dotenv-${{ github.sha }}
JS_INPUT_ARTIFACT_PATH: frontend/.env
JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
JS_OUTPUT_ARTIFACT_PATH: frontend/build
DOCKER_BUILD: false
DOCKER_MANIFEST: false
go-build:
uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
needs: [prepare, js-build]
secrets: inherit
with:
PRIMUS_REF: main
GO_INPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
GO_INPUT_ARTIFACT_PATH: frontend/build
GO_BUILD_CONTEXT: ./ee/query-service
GO_BUILD_FLAGS: >-
-tags timetzdata
-ldflags='-linkmode external -extldflags \"-static\" -s -w
-X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
-X github.com/SigNoz/signoz/pkg/version.variant=enterprise
-X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
-X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
-X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
-X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
-X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1'
GO_CGO_ENABLED: 1
DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
DOCKER_MANIFEST: true
DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}


@@ -1,122 +0,0 @@
name: build-staging
on:
push:
branches:
- main
pull_request:
types: [labeled]
defaults:
run:
shell: bash
env:
PRIMUS_HOME: .primus
MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk
jobs:
prepare:
runs-on: ubuntu-latest
if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
outputs:
version: ${{ steps.build-info.outputs.version }}
hash: ${{ steps.build-info.outputs.hash }}
time: ${{ steps.build-info.outputs.time }}
branch: ${{ steps.build-info.outputs.branch }}
deployment: ${{ steps.build-info.outputs.deployment }}
steps:
- name: self-checkout
uses: actions/checkout@v4
- id: token
name: github-token-gen
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PRIMUS_APP_ID }}
private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
- name: primus-checkout
uses: actions/checkout@v4
with:
repository: signoz/primus
ref: main
path: .primus
token: ${{ steps.token.outputs.token }}
- name: build-info
id: build-info
run: |
echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
staging_label="${{ github.event.label.name }}"
if [[ "${staging_label}" == "staging:"* ]]; then
deployment=${staging_label#"staging:"}
elif [[ "${{ github.event.ref }}" == "refs/heads/main" ]]; then
deployment="staging"
else
echo "error: not able to determine deployment - please verify the PR label or the branch"
exit 1
fi
echo "deployment=${deployment}" >> $GITHUB_OUTPUT
- name: create-dotenv
run: |
mkdir -p frontend
echo 'CI=1' > frontend/.env
echo 'TUNNEL_URL=https://telemetry.staging.signoz.cloud/tunnel' >> frontend/.env
echo 'TUNNEL_DOMAIN=https://telemetry.staging.signoz.cloud' >> frontend/.env
- name: cache-dotenv
uses: actions/cache@v4
with:
path: frontend/.env
key: staging-dotenv-${{ github.sha }}
js-build:
uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
needs: prepare
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend
JS_INPUT_ARTIFACT_CACHE_KEY: staging-dotenv-${{ github.sha }}
JS_INPUT_ARTIFACT_PATH: frontend/.env
JS_OUTPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
JS_OUTPUT_ARTIFACT_PATH: frontend/build
DOCKER_BUILD: false
DOCKER_MANIFEST: false
go-build:
uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
needs: [prepare, js-build]
secrets: inherit
with:
PRIMUS_REF: main
GO_INPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
GO_INPUT_ARTIFACT_PATH: frontend/build
GO_BUILD_CONTEXT: ./ee/query-service
GO_BUILD_FLAGS: >-
-tags timetzdata
-ldflags='-linkmode external -extldflags \"-static\" -s -w
-X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
-X github.com/SigNoz/signoz/pkg/version.variant=enterprise
-X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
-X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
-X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
-X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
-X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1'
GO_CGO_ENABLED: 1
DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
DOCKER_MANIFEST: true
DOCKER_PROVIDERS: gcp
staging:
if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
secrets: inherit
needs: [prepare, go-build]
with:
PRIMUS_REF: main
GITHUB_ENVIRONMENT: staging
GITHUB_SILENT: true
GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
GITHUB_EVENT_NAME: releaser
GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"

.github/workflows/build.yaml

@@ -0,0 +1,89 @@
name: build-pipeline
on:
pull_request:
branches:
- main
- release/v*
jobs:
check-no-ee-references:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Run check
run: make check-no-ee-references
build-frontend:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install dependencies
run: cd frontend && yarn install
- name: Run ESLint
run: cd frontend && npm run lint
- name: Run Jest
run: cd frontend && npm run jest
- name: TSC
run: yarn tsc
working-directory: ./frontend
- name: Build frontend docker image
shell: bash
run: |
make build-frontend-amd64
build-frontend-ee:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
- name: Install dependencies
run: cd frontend && yarn install
- name: Run ESLint
run: cd frontend && npm run lint
- name: Run Jest
run: cd frontend && npm run jest
- name: TSC
run: yarn tsc
working-directory: ./frontend
- name: Build frontend docker image
shell: bash
run: |
make build-frontend-amd64
build-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Run tests
shell: bash
run: |
make test
- name: Build query-service image
shell: bash
run: |
make build-query-service-amd64
build-ee-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Build EE query-service image
shell: bash
run: |
make build-ee-query-service-amd64

.github/workflows/codeball.yml

@@ -0,0 +1,17 @@
name: Codeball
on: [pull_request]
jobs:
codeball_job:
runs-on: ubuntu-latest
name: Codeball
steps:
# Run Codeball on all new Pull Requests 🚀
# For customizations and more documentation, see https://github.com/sturdy-dev/codeball-action
- name: Codeball
uses: sturdy-dev/codeball-action@v2
with:
approvePullRequests: "true"
labelPullRequestsWhenApproved: "true"
labelPullRequestsWhenReviewNeeded: "false"
failJobsWhenReviewNeeded: "false"

.github/workflows/codeql.yaml

@@ -0,0 +1,71 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ main, v* ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
schedule:
- cron: '32 5 * * 5'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go', 'javascript', 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2


@@ -1,27 +0,0 @@
name: commitci
on:
pull_request:
branches:
- main
pull_request_target:
types:
- labeled
jobs:
refcheck:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
- name: check
run: |
if grep -R --include="*.go" '.*/ee/.*' pkg/; then
echo "Error: Found references to 'ee' packages in 'pkg' directory"
exit 1
else
echo "No references to 'ee' packages found in 'pkg' directory"
fi

.github/workflows/commitlint.yml

@@ -0,0 +1,13 @@
name: commitlint
on: [pull_request]
defaults:
run:
working-directory: frontend
jobs:
lint-commits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: wagoid/commitlint-github-action@v5


@@ -0,0 +1,27 @@
on:
pull_request_target:
types:
- closed
env:
GITHUB_ACCESS_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
PR_NUMBER: ${{ github.event.number }}
jobs:
create_issue_on_merge:
if: github.event.pull_request.merged == true
runs-on: ubuntu-latest
steps:
- name: Checkout Codebase
uses: actions/checkout@v4
with:
repository: signoz/gh-bot
- name: Use Node v16
uses: actions/setup-node@v4
with:
node-version: 16
- name: Setup Cache & Install Dependencies
uses: bahmutov/npm-install@v1
with:
install-command: yarn --frozen-lockfile
- name: Comment on PR
run: node create-issue.js

.github/workflows/dependency-review.yml

@@ -0,0 +1,22 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency Review'
on: [pull_request]
permissions:
contents: read
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
with:
fail-on-severity: high
uses: actions/dependency-review-action@v3

.github/workflows/e2e-k3s.yaml

@@ -0,0 +1,93 @@
name: e2e-k3s
on:
pull_request:
types: [labeled]
jobs:
e2e-k3s:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'ok-to-test' }}
env:
DOCKER_TAG: pull-${{ github.event.number }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Build query-service image
env:
DEV_BUILD: 1
run: make build-ee-query-service-amd64
- name: Build frontend image
run: make build-frontend-amd64
- name: Create a k3s cluster
uses: AbsaOSS/k3d-action@v2
with:
cluster-name: "signoz"
- name: Inject the images to the cluster
run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz
- name: Set up HotROD sample-app
run: |
# create sample-application namespace
kubectl create ns sample-application
# apply hotrod k8s manifest file
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
# wait for all deployments in sample-application namespace to be READY
kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
- name: Deploy the app
run: |
# add signoz helm repository
helm repo add signoz https://charts.signoz.io
# create platform namespace
kubectl create ns platform
# installing signoz using helm
helm install my-release signoz/signoz -n platform \
--wait \
--timeout 10m0s \
--set frontend.service.type=LoadBalancer \
--set queryService.image.tag=$DOCKER_TAG \
--set frontend.image.tag=$DOCKER_TAG
# get pods, services and the container images
kubectl get pods -n platform
kubectl get svc -n platform
- name: Kick off a sample-app workload
run: |
# start the locust swarm
kubectl --namespace sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
- name: Get short commit SHA, display tunnel URL and IP Address of the worker node
id: get-subdomain
run: |
subdomain="pr-$(git rev-parse --short HEAD)"
echo "URL for tunnelling: https://$subdomain.loca.lt"
echo "subdomain=$subdomain" >> $GITHUB_OUTPUT
worker_ip="$(curl -4 -s ipconfig.io/ip)"
echo "Worker node IP address: $worker_ip"
- name: Start tunnel
env:
SUBDOMAIN: ${{ steps.get-subdomain.outputs.subdomain }}
run: |
npm install -g localtunnel
host=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f4)
port=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1)
lt -p $port -l $host -s $SUBDOMAIN


@@ -1,63 +0,0 @@
name: goci
on:
pull_request:
branches:
- main
pull_request_target:
types:
- labeled
jobs:
test:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/go-test.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
GO_TEST_CONTEXT: ./...
fmt:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/go-fmt.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
lint:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/go-lint.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
build:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
runs-on: ubuntu-latest
steps:
- name: self-checkout
uses: actions/checkout@v4
- name: go-install
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: qemu-install
uses: docker/setup-qemu-action@v3
- name: aarch64-install
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: docker-community
shell: bash
run: |
make docker-build-community
- name: docker-enterprise
shell: bash
run: |
make docker-build-enterprise


@@ -1,155 +0,0 @@
name: gor-signoz-community
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
permissions:
contents: write
jobs:
prepare:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: build-frontend
run: make js-build
- name: upload-frontend-artifact
uses: actions/upload-artifact@v4
with:
name: community-frontend-build-${{ env.sha_short }}
path: frontend/build
build:
needs: prepare
strategy:
matrix:
os:
- ubuntu-latest
- macos-latest
env:
CONFIG_PATH: pkg/query-service/.goreleaser.yaml
runs-on: ${{ matrix.os }}
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
if: matrix.os == 'ubuntu-latest'
- name: setup-buildx
uses: docker/setup-buildx-action@v3
if: matrix.os == 'ubuntu-latest'
- name: ghcr-login
uses: docker/login-action@v3
if: matrix.os != 'macos-latest'
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: cross-compilation-tools
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: download-frontend-artifact
uses: actions/download-artifact@v4
with:
name: community-frontend-build-${{ env.sha_short }}
path: frontend/build
- name: cache-linux
uses: actions/cache@v4
if: matrix.os == 'ubuntu-latest'
with:
path: dist/linux
key: signoz-community-linux-${{ env.sha_short }}
- name: cache-darwin
uses: actions/cache@v4
if: matrix.os == 'macos-latest'
with:
path: dist/darwin
key: signoz-community-darwin-${{ env.sha_short }}
- name: release
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser-pro
version: '~> v2'
args: release --config ${{ env.CONFIG_PATH }} --clean --split
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
release:
runs-on: ubuntu-latest
needs: build
env:
DOCKER_CLI_EXPERIMENTAL: "enabled"
WORKDIR: pkg/query-service
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: setup-buildx
uses: docker/setup-buildx-action@v3
- name: cosign-installer
uses: sigstore/cosign-installer@v3.8.1
- name: download-syft
uses: anchore/sbom-action/download-syft@v0.18.0
- name: ghcr-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
# copy the caches from build
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: cache-linux
id: cache-linux
uses: actions/cache@v4
with:
path: dist/linux
key: signoz-community-linux-${{ env.sha_short }}
- name: cache-darwin
id: cache-darwin
uses: actions/cache@v4
with:
path: dist/darwin
key: signoz-community-darwin-${{ env.sha_short }}
# release
- uses: goreleaser/goreleaser-action@v6
if: steps.cache-linux.outputs.cache-hit == 'true' && steps.cache-darwin.outputs.cache-hit == 'true' # only run if caches hit
with:
distribution: goreleaser-pro
version: '~> v2'
args: continue --merge
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}


@@ -1,168 +0,0 @@
name: gor-signoz
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
permissions:
contents: write
jobs:
prepare:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: dotenv-frontend
working-directory: frontend
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > .env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> .env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> .env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> .env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> .env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> .env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> .env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> .env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> .env
echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> .env
- name: build-frontend
run: make js-build
- name: upload-frontend-artifact
uses: actions/upload-artifact@v4
with:
name: frontend-build-${{ env.sha_short }}
path: frontend/build
build:
needs: prepare
strategy:
matrix:
os:
- ubuntu-latest
- macos-latest
env:
CONFIG_PATH: ee/query-service/.goreleaser.yaml
runs-on: ${{ matrix.os }}
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
if: matrix.os == 'ubuntu-latest'
- name: setup-buildx
uses: docker/setup-buildx-action@v3
if: matrix.os == 'ubuntu-latest'
- name: ghcr-login
uses: docker/login-action@v3
if: matrix.os != 'macos-latest'
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: cross-compilation-tools
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: download-frontend-artifact
uses: actions/download-artifact@v4
with:
name: frontend-build-${{ env.sha_short }}
path: frontend/build
- name: cache-linux
uses: actions/cache@v4
if: matrix.os == 'ubuntu-latest'
with:
path: dist/linux
key: signoz-linux-${{ env.sha_short }}
- name: cache-darwin
uses: actions/cache@v4
if: matrix.os == 'macos-latest'
with:
path: dist/darwin
key: signoz-darwin-${{ env.sha_short }}
- name: release
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser-pro
version: '~> v2'
args: release --config ${{ env.CONFIG_PATH }} --clean --split
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
release:
runs-on: ubuntu-latest
needs: build
env:
DOCKER_CLI_EXPERIMENTAL: "enabled"
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup-qemu
uses: docker/setup-qemu-action@v3
- name: setup-buildx
uses: docker/setup-buildx-action@v3
- name: cosign-installer
uses: sigstore/cosign-installer@v3.8.1
- name: download-syft
uses: anchore/sbom-action/download-syft@v0.18.0
- name: ghcr-login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: setup-go
uses: actions/setup-go@v5
with:
go-version: "1.22"
# copy the caches from build
- name: get-sha
shell: bash
run: |
echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: cache-linux
id: cache-linux
uses: actions/cache@v4
with:
path: dist/linux
key: signoz-linux-${{ env.sha_short }}
- name: cache-darwin
id: cache-darwin
uses: actions/cache@v4
with:
path: dist/darwin
key: signoz-darwin-${{ env.sha_short }}
# release
- uses: goreleaser/goreleaser-action@v6
if: steps.cache-linux.outputs.cache-hit == 'true' && steps.cache-darwin.outputs.cache-hit == 'true' # only run if caches hit
with:
distribution: goreleaser-pro
version: '~> v2'
args: continue --merge
workdir: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}


@@ -1,8 +1,9 @@
name: gor-histogramquantile
name: goreleaser
on:
push:
tags:
- v*
- histogram-quantile/v*
permissions:


@@ -1,53 +0,0 @@
name: integrationci
on:
pull_request:
types:
- labeled
pull_request_target:
types:
- labeled
jobs:
test:
strategy:
fail-fast: false
matrix:
src:
- bootstrap
sqlstore-provider:
- postgres
- sqlite
clickhouse-version:
- 24.1.2-alpine
- 24.12-alpine
schema-migrator-version:
- v0.111.38
postgres-version:
- 15
if: |
((github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))) && contains(github.event.pull_request.labels.*.name, 'safe-to-integrate')
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
- name: python
uses: actions/setup-python@v5
with:
python-version: 3.13
- name: poetry
run: |
python -m pip install poetry==2.1.2
python -m poetry config virtualenvs.in-project true
cd tests/integration && poetry install --no-root
- name: run
run: |
cd tests/integration && \
poetry run pytest \
--basetemp=./tmp/ \
src/${{matrix.src}} \
--sqlstore-provider ${{matrix.sqlstore-provider}} \
--postgres-version ${{matrix.postgres-version}} \
--clickhouse-version ${{matrix.clickhouse-version}} \
--schema-migrator-version ${{matrix.schema-migrator-version}}


@@ -0,0 +1,32 @@
name: Jest Coverage - changed files
on:
pull_request:
branches:
- main
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
ref: "refs/heads/main"
token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
- name: Fetch branch
run: git fetch origin ${{ github.event.pull_request.head.ref }}
- run: |
git checkout ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-node@v4
with:
node-version: lts/*
- name: Install dependencies
run: cd frontend && npm install -g yarn && yarn
- name: npm run test:changedsince
run: cd frontend && npm run i18n:generate-hash && npm run test:changedsince


@@ -1,50 +0,0 @@
name: jsci
on:
pull_request:
branches:
- main
pull_request_target:
types:
- labeled
jobs:
tsc:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v4
- name: install
run: cd frontend && yarn install
- name: tsc
run: cd frontend && yarn tsc
test:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/js-test.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend
fmt:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/js-fmt.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend
lint:
if: |
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
uses: signoz/primus.workflows/.github/workflows/js-lint.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
JS_SRC: frontend

.github/workflows/playwright.yaml

@@ -0,0 +1,24 @@
name: Playwright Tests
on: [pull_request]
jobs:
playwright:
defaults:
run:
working-directory: frontend
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: "16.x"
- name: Install dependencies
run: CI=1 yarn install
- name: Install Playwright
run: npx playwright install --with-deps
- name: Run Playwright tests
run: yarn playwright
env:
# This might depend on your test-runner/language binding
PLAYWRIGHT_TEST_BASE_URL: ${{ secrets.PLAYWRIGHT_TEST_BASE_URL }}


@@ -1,18 +0,0 @@
name: postci
on:
pull_request_target:
branches:
- main
types:
- synchronize
jobs:
remove:
if: github.event.pull_request.head.repo.fork || github.event.pull_request.user.login == 'dependabot[bot]'
uses: signoz/primus.workflows/.github/workflows/github-label.yaml@main
secrets: inherit
with:
PRIMUS_REF: main
GITHUB_LABEL_ACTION: remove
GITHUB_LABEL_NAME: safe-to-test


@@ -1,9 +1,9 @@
name: prereleaser
on:
# schedule every wednesday 6:30 AM UTC (12:00 PM IST)
# schedule every wednesday 9:30 AM UTC (3pm IST)
schedule:
- cron: '30 6 * * 3'
- cron: '30 9 * * 3'
# allow manual triggering of the workflow by a maintainer
workflow_dispatch:

.github/workflows/push.yaml

@@ -0,0 +1,207 @@
name: push
on:
push:
branches:
- main
tags:
- v*
jobs:
image-build-and-push-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
fi
- name: Install cross-compilation tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: Build and push docker image
run: make build-push-query-service
image-build-and-push-ee-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
- name: Setup golang
uses: actions/setup-go@v4
with:
go-version: "1.21"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Install cross-compilation tools
run: |
set -ex
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
- name: Build and push docker image
run: make build-push-ee-query-service
image-build-and-push-frontend:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install dependencies
working-directory: frontend
run: yarn install
- name: Run Prettier
working-directory: frontend
run: npm run prettify
continue-on-error: true
- name: Run ESLint
working-directory: frontend
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-frontend
image-build-and-push-frontend-ee:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Create .env file
run: |
echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
- name: Install dependencies
working-directory: frontend
run: yarn install
- name: Run Prettier
working-directory: frontend
run: npm run prettify
continue-on-error: true
- name: Run ESLint
working-directory: frontend
run: npm run lint
continue-on-error: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=${tag}-ee" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest-ee" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-ee" >> $GITHUB_ENV
fi
- name: Build and push docker image
run: make build-push-frontend

.github/workflows/remove-label.yaml

@@ -0,0 +1,22 @@
name: remove-label
on:
pull_request_target:
types: [synchronize]
jobs:
remove:
runs-on: ubuntu-latest
steps:
- name: Remove label ok-to-test from PR
uses: buildsville/add-remove-label@v2.0.0
with:
label: ok-to-test
type: remove
token: ${{ secrets.GITHUB_TOKEN }}
- name: Remove label testing-deploy from PR
uses: buildsville/add-remove-label@v2.0.0
with:
label: testing-deploy
type: remove
token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/sonar.yml

@@ -0,0 +1,25 @@
name: sonar
on:
pull_request:
branches:
- main
paths:
- 'frontend/**'
defaults:
run:
working-directory: frontend
jobs:
sonar-analysis:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Sonar analysis
uses: sonarsource/sonarcloud-github-action@master
with:
projectBaseDir: frontend
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}


@@ -0,0 +1,56 @@
name: staging-deployment
# Trigger deployment only on push to main branch
on:
push:
branches:
- main
jobs:
deploy:
name: Deploy latest main branch to staging
runs-on: ubuntu-latest
environment: staging
permissions:
contents: 'read'
id-token: 'write'
steps:
- id: 'auth'
uses: 'google-github-actions/auth@v2'
with:
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
- name: 'sdk'
uses: 'google-github-actions/setup-gcloud@v2'
- name: 'ssh'
shell: bash
env:
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
GITHUB_SHA: ${{ github.sha }}
GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
GCP_ZONE: ${{ secrets.GCP_ZONE }}
GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
CLOUDSDK_CORE_DISABLE_PROMPTS: 1
run: |
read -r -d '' COMMAND <<EOF || true
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export OTELCOL_TAG="main"
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
export KAFKA_SPAN_EVAL="true"
docker system prune --force
docker pull signoz/signoz-otel-collector:main
docker pull signoz/signoz-schema-migrator:main
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout ${GITHUB_BRANCH}
git pull
make build-ee-query-service-amd64
make build-frontend-amd64
make run-testing
EOF
gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"


@@ -0,0 +1,56 @@
name: testing-deployment
# Trigger deployment only on testing-deploy label on pull request
on:
pull_request:
types: [labeled]
jobs:
deploy:
name: Deploy PR branch to testing
runs-on: ubuntu-latest
environment: testing
if: ${{ github.event.label.name == 'testing-deploy' }}
permissions:
contents: 'read'
id-token: 'write'
steps:
- id: 'auth'
uses: 'google-github-actions/auth@v2'
with:
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
- name: 'sdk'
uses: 'google-github-actions/setup-gcloud@v2'
- name: 'ssh'
shell: bash
env:
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }}
GITHUB_SHA: ${{ github.sha }}
GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
GCP_ZONE: ${{ secrets.GCP_ZONE }}
GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
CLOUDSDK_CORE_DISABLE_PROMPTS: 1
run: |
read -r -d '' COMMAND <<EOF || true
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
echo "GITHUB_SHA: ${GITHUB_SHA}"
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export DEV_BUILD="1"
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
docker system prune --force
cd ~/signoz
git status
git add .
git stash push -m "stashed on $(date --iso-8601=seconds)"
git fetch origin
git checkout main
git pull
# This is added to handle the scenario when a new commit in the PR is force-pushed
git branch -D ${GITHUB_BRANCH}
git checkout --track origin/${GITHUB_BRANCH}
make build-ee-query-service-amd64
make build-frontend-amd64
make run-testing
EOF
gcloud beta compute ssh ${GCP_INSTANCE} --zone ${GCP_ZONE} --ssh-key-expire-after=15m --tunnel-through-iap --project ${GCP_PROJECT} --command "${COMMAND}"

.gitignore

@@ -54,7 +54,6 @@ ee/query-service/tests/test-deploy/data/
bin/
.local/
*/query-service/queries.active
ee/query-service/db
# e2e
@@ -77,156 +76,3 @@ dist/
# ignore user_scripts that is fetched by init-clickhouse
deploy/common/clickhouse/user_scripts/
queries.active
# tmp
**/tmp/**
# .devenv tmp files
.devenv/**/tmp/**
.qodo
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml
# ruff
.ruff_cache/
# LSP config files
pyrightconfig.json
# End of https://www.toptal.com/developers/gitignore/api/python


@@ -16,8 +16,10 @@ tasks:
yarn dev
ports:
- port: 8080
- port: 3301
onOpen: open-browser
- port: 8080
onOpen: ignore
- port: 9000
onOpen: ignore
- port: 8123


@@ -1,17 +0,0 @@
#### Auto generated by make docker-version-alpine. DO NOT EDIT! ####
amd64=029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85
unknown=5fea95373b9ec85974843f31446fa6a9df4492dddae4e1cb056193c34a20a5be
arm=b4aef1a899e0271f06d948c9a8fa626ecdb2202d3a178bc14775dd559e23df8e
unknown=a4d1e27e63a9d6353046eb25a2f0ec02945012b217f4364cd83a73fe6dfb0b15
arm=4fdafe217d0922f3c3e2b4f64cf043f8403a4636685cd9c51fea2cbd1f419740
unknown=7f21ac2018d95b2c51a5779c1d5ca6c327504adc3b0fdc747a6725d30b3f13c2
arm64=ea3c5a9671f7b3f7eb47eab06f73bc6591df978b0d5955689a9e6f943aa368c0
unknown=a8ba68c1a9e6eea8041b4b8f996c235163440808b9654a865976fdcbede0f433
386=dea9f02e103e837849f984d5679305c758aba7fea1b95b7766218597f61a05ab
unknown=3c6629bec05c8273a927d46b77428bf4a378dad911a0ae284887becdc149b734
ppc64le=0880443bffa028dfbbc4094a32dd6b7ac25684e4c0a3d50da9e0acae355c5eaf
unknown=bb48308f976b266e3ab39bbf9af84521959bd9c295d3c763690cf41f8df2a626
riscv64=d76e6fbe348ff20c2931bb7f101e49379648e026de95dd37f96e00ce1909dcf7
unknown=dd807544365f6dc187cbe6de0806adce2ea9de3e7124717d1d8e8b7a18b77b64
s390x=b815fadf80495594eb6296a6af0bc647ae5f193e0044e07acec7e5b378c9ce2d
unknown=74681be74a280a88abb53ff1e048eb1fb624b30d0066730df6d8afd02ba82e01


@@ -1,80 +1,389 @@
# Contributing Guidelines
Thank you for your interest in contributing to our project! We greatly value feedback and contributions from our community. This document will guide you through the contribution process.
## Welcome to SigNoz Contributing section 🎉
## How can I contribute?
Hi there! We're thrilled that you'd like to contribute to this project, thank you for your interest. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.
### Finding Issues to Work On
- Check our [existing open issues](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue)
- Look for [good first issues](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with
- Review [recently closed issues](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) to avoid duplicates
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.
### Types of Contributions
- We accept contributions made to the [SigNoz `develop` branch]()
- Find all SigNoz Docker Hub images here
- [signoz/frontend](https://hub.docker.com/r/signoz/frontend)
- [signoz/query-service](https://hub.docker.com/r/signoz/query-service)
- [signoz/otelcontribcol](https://hub.docker.com/r/signoz/otelcontribcol)
1. **Report Bugs**: Use our [Bug Report template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
2. **Request Features**: Submit using [Feature Request template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
3. **Improve Documentation**: Create an issue with the `documentation` label
4. **Report Performance Issues**: Use our [Performance Issue template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=)
5. **Request Dashboards**: Submit using [Dashboard Request template](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+)
6. **Report Security Issues**: Follow our [Security Policy](https://github.com/SigNoz/signoz/security/policy)
7. **Join Discussions**: Participate in [project discussions](https://github.com/SigNoz/signoz/discussions)
## Finding contributions to work on 💬
### Creating Helpful Issues
Looking at the existing issues is a great way to find something to contribute to.
Also, have a look at these [good first issues label](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) to start with.
When creating issues, include:
- **For Feature Requests**:
- Clear use case and requirements
- Proposed solution or improvement
- Any open questions or considerations
## Sections:
- [General Instructions](#1-general-instructions-)
- [For Creating Issue(s)](#11-for-creating-issues)
- [For Pull Requests(s)](#12-for-pull-requests)
- [How to Contribute](#2-how-to-contribute-%EF%B8%8F)
- [Develop Frontend](#3-develop-frontend-)
- [Contribute to Frontend with Docker installation of SigNoz](#31-contribute-to-frontend-with-docker-installation-of-signoz)
- [Contribute to Frontend without installing SigNoz backend](#32-contribute-to-frontend-without-installing-signoz-backend)
- [Contribute to Backend (Query-Service)](#4-contribute-to-backend-query-service-)
- [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development)
- [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-)
- [To run helm chart for local development](#51-to-run-helm-chart-for-local-development)
- [Contribute to Dashboards](#6-contribute-to-dashboards-)
- [Other Ways to Contribute](#other-ways-to-contribute)
- **For Bug Reports**:
- Step-by-step reproduction steps
- Version information
- Relevant environment details
- Any modifications you've made
- Expected vs actual behavior
# 1. General Instructions 📝
### Submitting Pull Requests
## 1.1 For Creating Issue(s)
Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can.
1. **Development**:
- Setup your [development environment](docs/contributing/development.md)
- Work against the latest `main` branch
- Focus on specific changes
- Ensure all tests pass locally
- Follow our [commit convention](#commit-convention)
**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Request Dashboard](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy)
2. **Submit PR**:
- Ensure your branch can be auto-merged
- Address any CI failures
- Respond to review comments promptly
#### Details like these are incredibly useful:
For substantial changes, please split your contribution into multiple PRs:
- **Requirement** - what kind of use case are you trying to solve?
- **Proposal** - what do you suggest to solve the problem or improve the existing
situation?
- Any open questions to address❓
1. First PR: Overall structure (README, configurations, interfaces)
2. Second PR: Core implementation (split further if needed)
3. Final PR: Documentation updates and end-to-end tests
#### If you are reporting a bug, details like these are incredibly useful:
### Commit Convention
- A reproducible test case or series of steps.
- The version of our code being used.
- Any modifications you've made relevant to the bug🐞.
- Anything unusual about your environment or deployment.
We follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/). All commits and PRs should include type specifiers (e.g., `feat:`, `fix:`, `docs:`, etc.).
Discussing your proposed changes ahead of time will make the contribution
process smooth for everyone 🙌.
## How can I contribute to other repositories?
**[`^top^`](#contributing-guidelines)**
You can find other repositories in the [SigNoz](https://github.com/SigNoz) organization to contribute to. Here is a list of **highlighted** repositories:
<hr>
- [charts](https://github.com/SigNoz/charts)
- [dashboards](https://github.com/SigNoz/dashboards)
## 1.2 For Pull Request(s)
Each repository has its own contributing guidelines. Please refer to the guidelines of the repository you want to contribute to.
Contributions via pull requests are much appreciated. Once the approach is agreed upon ✅, make your changes and open a Pull Request.
Before sending us a pull request, please ensure that:
## How can I get help?
- Fork the SigNoz repo on GitHub, clone it on your machine.
- Create a branch with your changes.
- You are working against the latest source on the `develop` branch.
- Modify the source; please focus only on the specific change you are contributing.
- Ensure local tests pass.
- Commit to your fork using clear commit messages.
- Send us a pull request, answering any default questions in the pull request interface.
- Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
- Once you've pushed your commits to GitHub, make sure that your branch can be auto-merged (there are no merge conflicts). If not, merge the target branch into your branch on your machine, resolve any merge conflicts, make sure everything still runs correctly and passes all the tests, and then push those changes (a minimal example follows this list).
- Once the change has been approved and merged, we will inform you in a comment.
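If your branch can no longer be auto-merged, a minimal sketch of bringing it back in sync (the branch names below are examples, not a prescribed workflow) looks like this:
```bash
# Example only: sync a feature branch with the upstream develop branch
git fetch origin
git merge origin/develop          # or origin/main, depending on the target branch
# resolve any conflicts, make sure everything still runs and the tests pass, then push
git push origin my-feature-branch
```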
Need assistance? Join our Slack community:
- [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M)
- [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B)
## Where do I go from here?
GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
- Set up your [development environment](docs/contributing/development.md)
- Deploy and observe [SigNoz in action with OpenTelemetry Demo Application](docs/otel-demo-docs.md)
**Note:** Unless your change is small, **please** consider submitting separate Pull Requests:
* 1⃣ First PR should include the overall structure of the new component:
* Readme, configuration, interfaces or base classes, etc...
* This PR is usually trivial to review, so the size limit does not apply to
it.
* 2⃣ Second PR should include the concrete implementation of the component. If the
size of this PR is larger than the recommended size, consider **splitting** ⚔️ it into
multiple PRs.
  * If there are multiple sub-components, then ideally each one should be implemented as
a **separate** pull request.
  * Last PR should include changes to **any user-facing documentation**, and should include
    end-to-end tests if applicable. The component must be enabled only after sufficient
    testing, and once there is enough confidence in its stability and quality.
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack community](https://signoz.io/slack).
### Pointers:
- If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=)
- If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**.
- If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=)
- If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions)
- If you want to request a new **dashboard template** → please create an issue [here](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+).
<hr>
### Conventions to follow when submitting Commits and Pull Request(s).
We try to follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/); more specifically, commits and PRs **should have type specifiers** prefixed in the name. [This](https://www.conventionalcommits.org/en/v1.0.0/#specification) should give you a better idea.
e.g., if you are submitting a fix for an issue in the frontend, the PR name should be prefixed with **`fix(FE):`**
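A few illustrative examples of commit messages that follow this convention (the scopes and descriptions here are made up):
```bash
# Illustrative examples only — scopes and descriptions are hypothetical
git commit -m "fix(FE): align the service map tooltip with the cursor"
git commit -m "feat(QS): add an endpoint for retention settings"
git commit -m "docs: clarify the local ClickHouse setup steps"
```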
- Follow [GitHub Flow](https://guides.github.com/introduction/flow/) guidelines for your contribution flows.
- Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :)
**[`^top^`](#contributing-guidelines)**
<hr>
# 2. How to Contribute 🙋🏻‍♂️
#### There are primarily 3 areas in which you can contribute to SigNoz
- [**Frontend**](#3-develop-frontend-) (Written in Typescript, React)
- [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go)
- [**Dashboard Templates**](#6-contribute-to-dashboards-) (JSON dashboard templates built with SigNoz)
Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area.
**Please note:** If you want to work on an issue, please add a brief description of your solution on the issue before starting work on it.
**[`^top^`](#contributing-guidelines)**
<hr>
# 3. Develop Frontend 🌚
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)**
Also, have a look at the [Frontend README.md](https://github.com/SigNoz/signoz/blob/main/frontend/README.md) sections for more info on how to set up SigNoz frontend locally (with and without Docker).
## 3.1 Contribute to Frontend with Docker installation of SigNoz
- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
![develop-frontend](https://user-images.githubusercontent.com/52788043/179009217-6692616b-17dc-4d27-b587-9d007098d739.jpeg)
- run `cd deploy` to move to deploy directory,
- Install signoz locally **without** the frontend,
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L47)
```
ports:
- "8080:8080"
```
<img width="869" alt="query service" src="https://user-images.githubusercontent.com/52788043/179010251-8489be31-04ca-42f8-b30d-ef0bb6accb6b.png">
- Next run,
```
cd deploy/docker
sudo docker compose up -d
```
- `cd ../frontend` and change the baseURL in [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts#L2). To do that, create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
If you have backend api exposed via frontend nginx:
```
FRONTEND_API_ENDPOINT=http://localhost:3301
```
If not:
```
FRONTEND_API_ENDPOINT=http://localhost:8080
```
- Next,
```
yarn install
yarn dev
```
## 3.2 Contribute to Frontend without installing SigNoz backend
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.
- Clone the SigNoz repository and cd into signoz/frontend directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
```
- Create a file `.env` in the `frontend` directory with `FRONTEND_API_ENDPOINT=<test environment URL>`
- Next,
```
yarn install
yarn dev
```
Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you with `<test environment URL>`.
**Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services)
**[`^top^`](#contributing-guidelines)**
<hr>
# 4. Contribute to Backend (Query-Service) 🌑
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)**
## 4.1 Prerequisites
### 4.1.1 Install SQLite3
- Run the `sqlite3` command to check if you already have SQLite3 installed on your machine.
- If not installed already, install it using the commands below (a quick verification example follows these commands)
- on Linux
- on Debian / Ubuntu
```
sudo apt install sqlite3
```
- on CentOS / Fedora / RedHat
```
sudo yum install sqlite3
```
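To verify the installation on any of the above setups, a quick sanity check is:
```bash
# Confirm the sqlite3 CLI is on the PATH and can create a database file
sqlite3 --version
sqlite3 /tmp/signoz-test.db "VACUUM;" && rm /tmp/signoz-test.db
```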
## 4.2 To run ClickHouse setup (recommended for local development)
- Clone the SigNoz repository and cd into signoz directory,
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- run `sudo make dev-setup` to configure local setup to run query-service,
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
<img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">
- Comment out `query-service` section at [`deploy/docker/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L41)
<img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">
- add below configuration to `clickhouse` section at [`deploy/docker/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml)
```
ports:
- 9001:9000
```
<img width="1013" alt="Screenshot 2022-07-14 at 22 50 37" src="https://user-images.githubusercontent.com/52788043/179044544-a293d3bc-4c4f-49ea-a276-505a381de67d.png">
- run `cd pkg/query-service/` to move to `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
```
SIGNOZ_SQLSTORE_SQLITE_PATH="./signoz.db"
```
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/main/pkg/query-service/constants/constants.go#L38)
- Now, install SigNoz locally **without** the `frontend` and `query-service`,
- If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
- If you are on `arm64` processors (Apple M1 Macs) run `sudo make run-arm`
#### Run locally,
```
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse go run main.go
```
#### Build and Run locally
```
cd pkg/query-service
go build -o build/query-service main.go
ClickHouseUrl=tcp://localhost:9001 STORAGE=clickhouse build/query-service
```
#### Docker Images
The docker images of query-service are available at https://hub.docker.com/r/signoz/query-service
```
docker pull signoz/query-service
```
```
docker pull signoz/query-service:latest
```
```
docker pull signoz/query-service:develop
```
### Important Note:
**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)
If you want to see how the frontend plays with the query service, you can also run the frontend in your local environment with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts), since the `query-service` is now running at port `8080`.
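For example, assuming the query-service is reachable at `http://localhost:8080`, a minimal sketch of pointing the local frontend at it is:
```bash
# Sketch: point the local frontend at the locally running query-service
cd frontend
echo "FRONTEND_API_ENDPOINT=http://localhost:8080" > .env   # overwrites any existing .env
yarn install
yarn dev
```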
<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
Click the button below. A workspace with all required environments will be created.
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/SigNoz/signoz)
> To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com/<your-github-username>/signoz` -->
**[`^top^`](#contributing-guidelines)**
<hr>
# 5. Contribute to SigNoz Helm Chart 📊
**Need to Update: [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).**
## 5.1 To run helm chart for local development
- Clone the SigNoz repository and cd into charts directory,
```
git clone https://github.com/SigNoz/charts.git && cd charts
```
- It is recommended to use a lightweight Kubernetes (k8s) cluster for local development:
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [k3d](https://k3d.io/#installation)
- [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster (an example with kind is sketched below),
- run `make dev-install` to install SigNoz chart with `my-release` release name in `platform` namespace,
- next run,
```
kubectl -n platform port-forward svc/my-release-signoz-frontend 3301:3301
```
to make SigNoz UI available at [localhost:3301](http://localhost:3301)
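As an example, a throwaway local cluster with [kind](https://kind.sigs.k8s.io/) (the cluster name below is arbitrary) can be created like this:
```bash
# Example: create a local kind cluster and point kubectl at it
kind create cluster --name signoz-dev
kubectl config use-context kind-signoz-dev
kubectl cluster-info
```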
**5.1.1 To install the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
| HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```
**5.1.2 To load data with the HotROD sample app:**
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
```
**5.1.3 To stop the load generation:**
```bash
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```
**5.1.4 To delete the HotROD sample app:**
```bash
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
| HOTROD_NAMESPACE=sample-application bash
```
**[`^top^`](#contributing-guidelines)**
---
# 6. Contribute to Dashboards 📈
**Need to Update: [https://github.com/SigNoz/dashboards](https://github.com/SigNoz/dashboards)**
To contribute a new dashboard template for any service, follow the contribution guidelines in the [Dashboard Contributing Guide](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md). In brief (a rough layout sketch follows this list):
1. Create a dashboard JSON file.
2. Add a README file explaining the dashboard, the metrics ingested, and the configurations needed.
3. Include screenshots of the dashboard in the `assets/` directory.
4. Submit a pull request for review.
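As a rough sketch only (the service name and file names below are hypothetical; the dashboards repo's own CONTRIBUTING.md is authoritative), a contribution might be laid out like this:
```bash
# Hypothetical layout for a new dashboard contribution
mkdir -p myservice/assets
cp myservice-dashboard.json myservice/        # the dashboard JSON exported from SigNoz
cp myservice-overview.png myservice/assets/   # screenshot(s) of the dashboard
touch myservice/README.md                     # describe the metrics ingested and required configuration
```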
## Other Ways to Contribute
There are many other ways to get involved with the community and to participate in this project:
- Use the product, submitting GitHub issues when a problem is found.
- Help code review pull requests and participate in issue threads.
- Submit a new feature request as an issue.
- Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack).
- Tell others about the project on Twitter, your blog, etc.
Again, feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help with this :)
Thank You!

358
Makefile
View File

@@ -1,204 +1,196 @@
##############################################################
# variables
##############################################################
SHELL := /bin/bash
SRC ?= $(shell pwd)
NAME ?= signoz
OS ?= $(shell uname -s | tr '[A-Z]' '[a-z]')
ARCH ?= $(shell uname -m | sed 's/x86_64/amd64/g' | sed 's/aarch64/arm64/g')
COMMIT_SHORT_SHA ?= $(shell git rev-parse --short HEAD)
BRANCH_NAME ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
VERSION ?= $(BRANCH_NAME)-$(COMMIT_SHORT_SHA)
TIMESTAMP ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
ARCHS ?= amd64 arm64
TARGET_DIR ?= $(shell pwd)/target
#
# Reference Guide - https://www.gnu.org/software/make/manual/make.html
#
ZEUS_URL ?= https://api.signoz.cloud
GO_BUILD_LDFLAG_ZEUS_URL = -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=$(ZEUS_URL)
LICENSE_URL ?= https://license.signoz.io/api/v1
GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO = -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=$(LICENSE_URL)
# Build variables
BUILD_VERSION ?= $(shell git describe --always --tags)
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
ZEUS_URL ?= https://api.signoz.cloud
DEV_BUILD ?= "" # set to any non-empty value to enable dev build
GO_BUILD_VERSION_LDFLAGS = -X github.com/SigNoz/signoz/pkg/version.version=$(VERSION) -X github.com/SigNoz/signoz/pkg/version.hash=$(COMMIT_SHORT_SHA) -X github.com/SigNoz/signoz/pkg/version.time=$(TIMESTAMP) -X github.com/SigNoz/signoz/pkg/version.branch=$(BRANCH_NAME)
GO_BUILD_ARCHS_COMMUNITY = $(addprefix go-build-community-,$(ARCHS))
GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/pkg/query-service
GO_BUILD_LDFLAGS_COMMUNITY = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=community
GO_BUILD_ARCHS_ENTERPRISE = $(addprefix go-build-enterprise-,$(ARCHS))
GO_BUILD_ARCHS_ENTERPRISE_RACE = $(addprefix go-build-enterprise-race-,$(ARCHS))
GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/ee/query-service
GO_BUILD_LDFLAGS_ENTERPRISE = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=enterprise $(GO_BUILD_LDFLAG_ZEUS_URL) $(GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO)
# Internal variables or constants.
FRONTEND_DIRECTORY ?= frontend
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
STANDALONE_DIRECTORY ?= deploy/docker
SWARM_DIRECTORY ?= deploy/docker-swarm
CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile
DOCKER_BUILD_ARCHS_COMMUNITY = $(addprefix docker-build-community-,$(ARCHS))
DOCKERFILE_COMMUNITY = $(SRC)/pkg/query-service/Dockerfile
DOCKER_REGISTRY_COMMUNITY ?= docker.io/signoz/signoz-community
DOCKER_BUILD_ARCHS_ENTERPRISE = $(addprefix docker-build-enterprise-,$(ARCHS))
DOCKERFILE_ENTERPRISE = $(SRC)/ee/query-service/Dockerfile
DOCKER_REGISTRY_ENTERPRISE ?= docker.io/signoz/signoz
JS_BUILD_CONTEXT = $(SRC)/frontend
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
GOPATH ?= $(shell go env GOPATH)
##############################################################
# directories
##############################################################
$(TARGET_DIR):
mkdir -p $(TARGET_DIR)
REPONAME ?= signoz
DOCKER_TAG ?= $(subst v,,$(BUILD_VERSION))
FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
##############################################################
# common commands
##############################################################
.PHONY: help
help: ## Displays help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n\nTargets:\n"} /^[a-z0-9A-Z_-]+:.*?##/ { printf " \033[36m%-40s\033[0m %s\n", $$1, $$2 }' $(MAKEFILE_LIST)
# Build-time Go variables
PACKAGE?=go.signoz.io/signoz
buildVersion=${PACKAGE}/pkg/query-service/version.buildVersion
buildHash=${PACKAGE}/pkg/query-service/version.buildHash
buildTime=${PACKAGE}/pkg/query-service/version.buildTime
gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL
##############################################################
# devenv commands
##############################################################
.PHONY: devenv-clickhouse
devenv-clickhouse: ## Run clickhouse in devenv
@cd .devenv/docker/clickhouse; \
docker compose -f compose.yaml up -d
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
.PHONY: devenv-postgres
devenv-postgres: ## Run postgres in devenv
@cd .devenv/docker/postgres; \
docker compose -f compose.yaml up -d
all: build-push-frontend build-push-query-service
##############################################################
# go commands
##############################################################
.PHONY: go-run-enterprise
go-run-enterprise: ## Runs the enterprise go backend server
@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
SIGNOZ_WEB_ENABLED=false \
SIGNOZ_JWT_SECRET=secret \
SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
go run -race \
$(GO_BUILD_CONTEXT_ENTERPRISE)/main.go \
--config ./conf/prometheus.yml \
--cluster cluster \
--use-logs-new-schema true \
--use-trace-new-schema true
# Steps to build static files of frontend
build-frontend-static:
@echo "------------------"
@echo "--> Building frontend static files"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
rm -rf build && \
CI=1 yarn install && \
yarn build && \
ls -l build
.PHONY: go-test
go-test: ## Runs go unit tests
@go test -race ./...
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
# Step to build docker image of frontend in amd64 (used in build pipeline)
build-frontend-amd64: build-frontend-static
@echo "------------------"
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build --file Dockerfile -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
.PHONY: go-run-community
go-run-community: ## Runs the community go backend server
@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
SIGNOZ_WEB_ENABLED=false \
SIGNOZ_JWT_SECRET=secret \
SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
go run -race \
$(GO_BUILD_CONTEXT_COMMUNITY)/main.go \
--config ./conf/prometheus.yml \
--cluster cluster \
--use-logs-new-schema true \
--use-trace-new-schema true
# Step to build and push docker image of frontend(used in push pipeline)
build-push-frontend: build-frontend-static
@echo "------------------"
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
.PHONY: go-build-community $(GO_BUILD_ARCHS_COMMUNITY)
go-build-community: ## Builds the go backend server for community
go-build-community: $(GO_BUILD_ARCHS_COMMUNITY)
$(GO_BUILD_ARCHS_COMMUNITY): go-build-community-%: $(TARGET_DIR)
@mkdir -p $(TARGET_DIR)/$(OS)-$*
@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)-community"
@if [ $* = "arm64" ]; then \
CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
# Steps to build static binary of query service
.PHONY: build-query-service-static
build-query-service-static:
@echo "------------------"
@echo "--> Building query-service static binary"
@echo "------------------"
@if [ $(DEV_BUILD) != "" ]; then \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
else \
CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS}"; \
fi
.PHONY: build-query-service-static-amd64
build-query-service-static-amd64:
make GOARCH=amd64 build-query-service-static
.PHONY: go-build-enterprise $(GO_BUILD_ARCHS_ENTERPRISE)
go-build-enterprise: ## Builds the go backend server for enterprise
go-build-enterprise: $(GO_BUILD_ARCHS_ENTERPRISE)
$(GO_BUILD_ARCHS_ENTERPRISE): go-build-enterprise-%: $(TARGET_DIR)
@mkdir -p $(TARGET_DIR)/$(OS)-$*
@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
@if [ $* = "arm64" ]; then \
CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
.PHONY: build-query-service-static-arm64
build-query-service-static-arm64:
make CC=aarch64-linux-gnu-gcc GOARCH=arm64 build-query-service-static
# Steps to build static binary of query service for all platforms
.PHONY: build-query-service-static-all
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64 build-frontend-static
# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
# Step to build docker image of query service in amd64 (used in build pipeline)
build-query-service-amd64: build-query-service-static-amd64 build-frontend-static
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@docker build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of query in amd64 and arm64 (used in push pipeline)
build-push-query-service: build-query-service-static-all
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
@docker buildx build --file $(QUERY_SERVICE_DIRECTORY)/Dockerfile --progress plain \
--push --platform linux/arm64,linux/amd64 \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
# Step to build EE docker image of query service in amd64 (used in build pipeline)
build-ee-query-service-amd64:
@echo "------------------"
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-query-service-amd64
# Step to build and push EE docker image of query in amd64 and arm64 (used in push pipeline)
build-push-ee-query-service:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
make QUERY_SERVICE_DIRECTORY=${EE_QUERY_SERVICE_DIRECTORY} build-push-query-service
dev-setup:
mkdir -p /var/lib/signoz
sqlite3 /var/lib/signoz/signoz.db "VACUUM";
mkdir -p pkg/query-service/config/dashboards
@echo "------------------"
@echo "--> Local Setup completed"
@echo "------------------"
pull-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
run-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml up --build -d
run-testing:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.testing.yaml up --build -d
down-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml down -v
clear-standalone-data:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-swarm-data:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf alertmanager/* clickhouse*/* signoz/* zookeeper-*/*"
clear-standalone-ch:
@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
clear-swarm-ch:
@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
sh -c "cd /pwd && rm -rf clickhouse*/* zookeeper-*/*"
check-no-ee-references:
@echo "Checking for 'ee' package references in 'pkg' directory..."
@if grep -R --include="*.go" '.*/ee/.*' pkg/; then \
echo "Error: Found references to 'ee' packages in 'pkg' directory"; \
exit 1; \
else \
CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
echo "No references to 'ee' packages found in 'pkg' directory"; \
fi
.PHONY: go-build-enterprise-race $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
go-build-enterprise-race: ## Builds the go backend server for enterprise with race
go-build-enterprise-race: $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
$(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
@mkdir -p $(TARGET_DIR)/$(OS)-$*
@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
@if [ $* = "arm64" ]; then \
CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
test:
go test ./pkg/...
goreleaser-snapshot:
@if [[ ${GORELEASER_WORKDIR} ]]; then \
cd ${GORELEASER_WORKDIR} && \
goreleaser release --clean --snapshot; \
cd -; \
else \
CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
goreleaser release --clean --snapshot; \
fi
##############################################################
# js commands
##############################################################
.PHONY: js-build
js-build: ## Builds the js frontend
@echo ">> building js frontend"
@cd $(JS_BUILD_CONTEXT) && CI=1 yarn install && yarn build
##############################################################
# docker commands
##############################################################
.PHONY: docker-build-community $(DOCKER_BUILD_ARCHS_COMMUNITY)
docker-build-community: ## Builds the docker image for community
docker-build-community: $(DOCKER_BUILD_ARCHS_COMMUNITY)
$(DOCKER_BUILD_ARCHS_COMMUNITY): docker-build-community-%: go-build-community-% js-build
@echo ">> building docker image for $(NAME)-community"
@docker build -t "$(DOCKER_REGISTRY_COMMUNITY):$(VERSION)-$*" \
--build-arg TARGETARCH="$*" \
-f $(DOCKERFILE_COMMUNITY) $(SRC)
.PHONY: docker-buildx-community
docker-buildx-community: ## Builds the docker image for community using buildx
docker-buildx-community: go-build-community js-build
@echo ">> building docker image for $(NAME)-community"
@docker buildx build --file $(DOCKERFILE_COMMUNITY) \
--progress plain \
--platform linux/arm64,linux/amd64 \
--push \
--tag $(DOCKER_REGISTRY_COMMUNITY):$(VERSION) $(SRC)
.PHONY: docker-build-enterprise $(DOCKER_BUILD_ARCHS_ENTERPRISE)
docker-build-enterprise: ## Builds the docker image for enterprise
docker-build-enterprise: $(DOCKER_BUILD_ARCHS_ENTERPRISE)
$(DOCKER_BUILD_ARCHS_ENTERPRISE): docker-build-enterprise-%: go-build-enterprise-% js-build
@echo ">> building docker image for $(NAME)"
@docker build -t "$(DOCKER_REGISTRY_ENTERPRISE):$(VERSION)-$*" \
--build-arg TARGETARCH="$*" \
-f $(DOCKERFILE_ENTERPRISE) $(SRC)
.PHONY: docker-buildx-enterprise
docker-buildx-enterprise: ## Builds the docker image for enterprise using buildx
docker-buildx-enterprise: go-build-enterprise js-build
@echo ">> building docker image for $(NAME)"
@docker buildx build --file $(DOCKERFILE_ENTERPRISE) \
--progress plain \
--platform linux/arm64,linux/amd64 \
--push \
--tag $(DOCKER_REGISTRY_ENTERPRISE):$(VERSION) $(SRC)
##############################################################
# python commands
##############################################################
.PHONY: py-fmt
py-fmt: ## Run black for integration tests
@cd tests/integration && poetry run black .
.PHONY: py-lint
py-lint: ## Run lint for integration tests
@cd tests/integration && poetry run isort .
@cd tests/integration && poetry run autoflake .
@cd tests/integration && poetry run pylint .
.PHONY: py-test
py-test: ## Runs integration tests
@cd tests/integration && poetry run pytest --basetemp=./tmp/ -vv --capture=no src/
goreleaser-snapshot-histogram-quantile:
make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-snapshot

View File

@@ -219,18 +219,12 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
- [Shivanshu Raj Shrivastava](https://github.com/shivanshuraj1333)
- [Ekansh Gupta](https://github.com/eKuG)
- [Aniket Agarwal](https://github.com/aniketio-ctrl)
#### Frontend
- [Yunus M](https://github.com/YounixM)
- [Vikrant Gupta](https://github.com/vikrantgupta25)
- [Sagar Rajput](https://github.com/SagarRajput-7)
- [Shaheer Kochai](https://github.com/ahmadshaheer)
- [Amlan Kumar Nandy](https://github.com/amlannandy)
- [Sahil Khan](https://github.com/sawhil)
#### DevOps

View File

@@ -3,12 +3,6 @@
# Do not modify this file
#
##################### Version #####################
version:
banner:
# Whether to enable the version banner on startup.
enabled: true
##################### Instrumentation #####################
instrumentation:
logs:
@@ -72,95 +66,30 @@ sqlstore:
# The path to the SQLite database file.
path: /var/lib/signoz/signoz.db
##################### APIServer #####################
apiserver:
timeout:
# Default request timeout.
default: 60s
# Maximum request timeout.
max: 600s
# List of routes to exclude from request timeout.
excluded_routes:
- /api/v1/logs/tail
- /api/v3/logs/livetail
logging:
# List of routes to exclude from request/response logging.
excluded_routes:
- /api/v1/health
- /api/v1/version
- /
##################### TelemetryStore #####################
telemetrystore:
# specifies the telemetrystore provider to use.
provider: clickhouse
clickhouse:
# The DSN to use for ClickHouse.
dsn: http://localhost:9000
# Maximum number of idle connections in the connection pool.
max_idle_conns: 50
# Maximum number of open connections to the database.
max_open_conns: 100
# Maximum time to wait for a connection to be established.
dial_timeout: 5s
# Specifies the telemetrystore provider to use.
provider: clickhouse
clickhouse:
# The DSN to use for clickhouse.
dsn: tcp://localhost:9000
# The query settings for clickhouse.
settings:
max_execution_time: 0
max_execution_time_leaf: 0
timeout_before_checking_execution_speed: 0
max_bytes_to_read: 0
max_result_rows_for_ch_query: 0
##################### Prometheus #####################
prometheus:
active_query_tracker:
# Whether to enable the active query tracker.
enabled: true
# The path to use for the active query tracker.
path: ""
# The maximum number of concurrent queries.
max_concurrent: 20
##################### Alertmanager #####################
alertmanager:
# Specifies the alertmanager provider to use.
provider: legacy
legacy:
# The API URL (with prefix) of the legacy Alertmanager instance.
api_url: http://localhost:9093/api
signoz:
# The poll interval for periodically syncing the alertmanager with the config in the store.
poll_interval: 1m
# The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself.
external_url: http://localhost:8080
# The global configuration for the alertmanager. All the exhaustive fields can be found in the upstream: https://github.com/prometheus/alertmanager/blob/efa05feffd644ba4accb526e98a8c6545d26a783/config/config.go#L833
global:
# ResolveTimeout is the time after which an alert is declared resolved if it has not been updated.
resolve_timeout: 5m
route:
# GroupByStr is the list of labels to group alerts by.
group_by:
- alertname
# GroupInterval is the interval at which alerts are grouped.
group_interval: 1m
# GroupWait is the time to wait before sending alerts to receivers.
group_wait: 1m
# RepeatInterval is the interval at which alerts are repeated.
repeat_interval: 1h
alerts:
# Interval between garbage collection of alerts.
gc_interval: 30m
silences:
# Maximum number of silences, including expired silences. If negative or zero, no limit is set.
max: 0
# Maximum size of the silences in bytes. If negative or zero, no limit is set.
max_size_bytes: 0
# Interval between garbage collection and snapshotting of the silences. The snapshot will be stored in the state store.
maintenance_interval: 15m
# Retention of the silences.
retention: 120h
nflog:
# Interval between garbage collection and snapshotting of the notification logs. The snapshot will be stored in the state store.
maintenance_interval: 15m
# Retention of the notification logs.
retention: 120h

View File

@@ -26,7 +26,7 @@ cd deploy/docker
docker compose up -d
```
Open http://localhost:8080 in your favourite browser.
Open http://localhost:3301 in your favourite browser.
To start collecting logs and metrics from your infrastructure, run the following command:
@@ -55,7 +55,7 @@ cd deploy/docker-swarm
docker stack deploy -c docker-compose.yaml signoz
```
Open http://localhost:8080 in your favourite browser.
Open http://localhost:3301 in your favourite browser.
To start collecting logs and metrics from your infrastructure, run the following command:

View File

@@ -0,0 +1,64 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 3301;
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
# to handle 414 (Request-URI Too Large) errors from nginx
client_max_body_size 24M;
large_client_header_buffers 8 128k;
location / {
if ( $uri = '/index.html' ) {
add_header Cache-Control no-store always;
}
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location ~ ^/api/(v1|v3)/logs/(tail|livetail){
proxy_pass http://query-service:8080;
proxy_http_version 1.1;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
# don't buffer the data; send it directly to the client.
proxy_buffering off;
proxy_cache off;
}
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
proxy_read_timeout 600s;
}
location /ws {
proxy_pass http://query-service:8080/ws;
proxy_http_version 1.1;
proxy_set_header Upgrade "websocket";
proxy_set_header Connection "upgrade";
proxy_read_timeout 86400;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}

View File

@@ -1 +1 @@
server_endpoint: ws://signoz:4320/v1/opamp
server_endpoint: ws://query-service:4320/v1/opamp

View File

@@ -76,9 +76,6 @@ services:
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
deploy:
restart_policy:
condition: on-failure
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
@@ -172,30 +169,39 @@ services:
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- ./clickhouse-setup/data/alertmanager:/data
depends_on:
- query-service
query-service:
!!merge <<: *db-depend
image: signoz/signoz:v0.80.0
image: signoz/query-service:0.70.1
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# ports:
# - "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- ./clickhouse-setup/data/signoz/:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- SIGNOZ_JWT_SECRET=secret
healthcheck:
test:
- CMD
@@ -206,9 +212,19 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.70.1
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.111.39
image: signoz/signoz-otel-collector:0.111.25
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -229,10 +245,10 @@ services:
depends_on:
- clickhouse
- schema-migrator
- signoz
- query-service
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:v0.111.39
image: signoz/signoz-schema-migrator:0.111.24
deploy:
restart_policy:
condition: on-failure
@@ -247,6 +263,8 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:

View File

@@ -73,9 +73,6 @@ services:
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
deploy:
restart_policy:
condition: on-failure
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
@@ -108,23 +105,33 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
- query-service
query-service:
!!merge <<: *db-depend
image: signoz/signoz:v0.80.0
image: signoz/query-service:0.70.1
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# ports:
# - "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -141,9 +148,19 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.70.1
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.111.39
image: signoz/signoz-otel-collector:0.111.25
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -164,10 +181,10 @@ services:
depends_on:
- clickhouse
- schema-migrator
- signoz
- query-service
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:v0.111.39
image: signoz/signoz-schema-migrator:0.111.24
deploy:
restart_policy:
condition: on-failure
@@ -182,6 +199,8 @@ networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:

View File

@@ -42,7 +42,7 @@ receivers:
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^(signoz_(logspout|signoz|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
expr: 'attributes.container_name matches "^(signoz_(logspout|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
processors:
batch:
send_batch_size: 10000

View File

@@ -66,7 +66,7 @@ exporters:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
signozclickhousemetrics:
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
@@ -90,11 +90,11 @@ service:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite, signozclickhousemetrics]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp]
processors: [batch]

View File

@@ -2,7 +2,7 @@ version: "3"
x-common: &common
networks:
- signoz-net
restart: unless-stopped
restart: on-failure
logging:
options:
max-size: 50m
@@ -79,7 +79,6 @@ services:
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
restart: on-failure
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
@@ -175,24 +174,36 @@ services:
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse-3:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.80.0}
container_name: signoz
image: signoz/query-service:${DOCKER_TAG:-0.70.1}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# ports:
# - "3301:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -209,10 +220,21 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.70.1}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.39}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.25}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -234,11 +256,11 @@ services:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
signoz:
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.39}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
@@ -249,17 +271,18 @@ services:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.39}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
restart: on-failure
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:

View File

@@ -0,0 +1,221 @@
version: "3"
x-common: &common
networks:
- signoz-net
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
# adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
init-clickhouse:
condition: service_completed_successfully
zookeeper-1:
condition: service_healthy
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: signoz-init-clickhouse
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-1
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.70.1}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --gateway-url=https://api.staging.signoz.cloud
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.70.1}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.25}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
- --dsn=tcp://clickhouse:9000
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1

View File

@@ -2,7 +2,7 @@ version: "3"
x-common: &common
networks:
- signoz-net
restart: unless-stopped
restart: on-failure
logging:
options:
max-size: 50m
@@ -75,7 +75,6 @@ services:
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
restart: on-failure
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
@@ -108,24 +107,36 @@ services:
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.80.0}
container_name: signoz
image: signoz/query-service:${DOCKER_TAG:-0.70.1}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
ports:
- "8080:8080" # signoz port
# ports:
# - "3301:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
@@ -142,9 +153,20 @@ services:
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.70.1}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.39}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.25}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -162,11 +184,11 @@ services:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
signoz:
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.39}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
@@ -175,20 +197,20 @@ services:
depends_on:
clickhouse:
condition: service_healthy
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.39}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
restart: on-failure
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:

View File

@@ -8,7 +8,7 @@ x-common: &common
options:
max-size: 50m
max-file: "3"
restart: unless-stopped
restart: on-failure
services:
hotrod:
<<: *common

View File

@@ -8,7 +8,7 @@ x-common: &common
options:
max-size: 50m
max-file: "3"
restart: unless-stopped
restart: on-failure
services:
otel-agent:
<<: *common

View File

@@ -79,7 +79,7 @@ receivers:
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^signoz|(signoz-(|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
expr: 'attributes.container_name matches "^(signoz-(|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
processors:
batch:
send_batch_size: 10000

View File

@@ -66,7 +66,7 @@ exporters:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
signozclickhousemetrics:
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
@@ -90,11 +90,11 @@ service:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite, signozclickhousemetrics]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus, signozclickhousemetrics]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp]
processors: [batch]

View File

@@ -127,7 +127,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="8080|4317"
local ports_pattern="3301|4317"
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -144,7 +144,7 @@ check_ports_occupied() {
send_event "port_not_available"
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 8080 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "SigNoz requires ports 3301 & 4317 to be open. Please shut down any other service(s) that may be running on these ports."
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/install/troubleshooting/"
echo "++++++++++++++++++++++++++++++++++++++++"
echo ""
@@ -248,7 +248,7 @@ wait_for_containers_start() {
# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:8080/api/v1/health?live=1" || true)"
status_code="$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3301/api/v1/health?live=1" || true)"
if [[ status_code -eq 200 ]]; then
break
else
@@ -484,7 +484,7 @@ pushd "${BASE_DIR}/${DOCKER_STANDALONE_DIR}" > /dev/null 2>&1
# check for open ports, if signoz is not installed
if is_command_present docker-compose; then
if $sudo_cmd $docker_compose_cmd ps | grep "signoz" | grep -q "healthy" > /dev/null 2>&1; then
if $sudo_cmd $docker_compose_cmd ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
echo "SigNoz already installed, skipping the occupied ports check"
else
check_ports_occupied
@@ -533,7 +533,7 @@ else
echo ""
echo "🟢 Your installation is complete!"
echo ""
echo -e "🟢 SigNoz is running on http://localhost:8080"
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
echo " By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"

View File

@@ -1,95 +0,0 @@
# Development Guide
Welcome! This guide will help you set up your local development environment for SigNoz. Let's get you started! 🚀
## What do I need?
Before diving in, make sure you have these tools installed:
- **Git** - Our version control system
- Download from [git-scm.com](https://git-scm.com/)
- **Go** - Powers our backend
- Download from [go.dev/dl](https://go.dev/dl/)
- Check [go.mod](../../go.mod#L3) for the minimum version
- **GCC** - Required for CGO dependencies
- Download from [gcc.gnu.org](https://gcc.gnu.org/)
- **Node** - Powers our frontend
- Download from [nodejs.org](https://nodejs.org)
- Check [.nvmrc](../../frontend/.nvmrc) for the version
- **Yarn** - Our frontend package manager
- Follow the [installation guide](https://yarnpkg.com/getting-started/install)
- **Docker** - For running Clickhouse and Postgres locally
- Get it from [docs.docker.com/get-docker](https://docs.docker.com/get-docker/)
> 💡 **Tip**: Run `make help` to see all available commands with descriptions
## How do I get the code?
1. Open your terminal
2. Clone the repository:
```bash
git clone https://github.com/SigNoz/signoz.git
```
3. Navigate to the project:
```bash
cd signoz
```
## How do I run it locally?
SigNoz has three main components: Clickhouse, Backend, and Frontend. Let's set them up one by one.
### 1. Setting up Clickhouse
First, we need to get Clickhouse running:
```bash
make devenv-clickhouse
```
This command:
- Starts Clickhouse in a single-shard, single-replica cluster
- Sets up Zookeeper
- Runs the latest schema migrations
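As a quick sanity check (an addition to this guide, assuming the dev setup publishes ClickHouse's HTTP interface on localhost:8123), you can hit the ping endpoint:
```bash
# ClickHouse answers "Ok." on its HTTP ping endpoint once it is up
curl -s http://localhost:8123/ping
```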
### 2. Starting the Backend
1. Run the backend server:
```bash
make go-run-community
```
2. Verify it's working:
```bash
curl http://localhost:8080/api/v1/health
```
You should see: `{"status":"ok"}`
> 💡 **Tip**: The API server runs at `http://localhost:8080/` by default
### 3. Setting up the Frontend
1. Install dependencies:
```bash
cd frontend
yarn install
```
2. Create a `.env` file in the `frontend` directory:
```env
FRONTEND_API_ENDPOINT=http://localhost:8080
```
3. Start the development server:
```bash
yarn dev
```
> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code
Now you're all set to start developing! Happy coding! 🎉

Binary file not shown. (Before: 143 KiB)

Binary file not shown. (Before: 157 KiB)

Binary file not shown. (Before: 76 KiB)

Binary file not shown. (Before: 306 KiB)

View File

@@ -1,246 +0,0 @@
# Configuring OpenTelemetry Demo App with SigNoz
[The OpenTelemetry Astronomy Shop](https://github.com/open-telemetry/opentelemetry-demo) is an e-commerce web application built from **15 core microservices** in a **distributed system** that communicate over gRPC. Designed as a **polyglot** environment, it leverages a diverse set of programming languages, including Go, Python, .NET, Java, and others, showcasing cross-language instrumentation with OpenTelemetry. The intention is to provide a quickstart application for sending data and experiencing SigNoz firsthand.
This guide provides a step-by-step walkthrough for setting up the **OpenTelemetry Demo App** with **SigNoz** as backend for observability. It outlines steps to export telemetry data to **SigNoz self-hosted with Docker**, **SigNoz self-hosted with Kubernetes** and **SigNoz cloud**.
<br/>
__Table of Contents__
- [Send data to SigNoz Self-hosted with Docker](#send-data-to-signoz-self-hosted-with-docker)
- [Prerequisites](#prerequisites)
- [Clone the OpenTelemetry Demo App Repository](#clone-the-opentelemetry-demo-app-repository)
- [Modify OpenTelemetry Collector Config](#modify-opentelemetry-collector-config)
- [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app)
- [Monitor with SigNoz (Docker)](#monitor-with-signoz-docker)
- [Send data to SigNoz Self-hosted with Kubernetes](#send-data-to-signoz-self-hosted-with-kubernetes)
- [Prerequisites](#prerequisites-1)
- [Install Helm Repo and Charts](#install-helm-repo-and-charts)
- [Start the OpenTelemetry Demo App](#start-the-opentelemetry-demo-app-1)
- [Monitor with SigNoz (Kubernetes)](#monitor-with-signoz-kubernetes)
- [What's next](#whats-next)
# Send data to SigNoz Self-hosted with Docker
In this guide, you will install the OTel demo application using Docker and send telemetry data to SigNoz hosted with Docker, referred to as SigNoz [Docker] from here on.
## Prerequisites
- Docker and Docker Compose installed
- 6 GB of RAM for the application [as per OpenTelemetry documentation]
- Nice to have Docker Desktop, for easy monitoring
## Clone the OpenTelemetry Demo App Repository
Clone the OTel demo app to any folder of your choice.
```sh
# Clone the OpenTelemetry Demo repository
git clone https://github.com/open-telemetry/opentelemetry-demo.git
cd opentelemetry-demo
```
## Modify OpenTelemetry Collector Config
By default, the collector in the demo application will merge the configuration from two files:
1. otelcol-config.yml &nbsp;&nbsp;[we don't touch this]
2. otelcol-config-extras.yml &nbsp;&nbsp; [we modify this]
To add SigNoz [Docker] as the backend, open the file `src/otel-collector/otelcol-config-extras.yml` and add the following,
```yaml
exporters:
otlp:
endpoint: "http://host.docker.internal:4317"
tls:
insecure: true
debug:
verbosity: detailed
service:
pipelines:
metrics:
exporters: [otlp]
traces:
exporters: [spanmetrics, otlp]
logs:
exporters: [otlp]
```
The SigNoz OTel collector [SigNoz's otel-collector service] listens on port 4317 on localhost. When the OTel demo app is running within a Docker container and needs to transmit telemetry data to SigNoz, it cannot directly reference 'localhost', as this would refer to the container's own internal network. Instead, Docker provides a special DNS name, `host.docker.internal`, which resolves to the host machine's IP address from within containers. By configuring the OpenTelemetry Demo application to send data to `host.docker.internal:4317`, we establish a network path that allows the containerized application to transmit telemetry data across the container boundary to the SigNoz OTel collector listening on the host machine's port 4317.
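Not part of the original guide, but a quick way to sanity-check this network path before starting the demo is to probe the collector's OTLP HTTP port (4318, published alongside 4317 in the SigNoz compose file) from a throwaway container; the `--add-host` flag covers Linux engines, where the alias is not injected automatically:
```sh
# Probe the SigNoz collector's OTLP HTTP receiver from inside a container.
# Any HTTP status (e.g. 405 for a plain GET) means the port is reachable;
# 000 means the container cannot reach port 4318 on the host.
docker run --rm --add-host=host.docker.internal:host-gateway \
  curlimages/curl -s -o /dev/null -w "%{http_code}\n" \
  http://host.docker.internal:4318/v1/traces
```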
>
> Note: When merging extra configuration values with the existing collector config (`src/otel-collector/otelcol-config.yml`), objects are merged and arrays are replaced, resulting in previous pipeline configurations getting overridden.
The spanmetrics exporter must be included in the array of exporters for the traces pipeline if overridden. Not including this exporter will result in an error.
>
<br>
<u>To send data to SigNoz Cloud</u>
If you want to send data to SigNoz Cloud instead, open the file `src/otel-collector/otelcol-config-extras.yml` and add the following,
```yaml
exporters:
otlp:
endpoint: "https://ingest.{your-region}.signoz.cloud:443"
tls:
insecure: false
headers:
signoz-access-token: <SIGNOZ-KEY>
debug:
verbosity: detailed
service:
pipelines:
metrics:
exporters: [otlp]
traces:
exporters: [spanmetrics, otlp]
logs:
exporters: [otlp]
```
Remember to replace the region and ingestion key with proper values as obtained from your account.
## Start the OpenTelemetry Demo App
Both SigNoz and the OTel demo app [the frontend-proxy service, to be accurate] default to port 8080. To prevent a port allocation conflict, set the OTel demo application's `ENVOY_PORT` value to 8081 as shown below, and run the docker compose command.
```sh
ENVOY_PORT=8081 docker compose up -d
```
This spins up multiple microservices with OpenTelemetry instrumentation enabled. You can verify this by running:
```sh
docker compose ps -a
```
The result should look similar to this,
![](/docs/img/otel-demo-docker-containers.png)
Navigate to `http://localhost:8081/`, where you can access the OTel demo app UI. Generate some traffic to send to SigNoz [Docker].
## Monitor with SigNoz [Docker]
SigNoz exposes its UI at `http://localhost:8080/`. You should be able to see multiple services listed, as shown in the snapshot below.
![](/docs/img/otel-demo-services.png)
This verifies that your OTel demo app is successfully sending telemetry data to SigNoz [Docker] as expected.
# Send data to SigNoz Self-hosted with Kubernetes
In this guide, you will install the OTel demo application using Helm and send telemetry data to SigNoz hosted with Kubernetes, referred to as SigNoz [Kubernetes] from here on.
## Prerequisites
- Helm installed
- 6 GB of free RAM for the application [as per OpenTelemetry documentation]
- A kubernetes cluster (EKS, GKE, Minikube)
- kubectl [CLI for Kubernetes]
>Note: We will be installing the OTel demo app using Helm charts, as recommended by OpenTelemetry. If you wish to install using kubectl, follow [this](https://opentelemetry.io/docs/demo/kubernetes-deployment/#install-using-kubectl).
## Install Helm Repo and Charts
You'll need to **add the Helm repository** before you can install the demo app and start sending data to SigNoz.
```sh
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
```
The OpenTelemetry Collector's configuration is exposed in the Helm chart. All additions made will be merged into the default configuration. We use this capability to add SigNoz as an exporter and shape the pipelines as desired.
For this, we have to create a `values.yaml` that overrides the existing configuration that comes with the Helm chart.
```yaml
default:
env:
- name: OTEL_SERVICE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: "metadata.labels['app.kubernetes.io/component']"
- name: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
value: cumulative
- name: OTEL_RESOURCE_ATTRIBUTES
value: 'service.name=$(OTEL_SERVICE_NAME),service.namespace=opentelemetry-demo'
- name: OTEL_COLLECTOR_NAME
value: signoz-otel-collector.<namespace>.svc.cluster.local
```
Replace `<namespace>` with the appropriate namespace. This file will override the chart's existing settings with our new ones, ensuring telemetry data is sent to SigNoz [Kubernetes].
> Note: When merging YAML values with Helm, objects are merged and arrays are replaced. The spanmetrics exporter must be included in the array of exporters for the traces pipeline if overridden. Not including this exporter will result in an error.
<br>
<u>To send data to SigNoz cloud</u>
If you wish to send data to the cloud instance of SigNoz instead, create a `values.yaml` that overrides the existing configuration that comes with the Helm chart.
```yaml
opentelemetry-collector:
config:
exporters:
otlp:
endpoint: "https://ingest.{your-region}.signoz.cloud:443"
tls:
insecure: false
headers:
signoz-access-token: <SIGNOZ-KEY>
debug:
verbosity: detailed
service:
pipelines:
traces:
exporters: [spanmetrics, otlp]
metrics:
exporters: [otlp]
logs:
exporters: [otlp]
```
Make sure to replace the region and key with the values obtained from your account.
Now **install the Helm chart** with a release name and namespace of your choice. For the code snippet below, let's take *my-otel-demo* as the release name and *otel-demo* as the namespace:
```sh
# Create a new Kubernetes namespace called "otel-demo"
kubectl create namespace otel-demo
# Install the OpenTelemetry Demo Helm chart with the release name "my-otel-demo"
helm install my-otel-demo open-telemetry/opentelemetry-demo --namespace otel-demo -f values.yaml
```
You should see a similar output on your terminal,
![](/docs/img/otel-demo-helm.png)
To verify if all the pods are running,
```sh
kubectl get pods -n otel-demo
```
The output should look similar to this,
![](/docs/img/otel-demo-pods.png)
## Start the OpenTelemetry Demo App
To expose the OTel demo app UI [frontend-proxy service] use the following command (replace my-otel-demo with your Helm chart release name):
```sh
kubectl port-forward svc/my-otel-demo-frontend-proxy 8081:8080
```
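If the port-forward fails because the service name differs in your release, listing the services the chart created (an extra step, not from the original guide) shows the exact name to use:
```sh
kubectl get svc -n otel-demo
```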
Navigate to `http://localhost:8081/`, where you can access the OTel demo app UI. Generate some traffic to send to SigNoz [Kubernetes].
## Monitor with SigNoz [Kubernetes]
SigNoz exposes its UI at `http://localhost:8080/`. You should be able to see multiple services listed, as shown in the snapshot below.
![](/docs/img/otel-demo-services.png)
This verifies that your OTel demo app is successfully sending telemetry data to SigNoz [Kubernetes] as expected.
# What's next?
Don't forget to check our OpenTelemetry [track](https://signoz.io/resource-center/opentelemetry/), guaranteed to take you from a newbie to sensei in no time!
Also, from one fellow OTel fan to another: we at [SigNoz](https://signoz.io/) are building an open-source, OTel-native observability platform (one of a kind). So, show us some love: star us on [GitHub](https://github.com/SigNoz/signoz), nitpick our [docs](https://signoz.io/docs/introduction/), or just tell your app we're the ones who'll catch its crashes mid-flight and finally shush all the 3 am panic calls!

e2e/package.json Normal file
View File

@@ -0,0 +1,14 @@
{
"name": "e2e",
"version": "1.0.0",
"main": "index.js",
"license": "MIT",
"devDependencies": {
"@playwright/test": "^1.22.0",
"@types/node": "^20.9.2"
},
"scripts": {},
"dependencies": {
"dotenv": "8.2.0"
}
}

e2e/playwright.config.ts Normal file
View File

@@ -0,0 +1,46 @@
import { defineConfig, devices } from "@playwright/test";
import dotenv from "dotenv";
dotenv.config();
export default defineConfig({
testDir: "./tests",
fullyParallel: true,
forbidOnly: !!process.env.CI,
name: "Signoz E2E",
retries: process.env.CI ? 2 : 0,
reporter: process.env.CI ? "github" : "list",
preserveOutput: "always",
updateSnapshots: "all",
quiet: false,
testMatch: ["**/*.spec.ts"],
use: {
trace: "on-first-retry",
baseURL:
process.env.PLAYWRIGHT_TEST_BASE_URL || "https://stagingapp.signoz.io/",
},
projects: [
{ name: "setup", testMatch: /.*\.setup\.ts/ },
{
name: "chromium",
use: {
...devices["Desktop Chrome"],
// Use prepared auth state.
storageState: ".auth/user.json",
},
dependencies: ["setup"],
},
],
});

e2e/tests/auth.setup.ts Normal file
View File

@@ -0,0 +1,37 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import dotenv from "dotenv";
dotenv.config();
const authFile = ".auth/user.json";
test("E2E Login Test", async ({ page }) => {
await Promise.all([page.goto("/"), page.waitForRequest("**/version")]);
const signup = "Monitor your applications. Find what is causing issues.";
const el = await page.locator(`text=${signup}`);
expect(el).toBeVisible();
await page
.locator("id=loginEmail")
.type(
process.env.PLAYWRIGHT_USERNAME ? process.env.PLAYWRIGHT_USERNAME : ""
);
await page.getByText("Next").click();
await page
.locator('input[id="currentPassword"]')
.fill(
process.env.PLAYWRIGHT_PASSWORD ? process.env.PLAYWRIGHT_PASSWORD : ""
);
await page.locator('button[data-attr="signup"]').click();
await expect(page).toHaveURL(ROUTES.APPLICATION);
await page.context().storageState({ path: authFile });
});

e2e/tests/contants.ts Normal file
View File

@@ -0,0 +1,10 @@
export const SERVICE_TABLE_HEADERS = {
APPLICATION: "Applicaton",
P99LATENCY: "P99 latency (in ms)",
ERROR_RATE: "Error Rate (% of total)",
OPS_PER_SECOND: "Operations Per Second",
};
export const DATA_TEST_IDS = {
NEW_DASHBOARD_BTN: "create-new-dashboard",
};

View File

@@ -0,0 +1,40 @@
import { test, expect } from "@playwright/test";
import ROUTES from "../../frontend/src/constants/routes";
import { DATA_TEST_IDS, SERVICE_TABLE_HEADERS } from "./contants";
test("Basic Navigation Check across different resources", async ({ page }) => {
// route to services page and check if the page renders fine with BE contract
await Promise.all([
page.goto(ROUTES.APPLICATION),
page.waitForRequest("**/v1/services"),
]);
const p99Latency = page.locator(
`th:has-text("${SERVICE_TABLE_HEADERS.P99LATENCY}")`
);
await expect(p99Latency).toBeVisible();
// route to the new trace explorer page and check if the page renders fine
await page.goto(ROUTES.TRACES_EXPLORER);
await page.waitForLoadState("networkidle");
const listViewTable = await page
.locator('div[role="presentation"]')
.isVisible();
expect(listViewTable).toBeTruthy();
// route to the dashboards page and check if the page renders fine
await Promise.all([
page.goto(ROUTES.ALL_DASHBOARD),
page.waitForRequest("**/v1/dashboards"),
]);
const newDashboardBtn = await page
.locator(`data-testid=${DATA_TEST_IDS.NEW_DASHBOARD_BTN}`)
.isVisible();
expect(newDashboardBtn).toBeTruthy();
});

e2e/yarn.lock Normal file
View File

@@ -0,0 +1,46 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@playwright/test@^1.22.0":
version "1.40.0"
resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.40.0.tgz#d06c506977dd7863aa16e07f2136351ecc1be6ed"
integrity sha512-PdW+kn4eV99iP5gxWNSDQCbhMaDVej+RXL5xr6t04nbKLCBwYtA046t7ofoczHOm8u6c+45hpDKQVZqtqwkeQg==
dependencies:
playwright "1.40.0"
"@types/node@^20.9.2":
version "20.9.2"
resolved "https://registry.yarnpkg.com/@types/node/-/node-20.9.2.tgz#002815c8e87fe0c9369121c78b52e800fadc0ac6"
integrity sha512-WHZXKFCEyIUJzAwh3NyyTHYSR35SevJ6mZ1nWwJafKtiQbqRTIKSRcw3Ma3acqgsent3RRDqeVwpHntMk+9irg==
dependencies:
undici-types "~5.26.4"
dotenv@8.2.0:
version "8.2.0"
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.2.0.tgz#97e619259ada750eea3e4ea3e26bceea5424b16a"
integrity sha512-8sJ78ElpbDJBHNeBzUbUVLsqKdccaa/BXF1uPTw3GrvQTBgrQrtObr2mUrE38vzYd8cEv+m/JBfDLioYcfXoaw==
fsevents@2.3.2:
version "2.3.2"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
playwright-core@1.40.0:
version "1.40.0"
resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.40.0.tgz#82f61e5504cb3097803b6f8bbd98190dd34bdf14"
integrity sha512-fvKewVJpGeca8t0ipM56jkVSU6Eo0RmFvQ/MaCQNDYm+sdvKkMBBWTE1FdeMqIdumRaXXjZChWHvIzCGM/tA/Q==
playwright@1.40.0:
version "1.40.0"
resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.40.0.tgz#2a1824b9fe5c4fe52ed53db9ea68003543a99df0"
integrity sha512-gyHAgQjiDf1m34Xpwzaqb76KgfzYrhK7iih+2IzcOCoZWr/8ZqmdBw+t0RU85ZmfJMgtgAiNtBQ/KS2325INXw==
dependencies:
playwright-core "1.40.0"
optionalDependencies:
fsevents "2.3.2"
undici-types@~5.26.4:
version "5.26.5"
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==

View File

@@ -1,91 +0,0 @@
package middleware
import (
"net/http"
"time"
eeTypes "github.com/SigNoz/signoz/ee/types"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"go.uber.org/zap"
)
type Pat struct {
store sqlstore.SQLStore
uuid *authtypes.UUID
headers []string
}
func NewPat(store sqlstore.SQLStore, headers []string) *Pat {
return &Pat{store: store, uuid: authtypes.NewUUID(), headers: headers}
}
func (p *Pat) Wrap(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var values []string
var patToken string
var pat eeTypes.StorablePersonalAccessToken
for _, header := range p.headers {
values = append(values, r.Header.Get(header))
}
ctx, err := p.uuid.ContextFromRequest(r.Context(), values...)
if err != nil {
next.ServeHTTP(w, r)
return
}
patToken, ok := authtypes.UUIDFromContext(ctx)
if !ok {
next.ServeHTTP(w, r)
return
}
err = p.store.BunDB().NewSelect().Model(&pat).Where("token = ?", patToken).Scan(r.Context())
if err != nil {
next.ServeHTTP(w, r)
return
}
if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
next.ServeHTTP(w, r)
return
}
// get user from db
user := types.User{}
err = p.store.BunDB().NewSelect().Model(&user).Where("id = ?", pat.UserID).Scan(r.Context())
if err != nil {
next.ServeHTTP(w, r)
return
}
role, err := authtypes.NewRole(user.Role)
if err != nil {
next.ServeHTTP(w, r)
return
}
jwt := authtypes.Claims{
UserID: user.ID,
Role: role,
Email: user.Email,
OrgID: user.OrgID,
}
ctx = authtypes.NewContextWithClaims(ctx, jwt)
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
pat.LastUsed = time.Now().Unix()
_, err = p.store.BunDB().NewUpdate().Model(&pat).Column("last_used").Where("token = ?", patToken).Where("revoked = false").Exec(r.Context())
if err != nil {
zap.L().Error("Failed to update PAT last used in db, err: %v", zap.Error(err))
}
})
}

View File

@@ -1,68 +0,0 @@
# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
version: 2
project_name: signoz
before:
hooks:
- go mod tidy
builds:
- id: signoz
binary: bin/signoz
main: ee/query-service/main.go
env:
- CGO_ENABLED=1
- >-
{{- if eq .Os "linux" }}
{{- if eq .Arch "arm64" }}CC=aarch64-linux-gnu-gcc{{- end }}
{{- end }}
goos:
- linux
- darwin
goarch:
- amd64
- arm64
goamd64:
- v1
goarm64:
- v8.0
ldflags:
- -s -w
- -X github.com/SigNoz/signoz/pkg/version.version=v{{ .Version }}
- -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
- -X github.com/SigNoz/signoz/pkg/version.hash={{ .ShortCommit }}
- -X github.com/SigNoz/signoz/pkg/version.time={{ .CommitTimestamp }}
- -X github.com/SigNoz/signoz/pkg/version.branch={{ .Branch }}
- -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
- -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
- >-
{{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
mod_timestamp: "{{ .CommitTimestamp }}"
tags:
- timetzdata
archives:
- formats:
- tar.gz
name_template: >-
{{ .ProjectName }}_{{- .Os }}_{{- .Arch }}
wrap_in_directory: true
strip_binary_directory: false
files:
- src: README.md
dst: README.md
- src: LICENSE
dst: LICENSE
- src: frontend/build
dst: web
- src: conf
dst: conf
- src: templates
dst: templates
release:
name_template: "v{{ .Version }}"
draft: false
prerelease: auto

View File

@@ -1,21 +1,34 @@
# use a minimal alpine image
FROM alpine:3.20.3
# Add Maintainer Info
LABEL maintainer="signoz"
# define arguments that can be passed during build time
ARG TARGETOS TARGETARCH
# add ca-certificates in case you need them
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
# set working directory
WORKDIR /root
ARG OS="linux"
ARG TARGETARCH
# copy the query-service binary
COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-service
RUN apk update && \
apk add ca-certificates && \
rm -rf /var/cache/apk/*
# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
COPY pkg/query-service/templates /root/templates
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service
COPY ./target/${OS}-${TARGETARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
# Copy frontend
COPY frontend/build/ /etc/signoz/web/
RUN chmod 755 /root /root/signoz
# run the binary
ENTRYPOINT ["./query-service"]
ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]
EXPOSE 8080

View File

@@ -1,36 +0,0 @@
FROM golang:1.22-bullseye
ARG OS="linux"
ARG TARGETARCH
ARG ZEUSURL
# This path is important for stacktraces
WORKDIR $GOPATH/src/github.com/signoz/signoz
WORKDIR /root
RUN set -eux; \
apt-get update; \
apt-get install -y --no-install-recommends \
g++ \
gcc \
libc6-dev \
make \
pkg-config \
; \
rm -rf /var/lib/apt/lists/*
COPY go.mod go.sum ./
RUN go mod download
COPY ./ee/ ./ee/
COPY ./pkg/ ./pkg/
COPY ./templates/email /root/templates
COPY Makefile Makefile
RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz
RUN chmod 755 /root /root/signoz
ENTRYPOINT ["/root/signoz"]

View File

@@ -1,22 +0,0 @@
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"
FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root
ARG OS="linux"
ARG ARCH
RUN apk update && \
apk add ca-certificates && \
rm -rf /var/cache/apk/*
COPY ./target/${OS}-${ARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/
RUN chmod 755 /root /root/signoz
ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]

View File

@@ -3,8 +3,8 @@ package anomaly
import (
"context"
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)
type DailyProvider struct {
@@ -28,10 +28,11 @@ func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvi
}
dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: dp.reader,
Cache: dp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: dp.fluxInterval,
Reader: dp.reader,
Cache: dp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: dp.fluxInterval,
FeatureLookup: dp.ff,
})
return dp

View File

@@ -3,8 +3,8 @@ package anomaly
import (
"context"
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)
type HourlyProvider struct {
@@ -28,10 +28,11 @@ func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyPr
}
hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: hp.reader,
Cache: hp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: hp.fluxInterval,
Reader: hp.reader,
Cache: hp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: hp.fluxInterval,
FeatureLookup: hp.ff,
})
return hp

View File

@@ -4,8 +4,8 @@ import (
"math"
"time"
"github.com/SigNoz/signoz/pkg/query-service/common"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/common"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
type Seasonality string

View File

@@ -5,11 +5,11 @@ import (
"math"
"time"
"github.com/SigNoz/signoz/pkg/query-service/cache"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/postprocess"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"go.signoz.io/signoz/pkg/query-service/cache"
"go.signoz.io/signoz/pkg/query-service/interfaces"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
)
@@ -38,6 +38,12 @@ func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericPr
}
}
func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().ff = ff
}
}
func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] {
return func(p T) {
p.GetBaseSeasonalProvider().reader = reader
@@ -50,6 +56,7 @@ type BaseSeasonalProvider struct {
fluxInterval time.Duration
cache cache.Cache
keyGenerator cache.KeyGenerator
ff interfaces.FeatureLookup
}
func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams {
@@ -306,9 +313,6 @@ func (p *BaseSeasonalProvider) getScore(
series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
) float64 {
expectedValue := p.getExpectedValue(series, prevSeries, weekSeries, weekPrevSeries, past2SeasonSeries, past3SeasonSeries, idx)
if expectedValue < 0 {
expectedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}
return (value - expectedValue) / p.getStdDev(weekSeries)
}

View File

@@ -3,8 +3,8 @@ package anomaly
import (
"context"
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
)
type WeeklyProvider struct {
@@ -27,10 +27,11 @@ func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyPr
}
wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{
Reader: wp.reader,
Cache: wp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: wp.fluxInterval,
Reader: wp.reader,
Cache: wp.cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: wp.fluxInterval,
FeatureLookup: wp.ff,
})
return wp

View File

@@ -5,30 +5,26 @@ import (
"net/http/httputil"
"time"
"github.com/SigNoz/signoz/ee/query-service/dao"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/ee/query-service/interfaces"
"github.com/SigNoz/signoz/ee/query-service/license"
"github.com/SigNoz/signoz/ee/query-service/usage"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/apis/fields"
"github.com/SigNoz/signoz/pkg/http/middleware"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/cache"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
rules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/version"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/cache"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
rules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/version"
)
type APIHandlerOptions struct {
DataConnector interfaces.DataConnector
SkipConfig *basemodel.SkipConfig
PreferSpanMetrics bool
AppDao dao.ModelDao
RulesManager *rules.Manager
@@ -45,7 +41,6 @@ type APIHandlerOptions struct {
FluxInterval time.Duration
UseLogsNewSchema bool
UseTraceNewSchema bool
JWT *authtypes.JWT
}
type APIHandler struct {
@@ -54,9 +49,11 @@ type APIHandler struct {
}
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector,
SkipConfig: opts.SkipConfig,
PreferSpanMetrics: opts.PreferSpanMetrics,
AppDao: opts.AppDao,
RuleManager: opts.RulesManager,
@@ -68,9 +65,6 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
FieldsAPI: fields.NewAPI(signoz.TelemetryStore),
Signoz: signoz,
})
if err != nil {
@@ -114,7 +108,7 @@ func (ah *APIHandler) CheckFeature(f string) bool {
}
// RegisterRoutes registers routes for this handler on the given router
func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
// note: add ee override methods first
// routes available only in ee version
@@ -157,6 +151,7 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
router.HandleFunc("/api/v1/invite/{token}", am.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/register", am.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/login", am.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(ah.searchTraces)).Methods(http.MethodGet)
// PAT APIs
router.HandleFunc("/api/v1/pats", am.AdminAccess(ah.createPAT)).Methods(http.MethodPost)
@@ -187,7 +182,7 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
}
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)
@@ -199,8 +194,9 @@ func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *mi
}
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
version := version.GetVersion()
versionResponse := basemodel.GetVersionResponse{
Version: version.Info.Version(),
Version: version,
EE: "Y",
SetupCompleted: ah.SetupCompleted,
}

View File

@@ -12,10 +12,10 @@ import (
"github.com/gorilla/mux"
"go.uber.org/zap"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/model"
baseauth "github.com/SigNoz/signoz/pkg/query-service/auth"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
func parseRequest(r *http.Request, req interface{}) error {
@@ -50,7 +50,7 @@ func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
}
// if all looks good, call auth
resp, err := baseauth.Login(ctx, &req, ah.opts.JWT)
resp, err := baseauth.Login(ctx, &req)
if ah.HandleError(w, err, http.StatusUnauthorized) {
return
}
@@ -134,7 +134,7 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
return
}
_, registerError := baseauth.Register(ctx, req, ah.Signoz.Alertmanager, ah.Signoz.Modules.Organization)
_, registerError := baseauth.Register(ctx, req)
if !registerError.IsNil() {
RespondError(w, apierr, nil)
return
@@ -151,8 +151,9 @@ func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
token := mux.Vars(r)["token"]
sourceUrl := r.URL.Query().Get("ref")
ctx := context.Background()
inviteObject, err := baseauth.GetInvite(r.Context(), token, ah.Signoz.Modules.Organization)
inviteObject, err := baseauth.GetInvite(context.Background(), token)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
@@ -162,7 +163,7 @@ func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
InvitationResponseObject: inviteObject,
}
precheck, apierr := ah.AppDao().PrecheckLogin(r.Context(), inviteObject.Email, sourceUrl)
precheck, apierr := ah.AppDao().PrecheckLogin(ctx, inviteObject.Email, sourceUrl)
resp.Precheck = precheck
if apierr != nil {
@@ -252,7 +253,7 @@ func (ah *APIHandler) receiveGoogleAuth(w http.ResponseWriter, r *http.Request)
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email, ah.opts.JWT)
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, identity.Email)
if err != nil {
zap.L().Error("[receiveGoogleAuth] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)
@@ -330,7 +331,7 @@ func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
return
}
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email, ah.opts.JWT)
nextPage, err := ah.AppDao().PrepareSsoRedirect(ctx, redirectUri, email)
if err != nil {
zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
handleSsoError(w, r, redirectUri)

View File

@@ -10,15 +10,14 @@ import (
"strings"
"time"
"github.com/SigNoz/signoz/ee/query-service/constants"
eeTypes "github.com/SigNoz/signoz/ee/types"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/query-service/auth"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/dao"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
@@ -30,12 +29,6 @@ type CloudIntegrationConnectionParamsResponse struct {
}
func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
cloudProvider := mux.Vars(r)["cloudProvider"]
if cloudProvider != "aws" {
RespondError(w, basemodel.BadRequest(fmt.Errorf(
@@ -44,7 +37,15 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
return
}
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
currentUser, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, basemodel.UnauthorizedError(fmt.Errorf(
"couldn't deduce current user: %w", err,
)), nil)
return
}
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), currentUser.OrgId, cloudProvider)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't provision PAT for cloud integration:",
@@ -116,14 +117,14 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
return "", apiErr
}
allPats, err := ah.AppDao().ListPATs(ctx, orgId)
allPats, err := ah.AppDao().ListPATs(ctx)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't list PATs: %w", err,
"couldn't list PATs: %w", err.Error(),
))
}
for _, p := range allPats {
if p.UserID == integrationUser.ID && p.Name == integrationPATName {
if p.UserID == integrationUser.Id && p.Name == integrationPATName {
return p.Token, nil
}
}
@@ -133,16 +134,19 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
zap.String("cloudProvider", cloudProvider),
)
newPAT := eeTypes.NewGettablePAT(
integrationPATName,
authtypes.RoleViewer.String(),
integrationUser.ID,
0,
)
integrationPAT, err := ah.AppDao().CreatePAT(ctx, orgId, newPAT)
newPAT := model.PAT{
Token: generatePATToken(),
UserID: integrationUser.Id,
Name: integrationPATName,
Role: baseconstants.ViewerGroup,
ExpiresAt: 0,
CreatedAt: time.Now().Unix(),
UpdatedAt: time.Now().Unix(),
}
integrationPAT, err := ah.AppDao().CreatePAT(ctx, newPAT)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err,
"couldn't create cloud integration PAT: %w", err.Error(),
))
}
return integrationPAT.Token, nil
@@ -150,12 +154,10 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
func (ah *APIHandler) getOrCreateCloudIntegrationUser(
ctx context.Context, orgId string, cloudProvider string,
) (*types.User, *basemodel.ApiError) {
cloudIntegrationUser := fmt.Sprintf("%s-integration", cloudProvider)
email := fmt.Sprintf("%s@signoz.io", cloudIntegrationUser)
) (*basemodel.User, *basemodel.ApiError) {
cloudIntegrationUserId := fmt.Sprintf("%s-integration", cloudProvider)
// TODO(nitya): there should be orgId here
integrationUserResult, apiErr := ah.AppDao().GetUserByEmail(ctx, email)
integrationUserResult, apiErr := ah.AppDao().GetUser(ctx, cloudIntegrationUserId)
if apiErr != nil {
return nil, basemodel.WrapApiError(apiErr, "couldn't look for integration user")
}
@@ -169,17 +171,19 @@ func (ah *APIHandler) getOrCreateCloudIntegrationUser(
zap.String("cloudProvider", cloudProvider),
)
newUser := &types.User{
ID: uuid.New().String(),
Name: cloudIntegrationUser,
Email: email,
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
},
OrgID: orgId,
newUser := &basemodel.User{
Id: cloudIntegrationUserId,
Name: fmt.Sprintf("%s integration", cloudProvider),
Email: fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
CreatedAt: time.Now().Unix(),
OrgId: orgId,
}
newUser.Role = authtypes.RoleViewer.String()
viewerGroup, apiErr := dao.DB().GetGroupByName(ctx, baseconstants.ViewerGroup)
if apiErr != nil {
return nil, basemodel.WrapApiError(apiErr, "couldn't get viewer group for creating integration user")
}
newUser.GroupId = viewerGroup.Id
passwordHash, err := auth.PasswordHash(uuid.NewString())
if err != nil {

View File

@@ -1,14 +1,15 @@
package api
import (
"errors"
"net/http"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/query-service/app/dashboards"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/gorilla/mux"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/model"
)
func (ah *APIHandler) lockDashboard(w http.ResponseWriter, r *http.Request) {
@@ -30,32 +31,26 @@ func (ah *APIHandler) lockUnlockDashboard(w http.ResponseWriter, r *http.Request
// Get the dashboard UUID from the request
uuid := mux.Vars(r)["uuid"]
if strings.HasPrefix(uuid, "integration") {
render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "dashboards created by integrations cannot be modified"))
if strings.HasPrefix(uuid,"integration") {
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: errors.New("dashboards created by integrations cannot be unlocked")}, "You are not authorized to lock/unlock this dashboard")
return
}
claims, err := authtypes.ClaimsFromContext(r.Context())
dashboard, err := dashboards.GetDashboard(r.Context(), uuid)
if err != nil {
render.Error(w, errors.Newf(errors.TypeUnauthenticated, errors.CodeUnauthenticated, "unauthenticated"))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
return
}
dashboard, err := dashboards.GetDashboard(r.Context(), claims.OrgID, uuid)
if err != nil {
render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to get dashboard"))
return
}
if err := claims.IsAdmin(); err != nil && (dashboard.CreatedBy != claims.Email) {
render.Error(w, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "You are not authorized to lock/unlock this dashboard"))
user := common.GetUserFromContext(r.Context())
if !auth.IsAdmin(user) && (dashboard.CreateBy != nil && *dashboard.CreateBy != user.Email) {
RespondError(w, &model.ApiError{Typ: model.ErrorForbidden, Err: err}, "You are not authorized to lock/unlock this dashboard")
return
}
// Lock/Unlock the dashboard
err = dashboards.LockUnlockDashboard(r.Context(), claims.OrgID, uuid, lock)
err = dashboards.LockUnlockDashboard(r.Context(), uuid, lock)
if err != nil {
render.Error(w, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to lock/unlock dashboard"))
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, err.Error())
return
}

View File

@@ -6,10 +6,9 @@ import (
"fmt"
"net/http"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/ee/types"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
)
func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
@@ -25,7 +24,7 @@ func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
req := types.GettableOrgDomain{}
req := model.OrgDomain{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
@@ -55,12 +54,12 @@ func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
return
}
req := types.GettableOrgDomain{StorableOrgDomain: types.StorableOrgDomain{ID: domainId}}
req := model.OrgDomain{Id: domainId}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
req.ID = domainId
req.Id = domainId
if err := req.Valid(nil); err != nil {
RespondError(w, model.BadRequest(err), nil)
}

View File

@@ -8,8 +8,8 @@ import (
"net/http"
"time"
"github.com/SigNoz/signoz/ee/query-service/constants"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"go.signoz.io/signoz/ee/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)

View File

@@ -3,8 +3,8 @@ package api
import (
"testing"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/stretchr/testify/assert"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
func TestMergeFeatureSets(t *testing.T) {

View File

@@ -4,7 +4,7 @@ import (
"net/http"
"strings"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
)
func (ah *APIHandler) ServeGatewayHTTP(rw http.ResponseWriter, req *http.Request) {

View File

@@ -3,12 +3,13 @@ package api
import (
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/integrations/signozio"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/pkg/http/render"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/http/render"
"go.uber.org/zap"
)
type DayWiseBreakdown struct {
@@ -47,10 +48,6 @@ type details struct {
BillTotal float64 `json:"billTotal"`
}
type Redirect struct {
RedirectURL string `json:"redirectURL"`
}
type billingDetails struct {
Status string `json:"status"`
Data struct {
@@ -122,31 +119,36 @@ func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request)
render.Success(w, http.StatusNoContent, nil)
}
func getCheckoutPortalResponse(redirectURL string) *Redirect {
return &Redirect{RedirectURL: redirectURL}
}
func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
checkoutRequest := &model.CheckoutRequest{}
if err := json.NewDecoder(r.Body).Decode(checkoutRequest); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
type checkoutResponse struct {
Status string `json:"status"`
Data struct {
RedirectURL string `json:"redirectURL"`
} `json:"data"`
}
license := ah.LM().GetActiveLicense()
if license == nil {
RespondError(w, model.BadRequestStr("cannot proceed with checkout without license key"), nil)
return
}
redirectUrl, err := signozio.CheckoutSession(r.Context(), checkoutRequest, license.Key)
hClient := &http.Client{}
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/checkout", r.Body)
if err != nil {
RespondError(w, err, nil)
RespondError(w, model.InternalError(err), nil)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
licenseResp, err := hClient.Do(req)
if err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
ah.Respond(w, getCheckoutPortalResponse(redirectUrl))
// decode response body
var resp checkoutResponse
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
ah.Respond(w, resp.Data)
}
func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
@@ -226,28 +228,102 @@ func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
Licenses: licenses,
}
var currentActiveLicenseKey string
for _, license := range licenses {
if license.IsCurrent {
currentActiveLicenseKey = license.Key
}
}
// For the case when no license is applied, i.e. the community edition
// There will be no trial details or license details
if currentActiveLicenseKey == "" {
ah.Respond(w, resp)
return
}
// Fetch trial details
hClient := &http.Client{}
url := fmt.Sprintf("%s/trial?licenseKey=%s", constants.LicenseSignozIo, currentActiveLicenseKey)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
zap.L().Error("Error while creating request for trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid blocking the UI
ah.Respond(w, resp)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
trialResp, err := hClient.Do(req)
if err != nil {
zap.L().Error("Error while fetching trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
defer trialResp.Body.Close()
trialRespBody, err := io.ReadAll(trialResp.Body)
if err != nil || trialResp.StatusCode != http.StatusOK {
zap.L().Error("Error while fetching trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
// decode response body
var trialRespData model.SubscriptionServerResp
if err := json.Unmarshal(trialRespBody, &trialRespData); err != nil {
zap.L().Error("Error while decoding trial details", zap.Error(err))
// If there is an error in fetching trial details, we will still return the license details
// to avoid incorrectly blocking the UI
ah.Respond(w, resp)
return
}
resp.TrialStart = trialRespData.Data.TrialStart
resp.TrialEnd = trialRespData.Data.TrialEnd
resp.OnTrial = trialRespData.Data.OnTrial
resp.WorkSpaceBlock = trialRespData.Data.WorkSpaceBlock
resp.TrialConvertedToSubscription = trialRespData.Data.TrialConvertedToSubscription
resp.GracePeriodEnd = trialRespData.Data.GracePeriodEnd
ah.Respond(w, resp)
}
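
listLicensesV2 enriches the license list with trial details fetched from the license server, and on any failure it logs the error and returns the plain license list so the UI is never blocked by that lookup. A runnable sketch of that fallback shape, with simplified response types and a caller-supplied fetch function standing in for the HTTP call:

package main

import (
	"errors"
	"fmt"
)

// trialDetails is a simplified stand-in for model.SubscriptionServerResp.Data.
type trialDetails struct {
	TrialStart int64
	TrialEnd   int64
	OnTrial    bool
}

// licenseResponse is a simplified stand-in for the listLicensesV2 payload.
type licenseResponse struct {
	Licenses   []string
	TrialStart int64
	TrialEnd   int64
	OnTrial    bool
}

// enrichWithTrial mirrors the handler's shape: if fetching trial details fails
// for any reason, return the plain license list instead of failing the request.
func enrichWithTrial(resp licenseResponse, fetch func() (trialDetails, error)) licenseResponse {
	trial, err := fetch()
	if err != nil {
		// In the handler this is logged with zap and the response is sent as-is.
		return resp
	}
	resp.TrialStart = trial.TrialStart
	resp.TrialEnd = trial.TrialEnd
	resp.OnTrial = trial.OnTrial
	return resp
}

func main() {
	base := licenseResponse{Licenses: []string{"key-1"}}

	ok := enrichWithTrial(base, func() (trialDetails, error) {
		return trialDetails{TrialStart: 1, TrialEnd: 2, OnTrial: true}, nil
	})
	failed := enrichWithTrial(base, func() (trialDetails, error) {
		return trialDetails{}, errors.New("license server unreachable")
	})

	fmt.Println(ok.OnTrial, failed.OnTrial) // true false
}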
func (ah *APIHandler) portalSession(w http.ResponseWriter, r *http.Request) {
portalRequest := &model.PortalRequest{}
if err := json.NewDecoder(r.Body).Decode(portalRequest); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
type checkoutResponse struct {
Status string `json:"status"`
Data struct {
RedirectURL string `json:"redirectURL"`
} `json:"data"`
}
license := ah.LM().GetActiveLicense()
if license == nil {
RespondError(w, model.BadRequestStr("cannot request the portal session without license key"), nil)
return
}
redirectUrl, err := signozio.PortalSession(r.Context(), portalRequest, license.Key)
hClient := &http.Client{}
req, err := http.NewRequest("POST", constants.LicenseSignozIo+"/portal", r.Body)
if err != nil {
RespondError(w, err, nil)
RespondError(w, model.InternalError(err), nil)
return
}
req.Header.Add("X-SigNoz-SecretKey", constants.LicenseAPIKey)
licenseResp, err := hClient.Do(req)
if err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
ah.Respond(w, getCheckoutPortalResponse(redirectUrl))
// decode response body
var resp checkoutResponse
if err := json.NewDecoder(licenseResp.Body).Decode(&resp); err != nil {
RespondError(w, model.InternalError(err), nil)
return
}
ah.Respond(w, resp.Data)
}
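
In the older variant, checkout and portalSession repeat the same steps: forward the incoming body to the license endpoint with the X-SigNoz-SecretKey header and unwrap data.redirectURL from the decoded response. A runnable sketch of that shared shape, using an httptest server and a placeholder secret in place of constants.LicenseSignozIo and constants.LicenseAPIKey:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

// redirectEnvelope mirrors the {status, data:{redirectURL}} body returned by the
// license endpoints used in checkout and portalSession.
type redirectEnvelope struct {
	Status string `json:"status"`
	Data   struct {
		RedirectURL string `json:"redirectURL"`
	} `json:"data"`
}

// fetchRedirectURL forwards body to url with the secret-key header and returns
// the redirect URL from the decoded response.
func fetchRedirectURL(client *http.Client, url, secret string, body io.Reader) (string, error) {
	req, err := http.NewRequest(http.MethodPost, url, body)
	if err != nil {
		return "", err
	}
	req.Header.Add("X-SigNoz-SecretKey", secret)
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var out redirectEnvelope
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return "", err
	}
	return out.Data.RedirectURL, nil
}

func main() {
	// Stand-in for the license server, used only to make the sketch runnable.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"status":"success","data":{"redirectURL":"https://billing.example.com/session"}}`)
	}))
	defer srv.Close()

	url, err := fetchRedirectURL(srv.Client(), srv.URL+"/checkout", "test-secret", strings.NewReader(`{}`))
	fmt.Println(url, err) // https://billing.example.com/session <nil>
}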

View File

@@ -1,53 +1,73 @@
package api
import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"slices"
"time"
"github.com/SigNoz/signoz/ee/query-service/model"
eeTypes "github.com/SigNoz/signoz/ee/types"
"github.com/SigNoz/signoz/pkg/errors"
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func generatePATToken() string {
// Generate a 32-byte random token.
token := make([]byte, 32)
rand.Read(token)
// Encode the token in base64.
encodedToken := base64.StdEncoding.EncodeToString(token)
return encodedToken
}
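
generatePATToken returns 32 random bytes as a base64 string; createPAT then converts ExpiresInDays into an absolute unix timestamp, with zero meaning the token never expires, and the PAT auth path later compares that value against time.Now. A runnable sketch of that lifecycle with a trimmed-down pat struct:

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"time"
)

// pat is a simplified stand-in for the PAT model: only the fields needed for
// token generation and expiry handling.
type pat struct {
	Token     string
	ExpiresAt int64 // unix seconds; 0 means the token never expires
}

func generatePATToken() string {
	token := make([]byte, 32)
	_, _ = rand.Read(token)
	return base64.StdEncoding.EncodeToString(token)
}

// newPAT converts an expiry given in days into an absolute unix timestamp,
// mirroring the conversion done in createPAT.
func newPAT(expiresInDays int64) pat {
	p := pat{Token: generatePATToken()}
	if expiresInDays != 0 {
		p.ExpiresAt = time.Now().Unix() + expiresInDays*24*60*60
	}
	return p
}

// expired mirrors the check used when authenticating a PAT:
// a non-zero ExpiresAt in the past means the token is no longer valid.
func (p pat) expired(now time.Time) bool {
	return p.ExpiresAt != 0 && p.ExpiresAt < now.Unix()
}

func main() {
	p := newPAT(30)
	fmt.Println(len(p.Token) > 0, p.expired(time.Now()))                 // true false
	fmt.Println(p.expired(time.Now().Add(31 * 24 * time.Hour)))          // true
	fmt.Println(newPAT(0).expired(time.Now().Add(365 * 24 * time.Hour))) // false (never expires)
}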
func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
ctx := context.Background()
req := model.CreatePATRequestBody{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
pat := eeTypes.NewGettablePAT(
req.Name,
req.Role,
claims.UserID,
req.ExpiresInDays,
)
user, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
pat := model.PAT{
Name: req.Name,
Role: req.Role,
ExpiresAt: req.ExpiresInDays,
}
err = validatePATRequest(pat)
if err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
// All the PATs are associated with the user creating the PAT.
pat.UserID = user.Id
pat.CreatedAt = time.Now().Unix()
pat.UpdatedAt = time.Now().Unix()
pat.LastUsed = 0
pat.Token = generatePATToken()
if pat.ExpiresAt != 0 {
// convert expiresAt to unix timestamp from days
pat.ExpiresAt = time.Now().Unix() + (pat.ExpiresAt * 24 * 60 * 60)
}
zap.L().Info("Got Create PAT request", zap.Any("pat", pat))
var apierr basemodel.BaseApiError
if pat, apierr = ah.AppDao().CreatePAT(r.Context(), claims.OrgID, pat); apierr != nil {
if pat, apierr = ah.AppDao().CreatePAT(ctx, pat); apierr != nil {
RespondError(w, apierr, nil)
return
}
@@ -55,59 +75,34 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, &pat)
}
func validatePATRequest(req eeTypes.GettablePAT) error {
_, err := authtypes.NewRole(req.Role)
if err != nil {
return err
func validatePATRequest(req model.PAT) error {
if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
return fmt.Errorf("valid role is required")
}
if req.ExpiresAt < 0 {
return fmt.Errorf("valid expiresAt is required")
}
if req.Name == "" {
return fmt.Errorf("valid name is required")
}
return nil
}
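
The older validatePATRequest enforces three rules: the role must be one of the viewer/editor/admin groups, expiresAt must not be negative, and the name must be non-empty. A table-driven test sketch of that rule set; the local validate helper and the literal role strings are placeholders so the example stays self-contained:

package api

import (
	"fmt"
	"testing"
)

// validate restates the three rules enforced by validatePATRequest so the test
// does not depend on the surrounding package's constants and types.
func validate(role string, expiresAt int64, name string) error {
	switch role {
	case "VIEWER", "EDITOR", "ADMIN": // placeholder role strings
	default:
		return fmt.Errorf("valid role is required")
	}
	if expiresAt < 0 {
		return fmt.Errorf("valid expiresAt is required")
	}
	if name == "" {
		return fmt.Errorf("valid name is required")
	}
	return nil
}

func TestValidatePATRequest(t *testing.T) {
	cases := []struct {
		role    string
		expires int64
		name    string
		wantErr bool
	}{
		{"ADMIN", 0, "ci-token", false},
		{"VIEWER", 30, "read-only", false},
		{"", 0, "no-role", true},
		{"ADMIN", -1, "negative-expiry", true},
		{"EDITOR", 10, "", true},
	}
	for _, c := range cases {
		err := validate(c.role, c.expires, c.name)
		if (err != nil) != c.wantErr {
			t.Errorf("validate(%q, %d, %q): got err=%v, wantErr=%v", c.role, c.expires, c.name, err, c.wantErr)
		}
	}
}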
func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
ctx := context.Background()
req := eeTypes.GettablePAT{}
req := model.PAT{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
idStr := mux.Vars(r)["id"]
id, err := valuer.NewUUID(idStr)
user, err := auth.GetUserFromRequest(r)
if err != nil {
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "id is not a valid uuid-v7"))
return
}
// get the pat
existingPAT, paterr := ah.AppDao().GetPATByID(r.Context(), claims.OrgID, id)
if paterr != nil {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, paterr.Error()))
return
}
// get the user
createdByUser, usererr := ah.AppDao().GetUser(r.Context(), existingPAT.UserID)
if usererr != nil {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, usererr.Error()))
return
}
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email)) {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user pat cannot be updated"))
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
@@ -117,11 +112,12 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
return
}
req.UpdatedByUserID = claims.UserID
req.UpdatedAt = time.Now()
req.UpdatedByUserID = user.Id
id := mux.Vars(r)["id"]
req.UpdatedAt = time.Now().Unix()
zap.L().Info("Got Update PAT request", zap.Any("pat", req))
var apierr basemodel.BaseApiError
if apierr = ah.AppDao().UpdatePAT(r.Context(), claims.OrgID, req, id); apierr != nil {
if apierr = ah.AppDao().UpdatePAT(ctx, req, id); apierr != nil {
RespondError(w, apierr, nil)
return
}
@@ -130,56 +126,38 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
}
func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
ctx := context.Background()
user, err := auth.GetUserFromRequest(r)
if err != nil {
render.Error(w, err)
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
pats, apierr := ah.AppDao().ListPATs(r.Context(), claims.OrgID)
zap.L().Info("Get PATs for user", zap.String("user_id", user.Id))
pats, apierr := ah.AppDao().ListPATs(ctx)
if apierr != nil {
RespondError(w, apierr, nil)
return
}
ah.Respond(w, pats)
}
func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
ctx := context.Background()
id := mux.Vars(r)["id"]
user, err := auth.GetUserFromRequest(r)
if err != nil {
render.Error(w, err)
RespondError(w, &model.ApiError{
Typ: model.ErrorUnauthorized,
Err: err,
}, nil)
return
}
idStr := mux.Vars(r)["id"]
id, err := valuer.NewUUID(idStr)
if err != nil {
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "id is not a valid uuid-v7"))
return
}
// get the pat
existingPAT, paterr := ah.AppDao().GetPATByID(r.Context(), claims.OrgID, id)
if paterr != nil {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, paterr.Error()))
return
}
// get the user
createdByUser, usererr := ah.AppDao().GetUser(r.Context(), existingPAT.UserID)
if usererr != nil {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, usererr.Error()))
return
}
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email)) {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user pat cannot be revoked"))
return
}
zap.L().Info("Revoke PAT with id", zap.String("id", id.StringValue()))
if apierr := ah.AppDao().RevokePAT(r.Context(), claims.OrgID, id, claims.UserID); apierr != nil {
zap.L().Info("Revoke PAT with id", zap.String("id", id))
if apierr := ah.AppDao().RevokePAT(ctx, id, user.Id); apierr != nil {
RespondError(w, apierr, nil)
return
}

View File

@@ -6,11 +6,11 @@ import (
"io"
"net/http"
"github.com/SigNoz/signoz/ee/query-service/anomaly"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
"github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/ee/query-service/anomaly"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/queryBuilder"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.uber.org/zap"
)
@@ -88,24 +88,28 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache),
anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags),
)
case anomaly.SeasonalityDaily:
provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
)
case anomaly.SeasonalityHourly:
provider = anomaly.NewHourlyProvider(
anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache),
anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags),
)
default:
provider = anomaly.NewDailyProvider(
anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache),
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector),
anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags),
)
}
anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams})
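
Each seasonality branch assembles its provider from the same generic functional options (WithCache[...], WithKeyGenerator[...], WithReader[...], WithFeatureLookup[...]), instantiated for the concrete provider type. A runnable sketch of that generic option pattern, with hypothetical provider and option names standing in for the anomaly package's:

package main

import "fmt"

// cacheable is the constraint shared by the providers in this sketch; the real
// options are constrained on the anomaly provider types instead.
type cacheable interface {
	setCacheSize(int)
}

type dailyProvider struct{ cacheSize int }

func (d *dailyProvider) setCacheSize(n int) { d.cacheSize = n }

type hourlyProvider struct{ cacheSize int }

func (h *hourlyProvider) setCacheSize(n int) { h.cacheSize = n }

// option is a generic functional option, instantiated per provider type,
// mirroring anomaly.WithCache[*anomaly.DailyProvider] and friends.
type option[P cacheable] func(P)

// withCacheSize is a hypothetical option; the real code injects caches,
// key generators, readers and feature lookups the same way.
func withCacheSize[P cacheable](n int) option[P] {
	return func(p P) { p.setCacheSize(n) }
}

// newProvider applies all options to a freshly constructed provider.
func newProvider[P cacheable](p P, opts ...option[P]) P {
	for _, o := range opts {
		o(p)
	}
	return p
}

func main() {
	d := newProvider(&dailyProvider{}, withCacheSize[*dailyProvider](128))
	h := newProvider(&hourlyProvider{}, withCacheSize[*hourlyProvider](64))
	fmt.Println(d.cacheSize, h.cacheSize) // 128 64
}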

View File

@@ -3,8 +3,8 @@ package api
import (
"net/http"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {

View File

@@ -0,0 +1,33 @@
package api
import (
"net/http"
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/model"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
func (ah *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
if !ah.CheckFeature(basemodel.SmartTraceDetail) {
zap.L().Info("SmartTraceDetail feature is not enabled in this plan")
ah.APIHandler.SearchTraces(w, r)
return
}
searchTracesParams, err := baseapp.ParseSearchTracesParams(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
return
}
result, err := ah.opts.DataConnector.SearchTraces(r.Context(), searchTracesParams, db.SmartTraceAlgorithm)
if ah.HandleError(w, err, http.StatusBadRequest) {
return
}
ah.WriteJSON(w, r, result)
}

View File

@@ -5,33 +5,38 @@ import (
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/prometheus"
basechr "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/pkg/cache"
basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
"go.signoz.io/signoz/pkg/query-service/interfaces"
)
type ClickhouseReader struct {
conn clickhouse.Conn
appdb sqlstore.SQLStore
appdb *sqlx.DB
*basechr.ClickHouseReader
}
func NewDataConnector(
sqlDB sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
prometheus prometheus.Prometheus,
localDB *sqlx.DB,
ch clickhouse.Conn,
promConfigPath string,
lm interfaces.FeatureLookup,
cluster string,
useLogsNewSchema bool,
useTraceNewSchema bool,
fluxIntervalForTraceDetail time.Duration,
cache cache.Cache,
) *ClickhouseReader {
chReader := basechr.NewReader(sqlDB, telemetryStore, prometheus, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
chReader := basechr.NewReader(localDB, ch, promConfigPath, lm, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
return &ClickhouseReader{
conn: telemetryStore.ClickhouseDB(),
appdb: sqlDB,
conn: ch,
appdb: localDB,
ClickHouseReader: chReader,
}
}
func (r *ClickhouseReader) Start(readerReady chan bool) {
r.ClickHouseReader.Start(readerReady)
}

View File

@@ -1,16 +1,17 @@
package smart
package db
import (
"errors"
"strconv"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
// SmartTraceAlgorithm finds the target span and builds a tree of spans around it, bounded by the given levelUp, levelDown and spanLimit parameters
func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
var spans []*SpanForTraceDetails
var spans []*model.SpanForTraceDetails
// if targetSpanId is null or not present then randomly select a span as targetSpanId
if (targetSpanId == "" || targetSpanId == "null") && len(payload) > 0 {
@@ -23,7 +24,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
parentID = spanItem.References[0].SpanId
}
span := &SpanForTraceDetails{
span := &model.SpanForTraceDetails{
TimeUnixNano: spanItem.TimeUnixNano,
SpanID: spanItem.SpanID,
TraceID: spanItem.TraceID,
@@ -44,7 +45,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
if err != nil {
return nil, err
}
targetSpan := &SpanForTraceDetails{}
targetSpan := &model.SpanForTraceDetails{}
// Find the target span in the span trees
for _, root := range roots {
@@ -64,7 +65,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
}
// Build the final result
parents := []*SpanForTraceDetails{}
parents := []*model.SpanForTraceDetails{}
// Get the parent spans of the target span up to the given levelUp parameter and spanLimit
preParent := targetSpan
@@ -89,11 +90,11 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
}
// Get the child spans of the target span until the given levelDown and spanLimit
preParents := []*SpanForTraceDetails{targetSpan}
children := []*SpanForTraceDetails{}
preParents := []*model.SpanForTraceDetails{targetSpan}
children := []*model.SpanForTraceDetails{}
for i := 0; i < levelDown && len(preParents) != 0 && spanLimit > 0; i++ {
parents := []*SpanForTraceDetails{}
parents := []*model.SpanForTraceDetails{}
for _, parent := range preParents {
if spanLimit-len(parent.Children) <= 0 {
children = append(children, parent.Children[:spanLimit]...)
@@ -107,7 +108,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
}
// Store the final list of spans in the resultSpansSet map to avoid duplicates
resultSpansSet := make(map[*SpanForTraceDetails]struct{})
resultSpansSet := make(map[*model.SpanForTraceDetails]struct{})
resultSpansSet[targetSpan] = struct{}{}
for _, parent := range parents {
resultSpansSet[parent] = struct{}{}
@@ -168,12 +169,12 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
}
// buildSpanTrees builds trees of spans from a list of spans.
func buildSpanTrees(spansPtr *[]*SpanForTraceDetails) ([]*SpanForTraceDetails, error) {
func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTraceDetails, error) {
// Build a map of spanID to span for fast lookup
var roots []*SpanForTraceDetails
var roots []*model.SpanForTraceDetails
spans := *spansPtr
mapOfSpans := make(map[string]*SpanForTraceDetails, len(spans))
mapOfSpans := make(map[string]*model.SpanForTraceDetails, len(spans))
for _, span := range spans {
if span.ParentID == "" {
@@ -205,8 +206,8 @@ func buildSpanTrees(spansPtr *[]*SpanForTraceDetails) ([]*SpanForTraceDetails, e
}
// breadthFirstSearch performs a breadth-first search on the span tree to find the target span.
func breadthFirstSearch(spansPtr *SpanForTraceDetails, targetId string) (*SpanForTraceDetails, error) {
queue := []*SpanForTraceDetails{spansPtr}
func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*model.SpanForTraceDetails, error) {
queue := []*model.SpanForTraceDetails{spansPtr}
visited := make(map[string]bool)
for len(queue) > 0 {
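
The excerpt cuts off inside breadthFirstSearch, which walks the span tree level by level while tracking visited span IDs until it reaches the target. A self-contained sketch of that traversal over a simplified span type (the real SpanForTraceDetails also carries timing, service and event fields):

package main

import (
	"errors"
	"fmt"
)

// span is a simplified stand-in for model.SpanForTraceDetails.
type span struct {
	SpanID   string
	Children []*span
}

// breadthFirstSearch walks the tree rooted at root level by level until it
// finds the span with the target ID, mirroring the traversal in this file.
func breadthFirstSearch(root *span, targetID string) (*span, error) {
	queue := []*span{root}
	visited := make(map[string]bool)
	for len(queue) > 0 {
		current := queue[0]
		queue = queue[1:]
		if visited[current.SpanID] {
			continue
		}
		visited[current.SpanID] = true
		if current.SpanID == targetID {
			return current, nil
		}
		queue = append(queue, current.Children...)
	}
	return nil, errors.New("target span not found")
}

func main() {
	leaf := &span{SpanID: "c"}
	root := &span{SpanID: "a", Children: []*span{{SpanID: "b", Children: []*span{leaf}}}}

	found, err := breadthFirstSearch(root, "c")
	fmt.Println(found == leaf, err) // true <nil>

	_, err = breadthFirstSearch(root, "missing")
	fmt.Println(err) // target span not found
}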

View File

@@ -1,61 +1,78 @@
package app
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
_ "net/http/pprof" // http profiler
"os"
"regexp"
"time"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/jmoiron/sqlx"
eemiddleware "github.com/SigNoz/signoz/ee/http/middleware"
"github.com/SigNoz/signoz/ee/query-service/app/api"
"github.com/SigNoz/signoz/ee/query-service/app/db"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/dao"
"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
"github.com/SigNoz/signoz/ee/query-service/rules"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/http/middleware"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/web"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"go.signoz.io/signoz/ee/query-service/app/api"
"go.signoz.io/signoz/ee/query-service/app/db"
"go.signoz.io/signoz/ee/query-service/auth"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/dao"
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/rules"
"go.signoz.io/signoz/pkg/http/middleware"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/signoz"
"go.signoz.io/signoz/pkg/web"
licensepkg "github.com/SigNoz/signoz/ee/query-service/license"
"github.com/SigNoz/signoz/ee/query-service/usage"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/dashboards"
baseexplorer "github.com/SigNoz/signoz/pkg/query-service/app/explorer"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
opAmpModel "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
"github.com/SigNoz/signoz/pkg/query-service/cache"
baseconst "github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/telemetry"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"go.signoz.io/signoz/pkg/query-service/agentConf"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/app/opamp"
opAmpModel "go.signoz.io/signoz/pkg/query-service/app/opamp/model"
"go.signoz.io/signoz/pkg/query-service/app/preferences"
"go.signoz.io/signoz/pkg/query-service/cache"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/healthcheck"
basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
baserules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
const AppDbEngine = "sqlite"
type ServerOptions struct {
Config signoz.Config
SigNoz *signoz.SigNoz
HTTPHostPort string
PrivateHostPort string
Config signoz.Config
SigNoz *signoz.SigNoz
PromConfigPath string
SkipTopLvlOpsPath string
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
PreferSpanMetrics bool
CacheConfigPath string
FluxInterval string
@@ -64,7 +81,6 @@ type ServerOptions struct {
GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
Jwt *authtypes.JWT
}
// Server runs HTTP api service
@@ -95,16 +111,20 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {
modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore)
modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}
if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore); err != nil {
if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}
if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore); err != nil {
if err := preferences.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}
if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}
@@ -114,30 +134,48 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
// initiate license manager
lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB(), serverOptions.SigNoz.SQLStore)
lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}
// set license manager as feature flag provider in dao
modelDao.SetFlagProvider(lm)
readerReady := make(chan bool)
fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
if err != nil {
return nil, err
}
reader := db.NewDataConnector(
serverOptions.SigNoz.SQLStore,
serverOptions.SigNoz.TelemetryStore,
serverOptions.SigNoz.Prometheus,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
fluxIntervalForTraceDetail,
serverOptions.SigNoz.Cache,
)
var reader interfaces.DataConnector
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
zap.L().Info("Using ClickHouse as datastore ...")
qb := db.NewDataConnector(
serverOptions.SigNoz.SQLStore.SQLxDB(),
serverOptions.SigNoz.TelemetryStore.ClickHouseDB(),
serverOptions.PromConfigPath,
lm,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
fluxIntervalForTraceDetail,
serverOptions.SigNoz.Cache,
)
go qb.Start(readerReady)
reader = qb
} else {
return nil, fmt.Errorf("storage type: %s is not supported in query service", storage)
}
skipConfig := &basemodel.SkipConfig{}
if serverOptions.SkipTopLvlOpsPath != "" {
// read skip config
skipConfig, err = basemodel.ReadSkipConfig(serverOptions.SkipTopLvlOpsPath)
if err != nil {
return nil, err
}
}
var c cache.Cache
if serverOptions.CacheConfigPath != "" {
cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
@@ -147,16 +185,17 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
c = cache.NewCache(cacheOpts)
}
rm, err := makeRulesManager(
<-readerReady
rm, err := makeRulesManager(serverOptions.PromConfigPath,
baseconst.GetAlertManagerApiPrefix(),
serverOptions.RuleRepoURL,
serverOptions.SigNoz.SQLStore.SQLxDB(),
reader,
c,
serverOptions.DisableRules,
lm,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
serverOptions.SigNoz.Alertmanager,
serverOptions.SigNoz.SQLStore,
serverOptions.SigNoz.TelemetryStore,
serverOptions.SigNoz.Prometheus,
)
if err != nil {
@@ -169,14 +208,14 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
return nil, err
}
integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore)
integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, fmt.Errorf(
"couldn't create integrations controller: %w", err,
)
}
cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore)
cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, fmt.Errorf(
"couldn't create cloud provider integrations controller: %w", err,
@@ -185,7 +224,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
// ingestion pipelines manager
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
serverOptions.SigNoz.SQLStore, integrationsController.GetPipelinesForInstalledIntegrations,
serverOptions.SigNoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
)
if err != nil {
return nil, err
@@ -201,7 +240,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
// start the usagemanager
usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickhouseDB(), serverOptions.Config.TelemetryStore.Clickhouse.DSN)
usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickHouseDB(), serverOptions.Config.TelemetryStore.ClickHouse.DSN)
if err != nil {
return nil, err
}
@@ -220,6 +259,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
apiOpts := api.APIHandlerOptions{
DataConnector: reader,
SkipConfig: skipConfig,
PreferSpanMetrics: serverOptions.PreferSpanMetrics,
AppDao: modelDao,
RulesManager: rm,
@@ -235,10 +275,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
GatewayUrl: serverOptions.GatewayUrl,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
JWT: serverOptions.Jwt,
}
apiHandler, err := api.NewAPIHandler(apiOpts, serverOptions.SigNoz)
apiHandler, err := api.NewAPIHandler(apiOpts)
if err != nil {
return nil, err
}
@@ -271,11 +310,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
&opAmpModel.AllAgents, agentConfMgr,
)
errorList := reader.PreloadMetricsMetadata(context.Background())
for _, er := range errorList {
zap.L().Error("failed to preload metrics metadata", zap.Error(er))
}
return s, nil
}
@@ -283,8 +317,6 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
r := baseapp.NewRouter()
r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
r.Use(eemiddleware.NewPat(s.serverOptions.SigNoz.SQLStore, []string{"SIGNOZ-API-KEY"}).Wrap)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
@@ -312,11 +344,25 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
}
func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
r := baseapp.NewRouter()
am := middleware.NewAuthZ(s.serverOptions.SigNoz.Instrumentation.Logger())
r.Use(middleware.NewAuth(zap.L(), s.serverOptions.Jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}).Wrap)
r.Use(eemiddleware.NewPat(s.serverOptions.SigNoz.SQLStore, []string{"SIGNOZ-API-KEY"}).Wrap)
r := baseapp.NewRouter()
// add auth middleware
getUserFromRequest := func(r *http.Request) (*basemodel.UserPayload, error) {
user, err := auth.GetUserFromRequest(r, apiHandler)
if err != nil {
return nil, err
}
if user.User.OrgId == "" {
return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
}
return user, nil
}
am := baseapp.NewAuthMiddleware(getUserFromRequest)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
@@ -329,15 +375,11 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
apiHandler.RegisterLogsRoutes(r, am)
apiHandler.RegisterIntegrationRoutes(r, am)
apiHandler.RegisterCloudIntegrationsRoutes(r, am)
apiHandler.RegisterFieldsRoutes(r, am)
apiHandler.RegisterQueryRangeV3Routes(r, am)
apiHandler.RegisterInfraMetricsRoutes(r, am)
apiHandler.RegisterQueryRangeV4Routes(r, am)
apiHandler.RegisterWebSocketPaths(r, am)
apiHandler.RegisterMessagingQueuesRoutes(r, am)
apiHandler.RegisterThirdPartyApiRoutes(r, am)
apiHandler.MetricExplorerRoutes(r, am)
apiHandler.RegisterTraceFunnelsRoutes(r, am)
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
@@ -359,6 +401,174 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
}, nil
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
type loggingResponseWriter struct {
http.ResponseWriter
statusCode int
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
// WriteHeader(int) is not called if our response implicitly returns 200 OK, so
// we default to that status code.
return &loggingResponseWriter{w, http.StatusOK}
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Flush implements the http.Flusher interface.
func (lrw *loggingResponseWriter) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}
// TODO(remove): Implemented at pkg/http/middleware/logging.go
// Support websockets
func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := lrw.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("hijack not supported")
}
return h.Hijack()
}
func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}, bool) {
pathToExtractBodyFromV3 := "/api/v3/query_range"
pathToExtractBodyFromV4 := "/api/v4/query_range"
data := map[string]interface{}{}
var postData *v3.QueryRangeParamsV3
if (r.Method == "POST") && ((path == pathToExtractBodyFromV3) || (path == pathToExtractBodyFromV4)) {
if r.Body != nil {
bodyBytes, err := io.ReadAll(r.Body)
if err != nil {
return nil, false
}
r.Body.Close() // must close
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
json.Unmarshal(bodyBytes, &postData)
} else {
return nil, false
}
} else {
return nil, false
}
referrer := r.Header.Get("Referer")
dashboardMatched, err := regexp.MatchString(`/dashboard/[a-zA-Z0-9\-]+/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the referrer", zap.Error(err))
}
alertMatched, err := regexp.MatchString(`/alerts/(new|edit)(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the alert: ", zap.Error(err))
}
logsExplorerMatched, err := regexp.MatchString(`/logs/logs-explorer(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the logs explorer: ", zap.Error(err))
}
traceExplorerMatched, err := regexp.MatchString(`/traces-explorer(?:\?.*)?$`, referrer)
if err != nil {
zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
}
queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)
if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
if queryInfoResult.MetricsUsed {
telemetry.GetInstance().AddActiveMetricsUser()
}
if queryInfoResult.LogsUsed {
telemetry.GetInstance().AddActiveLogsUser()
}
if queryInfoResult.TracesUsed {
telemetry.GetInstance().AddActiveTracesUser()
}
data["metricsUsed"] = queryInfoResult.MetricsUsed
data["logsUsed"] = queryInfoResult.LogsUsed
data["tracesUsed"] = queryInfoResult.TracesUsed
data["filterApplied"] = queryInfoResult.FilterApplied
data["groupByApplied"] = queryInfoResult.GroupByApplied
data["aggregateOperator"] = queryInfoResult.AggregateOperator
data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
data["numberOfQueries"] = queryInfoResult.NumberOfQueries
data["queryType"] = queryInfoResult.QueryType
data["panelType"] = queryInfoResult.PanelType
userEmail, err := baseauth.GetEmailFromJwt(r.Context())
if err == nil {
// switch case to set data["screen"] based on the referrer
switch {
case dashboardMatched:
data["screen"] = "panel"
case alertMatched:
data["screen"] = "alert"
case logsExplorerMatched:
data["screen"] = "logs-explorer"
case traceExplorerMatched:
data["screen"] = "traces-explorer"
default:
data["screen"] = "unknown"
return data, true
}
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_QUERY_RANGE_API, data, userEmail, true, false)
}
}
return data, true
}
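
extractQueryRangeData needs to read the JSON body for telemetry while leaving it readable for the actual query handler, so it drains r.Body into memory and swaps in an io.NopCloser over the buffered bytes. A standalone sketch of that capture-and-restore idiom:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

// captureBody reads the request body, restores it so downstream handlers can
// read it again, and returns the captured bytes.
func captureBody(r *http.Request) ([]byte, error) {
	if r.Body == nil {
		return nil, nil
	}
	bodyBytes, err := io.ReadAll(r.Body)
	if err != nil {
		return nil, err
	}
	r.Body.Close() // the original body is consumed; close it
	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
	return bodyBytes, nil
}

func main() {
	r := httptest.NewRequest(http.MethodPost, "/api/v3/query_range", strings.NewReader(`{"start":1}`))

	captured, _ := captureBody(r)
	// A downstream read still sees the full body.
	downstream, _ := io.ReadAll(r.Body)

	fmt.Println(string(captured) == string(downstream)) // true
}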
func getActiveLogs(path string, r *http.Request) {
// if path == "/api/v1/dashboards/{uuid}" {
// telemetry.GetInstance().AddActiveMetricsUser()
// }
if path == "/api/v1/logs" {
hasFilters := len(r.URL.Query().Get("q"))
if hasFilters > 0 {
telemetry.GetInstance().AddActiveLogsUser()
}
}
}
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := baseauth.AttachJwtToContext(r.Context(), r)
r = r.WithContext(ctx)
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
queryRangeData, metadataExists := extractQueryRangeData(path, r)
getActiveLogs(path, r)
lrw := NewLoggingResponseWriter(w)
next.ServeHTTP(lrw, r)
data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
if metadataExists {
for key, value := range queryRangeData {
data[key] = value
}
}
if _, ok := telemetry.EnabledPaths()[path]; ok {
userEmail, err := baseauth.GetEmailFromJwt(r.Context())
if err == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data, userEmail, true, false)
}
}
})
}
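
analyticsMiddleware wraps every matched route, records the status code through the logging response writer, and forwards query-range metadata to telemetry. A trimmed-down, runnable sketch of the same shape using the standard library mux; in the server this middleware is wired through gorilla/mux and reports through the telemetry client rather than a callback:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// statusRecorder mirrors loggingResponseWriter: it defaults to 200 and captures
// whatever status the handler writes.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (s *statusRecorder) WriteHeader(code int) {
	s.status = code
	s.ResponseWriter.WriteHeader(code)
}

// withAnalytics is a trimmed-down analytics middleware: it wraps the response
// writer, calls the next handler, then reports the path and status code.
func withAnalytics(next http.Handler, report func(path string, status int)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
		next.ServeHTTP(rec, r)
		report(r.URL.Path, rec.status)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/api/v1/logs", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	})

	handler := withAnalytics(mux, func(path string, status int) {
		fmt.Println(path, status) // /api/v1/logs 204
	})

	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/api/v1/logs", nil))
}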
// initListeners initialises listeners of the server
func (s *Server) initListeners() error {
// listen on public port
@@ -392,8 +602,14 @@ func (s *Server) initListeners() error {
}
// Start listening on http and private http port concurrently
func (s *Server) Start(ctx context.Context) error {
s.ruleManager.Start(ctx)
func (s *Server) Start() error {
// initiate rule manager first
if !s.serverOptions.DisableRules {
s.ruleManager.Start()
} else {
zap.L().Info("msg: Rules disabled as rules.disable is set to TRUE")
}
err := s.initListeners()
if err != nil {
@@ -474,7 +690,7 @@ func (s *Server) Stop() error {
s.opampServer.Stop()
if s.ruleManager != nil {
s.ruleManager.Stop(context.Background())
s.ruleManager.Stop()
}
// stop usage manager
@@ -484,32 +700,48 @@ func (s *Server) Stop() error {
}
func makeRulesManager(
promConfigPath,
alertManagerURL string,
ruleRepoURL string,
db *sqlx.DB,
ch baseint.Reader,
cache cache.Cache,
disableRules bool,
fm baseint.FeatureLookup,
useLogsNewSchema bool,
useTraceNewSchema bool,
alertmanager alertmanager.Alertmanager,
sqlstore sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
prometheus prometheus.Prometheus,
) (*baserules.Manager, error) {
useTraceNewSchema bool) (*baserules.Manager, error) {
// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
if err != nil {
return nil, fmt.Errorf("failed to create pql engine : %v", err)
}
// notifier opts
notifierOpts := basealm.NotifierOptions{
QueueCapacity: 10000,
Timeout: 1 * time.Second,
AlertManagerURLs: []string{alertManagerURL},
}
// create manager opts
managerOpts := &baserules.ManagerOptions{
TelemetryStore: telemetryStore,
Prometheus: prometheus,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
Reader: ch,
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
NotifierOpts: notifierOpts,
PqlEngine: pqle,
RepoURL: ruleRepoURL,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
DisableRules: disableRules,
FeatureFlags: fm,
Reader: ch,
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
PrepareTestRuleFunc: rules.TestNotification,
Alertmanager: alertmanager,
SQLStore: sqlstore,
}
// create Manager

View File

@@ -0,0 +1,56 @@
package auth
import (
"context"
"fmt"
"net/http"
"time"
"go.signoz.io/signoz/ee/query-service/app/api"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.uber.org/zap"
)
func GetUserFromRequest(r *http.Request, apiHandler *api.APIHandler) (*basemodel.UserPayload, error) {
patToken := r.Header.Get("SIGNOZ-API-KEY")
if len(patToken) > 0 {
zap.L().Debug("Received a non-zero length PAT token")
ctx := context.Background()
dao := apiHandler.AppDao()
pat, err := dao.GetPAT(ctx, patToken)
if err == nil && pat != nil {
zap.L().Debug("Found valid PAT: ", zap.Any("pat", pat))
if pat.ExpiresAt < time.Now().Unix() && pat.ExpiresAt != 0 {
zap.L().Info("PAT has expired: ", zap.Any("pat", pat))
return nil, fmt.Errorf("PAT has expired")
}
group, apiErr := dao.GetGroupByName(ctx, pat.Role)
if apiErr != nil {
zap.L().Error("Error while getting group for PAT: ", zap.Any("apiErr", apiErr))
return nil, apiErr
}
user, err := dao.GetUser(ctx, pat.UserID)
if err != nil {
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
return nil, err
}
telemetry.GetInstance().SetPatTokenUser()
dao.UpdatePATLastUsed(ctx, patToken, time.Now().Unix())
user.User.GroupId = group.Id
user.User.Id = pat.Id
return &basemodel.UserPayload{
User: user.User,
Role: pat.Role,
}, nil
}
if err != nil {
zap.L().Error("Error while getting user for PAT: ", zap.Error(err))
return nil, err
}
}
return baseauth.GetUserFromRequest(r)
}

View File

@@ -5,7 +5,7 @@ import (
)
const (
DefaultSiteURL = "https://localhost:8080"
DefaultSiteURL = "https://localhost:3301"
)
var LicenseSignozIo = "https://license.signoz.io/api/v1"

View File

@@ -1,10 +1,10 @@
package dao
import (
"github.com/SigNoz/signoz/ee/query-service/dao/sqlite"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/ee/query-service/dao/sqlite"
)
func InitDao(sqlStore sqlstore.SQLStore) (ModelDao, error) {
return sqlite.InitDB(sqlStore)
func InitDao(inputDB *sqlx.DB) (ModelDao, error) {
return sqlite.InitDB(inputDB)
}

View File

@@ -4,14 +4,12 @@ import (
"context"
"net/url"
"github.com/SigNoz/signoz/ee/types"
basedao "github.com/SigNoz/signoz/pkg/query-service/dao"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"github.com/uptrace/bun"
"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/ee/query-service/model"
basedao "go.signoz.io/signoz/pkg/query-service/dao"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)
type ModelDao interface {
@@ -20,25 +18,27 @@ type ModelDao interface {
// SetFlagProvider sets the feature lookup provider
SetFlagProvider(flags baseint.FeatureLookup)
DB() *bun.DB
DB() *sqlx.DB
// auth methods
CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*types.GettableOrgDomain, error)
PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError)
GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error)
// org domain (auth domains) CRUD ops
ListDomains(ctx context.Context, orgId string) ([]types.GettableOrgDomain, basemodel.BaseApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*types.GettableOrgDomain, basemodel.BaseApiError)
CreateDomain(ctx context.Context, d *types.GettableOrgDomain) basemodel.BaseApiError
UpdateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError
ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError
UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
GetDomainByEmail(ctx context.Context, email string) (*types.GettableOrgDomain, basemodel.BaseApiError)
GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
CreatePAT(ctx context.Context, orgID string, p types.GettablePAT) (types.GettablePAT, basemodel.BaseApiError)
UpdatePAT(ctx context.Context, orgID string, p types.GettablePAT, id valuer.UUID) basemodel.BaseApiError
GetPAT(ctx context.Context, pat string) (*types.GettablePAT, basemodel.BaseApiError)
GetPATByID(ctx context.Context, orgID string, id valuer.UUID) (*types.GettablePAT, basemodel.BaseApiError)
ListPATs(ctx context.Context, orgID string) ([]types.GettablePAT, basemodel.BaseApiError)
RevokePAT(ctx context.Context, orgID string, id valuer.UUID, userID string) basemodel.BaseApiError
CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError)
UpdatePAT(ctx context.Context, p model.PAT, id string) basemodel.BaseApiError
GetPAT(ctx context.Context, pat string) (*model.PAT, basemodel.BaseApiError)
UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError
GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError)
RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError
}

View File

@@ -4,20 +4,20 @@ import (
"context"
"fmt"
"net/url"
"strings"
"time"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/ee/query-service/model"
baseauth "github.com/SigNoz/signoz/pkg/query-service/auth"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/google/uuid"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils"
"go.uber.org/zap"
)
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*types.User, basemodel.BaseApiError) {
func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (*basemodel.User, basemodel.BaseApiError) {
// get auth domain from email domain
domain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
@@ -35,20 +35,24 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
return nil, model.InternalErrorStr("failed to generate password hash")
}
user := &types.User{
ID: uuid.New().String(),
Name: "",
Email: email,
Password: hash,
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
},
ProfilePictureURL: "", // Currently unused
Role: authtypes.RoleViewer.String(),
OrgID: domain.OrgID,
group, apiErr := m.GetGroupByName(ctx, baseconst.ViewerGroup)
if apiErr != nil {
zap.L().Error("GetGroupByName failed", zap.Error(apiErr))
return nil, apiErr
}
user, apiErr := m.CreateUser(ctx, user, false)
user := &basemodel.User{
Id: uuid.NewString(),
Name: "",
Email: email,
Password: hash,
CreatedAt: time.Now().Unix(),
ProfilePictureURL: "", // Currently unused
GroupId: group.Id,
OrgId: domain.OrgId,
}
user, apiErr = m.CreateUser(ctx, user, false)
if apiErr != nil {
zap.L().Error("CreateUser failed", zap.Error(apiErr))
return nil, apiErr
@@ -60,7 +64,7 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
// PrepareSsoRedirect prepares the redirect link after the SSO response
// is successfully parsed (i.e. a valid email is available)
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string, jwt *authtypes.JWT) (redirectURL string, apierr basemodel.BaseApiError) {
func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email string) (redirectURL string, apierr basemodel.BaseApiError) {
userPayload, apierr := m.GetUserByEmail(ctx, email)
if !apierr.IsNil() {
@@ -68,7 +72,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
return "", model.BadRequestStr("invalid user email received from the auth provider")
}
user := &types.User{}
user := &basemodel.User{}
if userPayload == nil {
newUser, apiErr := m.createUserForSAMLRequest(ctx, email)
@@ -81,7 +85,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
user = &userPayload.User
}
tokenStore, err := baseauth.GenerateJWTForUser(user, jwt)
tokenStore, err := baseauth.GenerateJWTForUser(user)
if err != nil {
zap.L().Error("failed to generate token for SSO login user", zap.Error(err))
return "", model.InternalErrorStr("failed to generate token for the user")
@@ -90,7 +94,7 @@ func (m *modelDao) PrepareSsoRedirect(ctx context.Context, redirectUri, email st
return fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
redirectUri,
tokenStore.AccessJwt,
user.ID,
user.Id,
tokenStore.RefreshJwt), nil
}
@@ -108,7 +112,7 @@ func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, base
return false, baseapierr
}
if userPayload.Role != authtypes.RoleAdmin.String() {
if userPayload.Role != baseconst.AdminGroup {
return false, model.BadRequest(fmt.Errorf("auth method not supported"))
}
@@ -154,7 +158,12 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
// find domain from email
orgDomain, apierr := m.GetDomainByEmail(ctx, email)
if apierr != nil {
zap.L().Error("failed to get org domain from email", zap.String("email", email), zap.Error(apierr.ToError()))
var emailDomain string
emailComponents := strings.Split(email, "@")
if len(emailComponents) > 1 {
emailDomain = emailComponents[1]
}
zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
return resp, apierr
}

View File

@@ -9,23 +9,32 @@ import (
"strings"
"time"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/ee/types"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
ossTypes "github.com/SigNoz/signoz/pkg/types"
"github.com/google/uuid"
"go.signoz.io/signoz/ee/query-service/model"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
// StoredDomain represents a stored database record for an org domain
type StoredDomain struct {
Id uuid.UUID `db:"id"`
Name string `db:"name"`
OrgId string `db:"org_id"`
Data string `db:"data"`
CreatedAt int64 `db:"created_at"`
UpdatedAt int64 `db:"updated_at"`
}
// GetDomainFromSsoResponse uses the relay state received from the IdP to fetch
// the user's domain. The domain is then used to validate the response.
// When sending the login request to the IdP, we send the relay state as a URL (the site URL)
// with domainId or domainName as a query parameter.
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*types.GettableOrgDomain, error) {
func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url.URL) (*model.OrgDomain, error) {
// derive domain id from relay state now
var domainIdStr string
var domainNameStr string
var domain *types.GettableOrgDomain
var domain *model.OrgDomain
for k, v := range relayState.Query() {
if k == "domainId" && len(v) > 0 {
@@ -67,14 +76,10 @@ func (m *modelDao) GetDomainFromSsoResponse(ctx context.Context, relayState *url
}
// GetDomainByName returns org domain for a given domain name
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*types.GettableOrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*model.OrgDomain, basemodel.BaseApiError) {
stored := types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("name = ?", name).
Limit(1).
Scan(ctx)
stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, name)
if err != nil {
if err == sql.ErrNoRows {
@@ -83,7 +88,7 @@ func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*types.Get
return nil, model.InternalError(err)
}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
@@ -91,14 +96,10 @@ func (m *modelDao) GetDomainByName(ctx context.Context, name string) (*types.Get
}
// GetDomain returns org domain for a given domain id
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*types.GettableOrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {
stored := types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("id = ?", id).
Limit(1).
Scan(ctx)
stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)
if err != nil {
if err == sql.ErrNoRows {
@@ -107,7 +108,7 @@ func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*types.Gettable
return nil, model.InternalError(err)
}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
@@ -115,24 +116,21 @@ func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*types.Gettable
}
// ListDomains gets the list of auth domains by org id
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]types.GettableOrgDomain, basemodel.BaseApiError) {
domains := []types.GettableOrgDomain{}
func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) {
domains := []model.OrgDomain{}
stored := []types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("org_id = ?", orgId).
Scan(ctx)
stored := []StoredDomain{}
err := m.DB().SelectContext(ctx, &stored, `SELECT * FROM org_domains WHERE org_id=$1`, orgId)
if err != nil {
if err == sql.ErrNoRows {
return domains, nil
return []model.OrgDomain{}, nil
}
return nil, model.InternalError(err)
}
for _, s := range stored {
domain := types.GettableOrgDomain{StorableOrgDomain: s}
domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
if err := domain.LoadConfig(s.Data); err != nil {
zap.L().Error("ListDomains() failed", zap.Error(err))
}
@@ -143,14 +141,14 @@ func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]types.Getta
}
// CreateDomain creates a new auth domain
func (m *modelDao) CreateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError {
func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
if domain.ID == uuid.Nil {
domain.ID = uuid.New()
if domain.Id == uuid.Nil {
domain.Id = uuid.New()
}
if domain.OrgID == "" || domain.Name == "" {
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgID, Name "))
if domain.OrgId == "" || domain.Name == "" {
return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
}
configJson, err := json.Marshal(domain)
@@ -159,17 +157,14 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *types.GettableOrgDo
return model.InternalError(fmt.Errorf("domain creation failed"))
}
storableDomain := types.StorableOrgDomain{
ID: domain.ID,
Name: domain.Name,
OrgID: domain.OrgID,
Data: string(configJson),
TimeAuditable: ossTypes.TimeAuditable{CreatedAt: time.Now(), UpdatedAt: time.Now()},
}
_, err = m.DB().NewInsert().
Model(&storableDomain).
Exec(ctx)
_, err = m.DB().ExecContext(ctx,
"INSERT INTO org_domains (id, name, org_id, data, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6)",
domain.Id,
domain.Name,
domain.OrgId,
configJson,
time.Now().Unix(),
time.Now().Unix())
if err != nil {
zap.L().Error("failed to insert domain in db", zap.Error(err))
@@ -180,9 +175,9 @@ func (m *modelDao) CreateDomain(ctx context.Context, domain *types.GettableOrgDo
}
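Both CreateDomain variants persist the SSO settings by marshalling the whole domain into the data column, and the getters hydrate it back with LoadConfig. The sketch below illustrates that round trip with a stand-in type; the field set is an assumption, and plain json.Unmarshal is used as a simplification of whatever LoadConfig actually does.

package main

import (
    "encoding/json"
    "fmt"
)

// ssoDomain is a stand-in for the real domain type; its fields are assumptions
// chosen only to illustrate how the data column is produced and consumed.
type ssoDomain struct {
    Name       string `json:"name"`
    SsoEnabled bool   `json:"ssoEnabled"`
}

func main() {
    d := ssoDomain{Name: "example.com", SsoEnabled: true}

    // CreateDomain/UpdateDomain: marshal the domain into the data column.
    data, _ := json.Marshal(d)
    fmt.Println(string(data))

    // Getters: read the row back and hydrate the domain from that column
    // (LoadConfig in the real code; plain Unmarshal here as a simplification).
    var restored ssoDomain
    _ = json.Unmarshal(data, &restored)
    fmt.Printf("%+v\n", restored)
}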
// UpdateDomain updates stored config params for a domain
func (m *modelDao) UpdateDomain(ctx context.Context, domain *types.GettableOrgDomain) basemodel.BaseApiError {
func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
if domain.ID == uuid.Nil {
if domain.Id == uuid.Nil {
zap.L().Error("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
return model.InternalError(fmt.Errorf("domain update failed"))
}
@@ -193,19 +188,11 @@ func (m *modelDao) UpdateDomain(ctx context.Context, domain *types.GettableOrgDo
return model.InternalError(fmt.Errorf("domain update failed"))
}
storableDomain := &types.StorableOrgDomain{
ID: domain.ID,
Name: domain.Name,
OrgID: domain.OrgID,
Data: string(configJson),
TimeAuditable: ossTypes.TimeAuditable{UpdatedAt: time.Now()},
}
_, err = m.DB().NewUpdate().
Model(storableDomain).
Column("data", "updated_at").
WherePK().
Exec(ctx)
_, err = m.DB().ExecContext(ctx,
"UPDATE org_domains SET data = $1, updated_at = $2 WHERE id = $3",
configJson,
time.Now().Unix(),
domain.Id)
if err != nil {
zap.L().Error("domain update failed", zap.Error(err))
@@ -223,11 +210,9 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
return model.InternalError(fmt.Errorf("domain delete failed"))
}
storableDomain := &types.StorableOrgDomain{ID: id}
_, err := m.DB().NewDelete().
Model(storableDomain).
WherePK().
Exec(ctx)
_, err := m.DB().ExecContext(ctx,
"DELETE FROM org_domains WHERE id = $1",
id)
if err != nil {
zap.L().Error("domain delete failed", zap.Error(err))
@@ -237,7 +222,7 @@ func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.Bas
return nil
}
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*types.GettableOrgDomain, basemodel.BaseApiError) {
func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) {
if email == "" {
return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
@@ -250,12 +235,8 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*types.G
parsedDomain := components[1]
stored := types.StorableOrgDomain{}
err := m.DB().NewSelect().
Model(&stored).
Where("name = ?", parsedDomain).
Limit(1).
Scan(ctx)
stored := StoredDomain{}
err := m.DB().Get(&stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, parsedDomain)
if err != nil {
if err == sql.ErrNoRows {
@@ -264,7 +245,7 @@ func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*types.G
return nil, model.InternalError(err)
}
domain := &types.GettableOrgDomain{StorableOrgDomain: stored}
domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
if err := domain.LoadConfig(stored.Data); err != nil {
return nil, model.InternalError(err)
}
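GetDomainByEmail relies on splitting the email address and using everything after the "@" as the auth-domain name. A minimal sketch of that parsing, with an assumed helper name:

package main

import (
    "fmt"
    "strings"
)

// domainFromEmail mirrors the parsing GetDomainByEmail relies on: the part
// after the "@" is treated as the org domain name.
func domainFromEmail(email string) (string, error) {
    components := strings.Split(email, "@")
    if len(components) != 2 || components[1] == "" {
        return "", fmt.Errorf("invalid email: %q", email)
    }
    return components[1], nil
}

func main() {
    name, err := domainFromEmail("alice@example.com")
    fmt.Println(name, err) // example.com <nil>
}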

View File

@@ -3,11 +3,11 @@ package sqlite
import (
"fmt"
basedao "github.com/SigNoz/signoz/pkg/query-service/dao"
basedsql "github.com/SigNoz/signoz/pkg/query-service/dao/sqlite"
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/jmoiron/sqlx"
basedao "go.signoz.io/signoz/pkg/query-service/dao"
basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
"go.uber.org/zap"
)
type modelDao struct {
@@ -29,18 +29,116 @@ func (m *modelDao) checkFeature(key string) error {
return m.flags.CheckFeature(key)
}
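// columnExists reports whether columnName is already present on tableName,
// based on the rows returned by SQLite's PRAGMA table_info; it keeps the
// ALTER TABLE migrations in InitDB below idempotent.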
func columnExists(db *sqlx.DB, tableName, columnName string) bool {
query := fmt.Sprintf("PRAGMA table_info(%s);", tableName)
rows, err := db.Query(query)
if err != nil {
zap.L().Error("Failed to query table info", zap.Error(err))
return false
}
defer rows.Close()
var (
cid int
name string
ctype string
notnull int
dflt_value *string
pk int
)
for rows.Next() {
err := rows.Scan(&cid, &name, &ctype, &notnull, &dflt_value, &pk)
if err != nil {
zap.L().Error("Failed to scan table info", zap.Error(err))
return false
}
if name == columnName {
return true
}
}
err = rows.Err()
if err != nil {
zap.L().Error("Failed to scan table info", zap.Error(err))
return false
}
return false
}
// InitDB creates and extends base model DB repository
func InitDB(sqlStore sqlstore.SQLStore) (*modelDao, error) {
dao, err := basedsql.InitDB(sqlStore)
func InitDB(inputDB *sqlx.DB) (*modelDao, error) {
dao, err := basedsql.InitDB(inputDB)
if err != nil {
return nil, err
}
// set package variable so dependent base methods (e.g. AuthCache) will work
basedao.SetDB(dao)
m := &modelDao{ModelDaoSqlite: dao}
table_schema := `
PRAGMA foreign_keys = ON;
CREATE TABLE IF NOT EXISTS org_domains(
id TEXT PRIMARY KEY,
org_id TEXT NOT NULL,
name VARCHAR(50) NOT NULL UNIQUE,
created_at INTEGER NOT NULL,
updated_at INTEGER,
data TEXT NOT NULL,
FOREIGN KEY(org_id) REFERENCES organizations(id)
);
CREATE TABLE IF NOT EXISTS personal_access_tokens (
id INTEGER PRIMARY KEY AUTOINCREMENT,
role TEXT NOT NULL,
user_id TEXT NOT NULL,
token TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
created_at INTEGER NOT NULL,
expires_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
last_used INTEGER NOT NULL,
revoked BOOLEAN NOT NULL,
updated_by_user_id TEXT NOT NULL,
FOREIGN KEY(user_id) REFERENCES users(id)
);
`
_, err = m.DB().Exec(table_schema)
if err != nil {
return nil, fmt.Errorf("error in creating tables: %v", err.Error())
}
if !columnExists(m.DB(), "personal_access_tokens", "role") {
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN role TEXT NOT NULL DEFAULT 'ADMIN';")
if err != nil {
return nil, fmt.Errorf("error in adding column: %v", err.Error())
}
}
if !columnExists(m.DB(), "personal_access_tokens", "updated_at") {
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_at INTEGER NOT NULL DEFAULT 0;")
if err != nil {
return nil, fmt.Errorf("error in adding column: %v", err.Error())
}
}
if !columnExists(m.DB(), "personal_access_tokens", "last_used") {
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;")
if err != nil {
return nil, fmt.Errorf("error in adding column: %v", err.Error())
}
}
if !columnExists(m.DB(), "personal_access_tokens", "revoked") {
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN revoked BOOLEAN NOT NULL DEFAULT FALSE;")
if err != nil {
return nil, fmt.Errorf("error in adding column: %v", err.Error())
}
}
if !columnExists(m.DB(), "personal_access_tokens", "updated_by_user_id") {
_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_by_user_id TEXT NOT NULL DEFAULT '';")
if err != nil {
return nil, fmt.Errorf("error in adding column: %v", err.Error())
}
}
return m, nil
}
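As a wiring sketch for the sqlx side of this diff: opening a SQLite handle and handing it to InitDB is all that is needed to run the migrations above. The helper name, driver name, and path below are assumptions, and error handling is kept minimal.

// newTestDAO is an illustrative helper assumed for this sketch only; it lives
// in the same package so it can call InitDB directly.
func newTestDAO(path string) (*modelDao, error) {
    // "sqlite3" assumes a registered SQLite driver (e.g. mattn/go-sqlite3).
    db, err := sqlx.Open("sqlite3", path)
    if err != nil {
        return nil, err
    }
    // Runs the org_domains / personal_access_tokens schema and column migrations.
    return InitDB(db)
}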
func (m *modelDao) DB() *bun.DB {
func (m *modelDao) DB() *sqlx.DB {
return m.ModelDaoSqlite.DB()
}

Some files were not shown because too many files have changed in this diff.