Compare commits


21 Commits

Author SHA1 Message Date
Shaheer Kochai
1b48eb81c6 feat: add timezone support to the graphs throughout the app (#6520) 2024-12-03 13:56:48 +04:30
ahmadshaheer
504811546e chore: added null check for timezone.value in updateTimezone 2024-12-02 19:40:27 +04:30
ahmadshaheer
e012f10395 chore: overall improvements 2024-11-20 18:42:41 +04:30
ahmadshaheer
676e32ea09 chore: store timezone string in local storage instead of object 2024-11-20 18:20:48 +04:30
ahmadshaheer
28045772b8 fix: get active timezone in timepicker hint 2024-11-20 09:23:52 +04:30
ahmadshaheer
b1120c7d16 fix: if timezone is undefined, fallback to browser time zone 2024-11-20 09:19:55 +04:30
ahmadshaheer
55c9205aad feat: timezone setting functionality in timepicker, timezone picker, and timezone preferences 2024-11-20 09:19:55 +04:30
ahmadshaheer
5b4f423f9f feat: create a context for timezone and handle the timezone configuration 2024-11-20 09:19:16 +04:30
ahmadshaheer
cc376ce6a8 chore: improve timezoneUtils by adding a function to get browser timezone 2024-11-20 09:19:16 +04:30
ahmadshaheer
20e00c597a fix: display the timezone in timepicker hint 'You are at' 2024-11-20 09:18:43 +04:30
ahmadshaheer
ff7da5c05b fix: fix the issue of timezone breaking for browser and utc timezones 2024-11-19 18:42:20 +04:30
ahmadshaheer
14ccadaeb5 fix: don't focus on time picker when timezone is clicked 2024-11-19 13:55:42 +04:30
ahmadshaheer
984f3829dd chore: fix the typo 2024-11-19 13:37:18 +04:30
ahmadshaheer
31a9ead2fc feat: display timezone in timepicker input 2024-11-19 13:34:15 +04:30
ahmadshaheer
65ce8eaf14 chore: change timezone item from div to button 2024-11-19 13:33:14 +04:30
ahmadshaheer
daec491c79 chore: improve timezone utils 2024-11-19 13:29:33 +04:30
ahmadshaheer
49e29567f4 feat: timezone preferences UI 2024-11-19 10:02:00 +04:30
ahmadshaheer
8edd5fe7d6 fix: overall improvement + add searchIndex to timezone 2024-11-18 18:49:22 +04:30
ahmadshaheer
178a3153dd chore: add the selected timezone as url param and close timezone picker on select 2024-11-18 18:38:45 +04:30
ahmadshaheer
e7f1b27a5b feat: add support for esc keypress to close the timezone picker 2024-11-18 18:27:38 +04:30
ahmadshaheer
dbf0f236be feat: time picker hint and timezone picker UI with basic functionality + helper to get timezones 2024-11-18 17:47:48 +04:30
1333 changed files with 13837 additions and 92042 deletions
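Taken together, the commits above describe one pattern: resolve an IANA timezone string (stored preference first, browser zone as fallback) and persist it as a plain string. The sketch below illustrates that flow in TypeScript; the function names, the storage key, and the `timezone.value` shape are assumptions for illustration, not the actual SigNoz frontend code.

```ts
// Illustrative only -- names and the storage key are assumed, not taken from the PR.
const TIMEZONE_STORAGE_KEY = 'preferredTimezone';

// Detect the browser's IANA timezone, e.g. "Asia/Kabul" (standard Intl API).
export function getBrowserTimezone(): string {
	return Intl.DateTimeFormat().resolvedOptions().timeZone;
}

// Resolve the active timezone: stored preference first, browser fallback
// (cf. b1120c7d16 "if timezone is undefined, fallback to browser time zone").
export function getActiveTimezone(): string {
	return window.localStorage.getItem(TIMEZONE_STORAGE_KEY) ?? getBrowserTimezone();
}

// Persist the preference as a plain string rather than a serialized object
// (cf. 676e32ea09), guarding against a missing value (cf. 504811546e).
export function updateTimezone(timezone?: { value?: string } | null): void {
	if (!timezone?.value) return;
	window.localStorage.setItem(TIMEZONE_STORAGE_KEY, timezone.value);
}
```

Storing the raw string keeps the localStorage entry simple to read back and avoids JSON parsing errors on load.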


@@ -4,6 +4,3 @@
 README.md
 deploy
 sample-apps
-# frontend
-node_modules


@@ -3,6 +3,7 @@ name: build-pipeline
 on:
   pull_request:
     branches:
+      - develop
       - main
       - release/v*


@@ -3,7 +3,7 @@ name: "Update PR labels and Block PR until related docs are shipped for the feat
 on:
   pull_request:
     branches:
-      - main
+      - develop
     types: [opened, edited, labeled, unlabeled]
 permissions:


@@ -42,7 +42,7 @@ jobs:
           kubectl create ns sample-application
           # apply hotrod k8s manifest file
-          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
+          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
           # wait for all deployments in sample-application namespace to be READY
           kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s


@@ -1,35 +0,0 @@
name: goreleaser
on:
  push:
    tags:
      - v*
      - histogram-quantile/v*
permissions:
  contents: write
jobs:
  goreleaser:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        workdirs:
          - scripts/clickhouse/histogramquantile
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: set-up-go
        uses: actions/setup-go@v5
      - name: run-goreleaser
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: release --clean
          workdir: ${{ matrix.workdirs }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}


@@ -2,8 +2,7 @@ name: Jest Coverage - changed files
 on:
   pull_request:
-    branches:
-      - main
+    branches: develop
 jobs:
   build:
@@ -12,7 +11,7 @@ jobs:
       - name: Checkout
        uses: actions/checkout@v4
        with:
-          ref: "refs/heads/main"
+          ref: "refs/heads/develop"
          token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
      - name: Fetch branch


@@ -0,0 +1,19 @@
# This workflow will inspect a pull request to ensure there is a linked issue or a
# valid issue is mentioned in the body. If neither is present it fails the check and adds
# a comment alerting users of this missing requirement.
name: VerifyIssue
on:
  pull_request:
    types: [edited, opened]
  check_run:
jobs:
  verify_linked_issue:
    runs-on: ubuntu-latest
    name: Ensure Pull Request has a linked issue.
    steps:
      - name: Verify Linked Issue
        uses: srikanthccv/verify-linked-issue-action@v0.71
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -1,36 +0,0 @@
name: prereleaser
on:
  # schedule every wednesday 9:30 AM UTC (3pm IST)
  schedule:
    - cron: '30 9 * * 3'
  # allow manual triggering of the workflow by a maintainer
  workflow_dispatch:
    inputs:
      release_type:
        description: "Type of the release"
        type: choice
        required: true
        options:
          - 'patch'
          - 'minor'
          - 'major'
jobs:
  verify:
    uses: signoz/primus.workflows/.github/workflows/github-verify.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
      GITHUB_TEAM_NAME: releaser
      GITHUB_MEMBER_NAME: ${{ github.actor }}
  signoz:
    if: ${{ always() && (needs.verify.result == 'success' || github.event.name == 'schedule') }}
    uses: signoz/primus.workflows/.github/workflows/releaser.yaml@main
    secrets: inherit
    needs: [verify]
    with:
      PRIMUS_REF: main
      PROJECT_NAME: signoz
      RELEASE_TYPE: ${{ inputs.release_type || 'minor' }}


@@ -4,6 +4,7 @@ on:
   push:
     branches:
       - main
+      - develop
   tags:
     - v*
@@ -57,17 +58,6 @@ jobs:
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
-      - name: Create .env file
-        run: |
-          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
-          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
-          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
-          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
-          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
-          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
-          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
-          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
-          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
       - name: Setup golang
        uses: actions/setup-go@v4
        with:


@@ -1,34 +0,0 @@
name: releaser
on:
  # trigger on new latest release
  release:
    types: [published]
jobs:
  detect:
    if: ${{ !startsWith(github.event.release.tag_name, 'histogram-quantile/') }}
    runs-on: ubuntu-latest
    outputs:
      release_type: ${{ steps.find.outputs.release_type }}
    steps:
      - id: find
        name: find
        run: |
          release_tag=${{ github.event.release.tag_name }}
          patch_number=$(echo $release_tag | awk -F. '{print $3}')
          release_type="minor"
          if [[ $patch_number -ne 0 ]]; then
            release_type="patch"
          fi
          echo "release_type=${release_type}" >> "$GITHUB_OUTPUT"
  charts:
    if: ${{ !startsWith(github.event.release.tag_name, 'histogram-quantile/') }}
    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
    secrets: inherit
    needs: [detect]
    with:
      PRIMUS_REF: main
      GITHUB_REPOSITORY_NAME: charts
      GITHUB_EVENT_NAME: prereleaser
      GITHUB_EVENT_PAYLOAD: "{\"release_type\": \"${{ needs.detect.outputs.release_type }}\"}"


@@ -3,6 +3,7 @@ on:
   pull_request:
     branches:
       - main
+      - develop
     paths:
       - 'frontend/**'
 defaults:


@@ -1,12 +1,12 @@
 name: staging-deployment
-# Trigger deployment only on push to main branch
+# Trigger deployment only on push to develop branch
 on:
   push:
     branches:
-      - main
+      - develop
 jobs:
   deploy:
-    name: Deploy latest main branch to staging
+    name: Deploy latest develop branch to staging
     runs-on: ubuntu-latest
     environment: staging
     permissions:


@@ -44,7 +44,7 @@ jobs:
           git add .
           git stash push -m "stashed on $(date --iso-8601=seconds)"
           git fetch origin
-          git checkout main
+          git checkout develop
           git pull
           # This is added to include the scenerio when new commit in PR is force-pushed
           git branch -D ${GITHUB_BRANCH}

.gitignore

@@ -52,7 +52,7 @@ ee/query-service/tests/test-deploy/data/
 /deploy/docker/clickhouse-setup/data/
 /deploy/docker-swarm/clickhouse-setup/data/
 bin/
-.local/
 */query-service/queries.active
 # e2e
@@ -70,9 +70,3 @@ vendor/
 # git-town
 .git-branches.toml
-# goreleaser
-dist/
-# ignore user_scripts that is fetched by init-clickhouse
-deploy/common/clickhouse/user_scripts/


@@ -3,10 +3,16 @@
 tasks:
+  - name: Run Script to Comment ut required lines
+    init: |
+      cd ./.scripts
+      sh commentLinesForSetup.sh
   - name: Run Docker Images
     init: |
-      cd ./deploy/docker
-      sudo docker compose up -d
+      cd ./deploy
+      sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
+      # command:
   - name: Run Frontend
     init: |


@@ -141,9 +141,9 @@ Depending upon your area of expertise & interest, you can choose one or more to
 # 3. Develop Frontend 🌚
-**Need to Update: [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)**
+**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/frontend](https://github.com/SigNoz/signoz/tree/develop/frontend)**
-Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/main/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
+Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/develop/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
 ## 3.1 Contribute to Frontend with Docker installation of SigNoz
@@ -151,14 +151,14 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
 ```
 git clone https://github.com/SigNoz/signoz.git && cd signoz
 ```
-- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
+- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
 ![develop-frontend](https://user-images.githubusercontent.com/52788043/179009217-6692616b-17dc-4d27-b587-9d007098d739.jpeg)
 - run `cd deploy` to move to deploy directory,
 - Install signoz locally **without** the frontend,
-- Add / Uncomment the below configuration to query-service section at [`deploy/docker/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L47)
+- Add / Uncomment the below configuration to query-service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L47)
 ```
 ports:
   - "8080:8080"
@@ -167,10 +167,9 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
 - Next run,
 ```
-cd deploy/docker
-sudo docker compose up -d
+sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
 ```
-- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
+- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
 If you have backend api exposed via frontend nginx:
 ```
@@ -187,6 +186,11 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
 yarn dev
 ```
+### Important Notes:
+The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
+**[`^top^`](#contributing-guidelines)**
 ## 3.2 Contribute to Frontend without installing SigNoz backend
 If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.
@@ -212,7 +216,7 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
 # 4. Contribute to Backend (Query-Service) 🌑
-**Need to Update: [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)**
+**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**
 ## 4.1 Prerequisites
@@ -238,13 +242,13 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
 git clone https://github.com/SigNoz/signoz.git && cd signoz
 ```
 - run `sudo make dev-setup` to configure local setup to run query-service,
-- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
+- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
 <img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">
-- Comment out `query-service` section at [`deploy/docker/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L41)
+- Comment out `query-service` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L41)
 <img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">
-- add below configuration to `clickhouse` section at [`deploy/docker/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml)
+- add below configuration to `clickhouse` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml)
 ```
 ports:
   - 9001:9000
@@ -254,9 +258,9 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
 - run `cd pkg/query-service/` to move to `query-service` directory,
 - Then, you need to create a `.env` file with the following environment variable
 ```
-SIGNOZ_SQLSTORE_SQLITE_PATH="./signoz.db"
+SIGNOZ_LOCAL_DB_PATH="./signoz.db"
 ```
-to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/main/pkg/query-service/constants/constants.go#L38)
+to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/develop/pkg/query-service/constants/constants.go#L38)
 - Now, install SigNoz locally **without** the `frontend` and `query-service`,
 - If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
@@ -290,10 +294,13 @@ docker pull signoz/query-service:develop
 ```
 ### Important Note:
+The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
 **Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)
-If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
+If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
 <!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.
@@ -332,7 +339,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
 **5.1.1 To install the HotROD sample app:**
 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
 | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
 ```
@@ -355,7 +362,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
 **5.1.4 To delete the HotROD sample app:**
 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
 | HOTROD_NAMESPACE=sample-application bash
 ```


@@ -8,16 +8,14 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
 BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
 BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
 DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
-ZEUS_URL ?= https://api.signoz.cloud
 DEV_BUILD ?= "" # set to any non-empty value to enable dev build
 # Internal variables or constants.
 FRONTEND_DIRECTORY ?= frontend
 QUERY_SERVICE_DIRECTORY ?= pkg/query-service
 EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
-STANDALONE_DIRECTORY ?= deploy/docker
-SWARM_DIRECTORY ?= deploy/docker-swarm
-CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile
+STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
+SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
 GOOS ?= $(shell go env GOOS)
 GOARCH ?= $(shell go env GOARCH)
@@ -35,9 +33,8 @@ buildHash=${PACKAGE}/pkg/query-service/version.buildHash
 buildTime=${PACKAGE}/pkg/query-service/version.buildTime
 gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
 licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
-zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL
-LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
+LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
 DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}
 all: build-push-frontend build-push-query-service
@@ -99,12 +96,12 @@ build-query-service-static-arm64:
 # Steps to build static binary of query service for all platforms
 .PHONY: build-query-service-static-all
-build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64 build-frontend-static
+build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64
 # Steps to build and push docker image of query service
 .PHONY: build-query-service-amd64 build-push-query-service
 # Step to build docker image of query service in amd64 (used in build pipeline)
-build-query-service-amd64: build-query-service-static-amd64 build-frontend-static
+build-query-service-amd64: build-query-service-static-amd64
 	@echo "------------------"
 	@echo "--> Building query-service docker image for amd64"
 	@echo "------------------"
@@ -143,6 +140,16 @@ dev-setup:
 	@echo "--> Local Setup completed"
 	@echo "------------------"
+run-local:
+	@docker-compose -f \
+	$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
+	up --build -d
+down-local:
+	@docker-compose -f \
+	$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
+	down -v
 pull-signoz:
 	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
@@ -181,16 +188,4 @@ check-no-ee-references:
 	fi
 test:
-	go test ./pkg/...
+	go test ./pkg/query-service/...
-goreleaser-snapshot:
-	@if [[ ${GORELEASER_WORKDIR} ]]; then \
-		cd ${GORELEASER_WORKDIR} && \
-		goreleaser release --clean --snapshot; \
-		cd -; \
-	else \
-		goreleaser release --clean --snapshot; \
-	fi
-goreleaser-snapshot-histogram-quantile:
-	make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-snapshot


@@ -13,9 +13,9 @@
 <h3 align="center">
   <a href="https://signoz.io/docs"><b>Dokumentation</b></a> &bull;
-  <a href="https://github.com/SigNoz/signoz/blob/main/README.md"><b>Readme auf Englisch </b></a> &bull;
-  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> &bull;
-  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> &bull;
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> &bull;
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> &bull;
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> &bull;
   <a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
   <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
 </h3>


@@ -17,9 +17,9 @@
 <h3 align="center">
   <a href="https://signoz.io/docs"><b>Documentation</b></a> &bull;
-  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> &bull;
-  <a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> &bull;
-  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> &bull;
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe in Chinese</b></a> &bull;
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>ReadMe in German</b></a> &bull;
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe in Portuguese</b></a> &bull;
   <a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
   <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
 </h3>


@@ -12,9 +12,9 @@
 <h3 align="center">
   <a href="https://signoz.io/docs"><b>文档</b></a>
-  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>中文ReadMe</b></a>
-  <a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>德文ReadMe</b></a>
-  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>葡萄牙语ReadMe</b></a>
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>中文ReadMe</b></a>
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>德文ReadMe</b></a>
+  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>葡萄牙语ReadMe</b></a>
   <a href="https://signoz.io/slack"><b>Slack 社区</b></a>
   <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
 </h3>


@@ -1,95 +0,0 @@
##################### SigNoz Configuration Example #####################
#
# Do not modify this file
#
##################### Instrumentation #####################
instrumentation:
  logs:
    # The log level to use.
    level: info
  traces:
    # Whether to enable tracing.
    enabled: false
    processors:
      batch:
        exporter:
          otlp:
            endpoint: localhost:4317
  metrics:
    # Whether to enable metrics.
    enabled: true
    readers:
      pull:
        exporter:
          prometheus:
            host: "0.0.0.0"
            port: 9090
##################### Web #####################
web:
  # Whether to enable the web frontend
  enabled: true
  # The prefix to serve web on
  prefix: /
  # The directory containing the static build files.
  directory: /etc/signoz/web
##################### Cache #####################
cache:
  # specifies the caching provider to use.
  provider: memory
  # memory: Uses in-memory caching.
  memory:
    # Time-to-live for cache entries in memory. Specify the duration in ns
    ttl: 60000000000
    # The interval at which the cache will be cleaned up
    cleanupInterval: 1m
  # redis: Uses Redis as the caching backend.
  redis:
    # The hostname or IP address of the Redis server.
    host: localhost
    # The port on which the Redis server is running. Default is usually 6379.
    port: 6379
    # The password for authenticating with the Redis server, if required.
    password:
    # The Redis database number to use
    db: 0
##################### SQLStore #####################
sqlstore:
  # specifies the SQLStore provider to use.
  provider: sqlite
  # The maximum number of open connections to the database.
  max_open_conns: 100
  sqlite:
    # The path to the SQLite database file.
    path: /var/lib/signoz/signoz.db
##################### APIServer #####################
apiserver:
  timeout:
    default: 60s
    max: 600s
    excluded_routes:
      - /api/v1/logs/tail
      - /api/v3/logs/livetail
  logging:
    excluded_routes:
      - /api/v1/health
##################### TelemetryStore #####################
telemetrystore:
  # specifies the telemetrystore provider to use.
  provider: clickhouse
  clickhouse:
    # The DSN to use for ClickHouse.
    dsn: http://localhost:9000
    # Maximum number of idle connections in the connection pool.
    max_idle_conns: 50
    # Maximum number of open connections to the database.
    max_open_conns: 100
    # Maximum time to wait for a connection to be established.
    dial_timeout: 5s


@@ -18,64 +18,65 @@ Now run the following command to install:
 ### Using Docker Compose
-If you don't have docker compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
+If you don't have docker-compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
 to set up docker compose before proceeding with the next steps.
-```sh
-cd deploy/docker
-docker compose up -d
-```
-Open http://localhost:3301 in your favourite browser.
-To start collecting logs and metrics from your infrastructure, run the following command:
-```sh
-cd generator/infra
-docker compose up -d
-```
-To start generating sample traces, run the following command:
-```sh
-cd generator/hotrod
-docker compose up -d
-```
-In a couple of minutes, you should see the data generated from hotrod in SigNoz UI.
-For more details, please refer to the [SigNoz documentation](https://signoz.io/docs/install/docker/).
-## Docker Swarm
-To install SigNoz using Docker Swarm, run the following command:
-```sh
-cd deploy/docker-swarm
-docker stack deploy -c docker-compose.yaml signoz
-```
-Open http://localhost:3301 in your favourite browser.
-To start collecting logs and metrics from your infrastructure, run the following command:
-```sh
-cd generator/infra
-docker stack deploy -c docker-compose.yaml infra
-```
-To start generating sample traces, run the following command:
-```sh
-cd generator/hotrod
-docker stack deploy -c docker-compose.yaml hotrod
-```
-In a couple of minutes, you should see the data generated from hotrod in SigNoz UI.
-For more details, please refer to the [SigNoz documentation](https://signoz.io/docs/install/docker-swarm/).
+For x86 chip (amd):
+```sh
+docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
+```
+Open http://localhost:3301 in your favourite browser. In couple of minutes, you should see
+the data generated from hotrod in SigNoz UI.
+## Kubernetes
+### Using Helm
+#### Bring up SigNoz cluster
+```sh
+helm repo add signoz https://charts.signoz.io
+kubectl create ns platform
+helm -n platform install my-release signoz/signoz
+```
+To access the UI, you can `port-forward` the frontend service:
+```sh
+kubectl -n platform port-forward svc/my-release-frontend 3301:3301
+```
+Open http://localhost:3301 in your favourite browser. Few minutes after you generate load
+from the HotROD application, you should see the data generated from hotrod in SigNoz UI.
+#### Test HotROD application with SigNoz
+```sh
+kubectl create ns sample-application
+kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+```
+To generate load:
+```sh
+kubectl -n sample-application run strzal --image=djbingham/curl \
+ --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
+ 'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
+```
+To stop load:
+```sh
+kubectl -n sample-application run strzal --image=djbingham/curl \
+ --restart='OnFailure' -i --tty --rm --command -- curl \
+ http://locust-master:8089/stop
+```
 ## Uninstall/Troubleshoot?
 Go to our official documentation site [signoz.io/docs](https://signoz.io/docs) for more.


@@ -0,0 +1,35 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'
route:
  receiver: 'slack-notifications'
receivers:
  - name: 'slack-notifications'
    slack_configs:
      - channel: '#alerts'
        send_resolved: true
        icon_url: https://avatars3.githubusercontent.com/u/3380462
        title: |-
          [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
          {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
          {{" "}}(
          {{- with .CommonLabels.Remove .GroupLabels.Names }}
          {{- range $index, $label := .SortedPairs -}}
          {{ if $index }}, {{ end }}
          {{- $label.Name }}="{{ $label.Value -}}"
          {{- end }}
          {{- end -}}
          )
          {{- end }}
        text: >-
          {{ range .Alerts -}}
          *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
          *Description:* {{ .Annotations.description }}
          *Details:*
          {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
          {{ end }}
          {{ end }}


@@ -0,0 +1,11 @@
groups:
  - name: ExampleCPULoadGroup
    rules:
      - alert: HighCpuLoad
        expr: system_cpu_load_average_1m > 0.1
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: High CPU load
          description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

File diff suppressed because it is too large


@@ -0,0 +1,287 @@
version: "3.9"
x-clickhouse-defaults: &clickhouse-defaults
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
deploy:
restart_policy:
condition: on-failure
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
- clickhouse
- otel-collector-migrator
# - clickhouse-2
# - clickhouse-3
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
<<: *clickhouse-defaults
hostname: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
# clickhouse-2:
# <<: *clickhouse-defaults
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# clickhouse-3:
# <<: *clickhouse-defaults
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
image: signoz/alertmanager:0.23.7
volumes:
- ./data/alertmanager:/data
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
depends_on:
- query-service
deploy:
restart_policy:
condition: on-failure
query-service:
image: signoz/query-service:0.56.0
command:
[
"-config=/root/config/prometheus.yml",
"--use-logs-new-schema=true"
]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
interval: 30s
timeout: 5s
retries: 3
deploy:
restart_policy:
condition: on-failure
<<: *db-depend
frontend:
image: signoz/frontend:0.56.0
deploy:
restart_policy:
condition: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.111.5
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # Health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
deploy:
mode: global
restart_policy:
condition: on-failure
depends_on:
- clickhouse
- otel-collector-migrator
- query-service
otel-collector-migrator:
image: signoz/signoz-schema-migrator:0.111.5
deploy:
restart_policy:
condition: on-failure
delay: 5s
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
- clickhouse
# - clickhouse-2
# - clickhouse-3
logspout:
image: "gliderlabs/logspout:v3.2.14"
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
deploy:
mode: global
restart_policy:
condition: on-failure
hotrod:
image: jaegertracing/example-hotrod:1.30
command: [ "all" ]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
logging:
options:
max-size: 50m
max-file: "3"
load-hotrod:
image: "signoz/locust:1.2.3"
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust


@@ -0,0 +1,31 @@
CREATE TABLE IF NOT EXISTS signoz_index (
timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
traceID String CODEC(ZSTD(1)),
spanID String CODEC(ZSTD(1)),
parentSpanID String CODEC(ZSTD(1)),
serviceName LowCardinality(String) CODEC(ZSTD(1)),
name LowCardinality(String) CODEC(ZSTD(1)),
kind Int32 CODEC(ZSTD(1)),
durationNano UInt64 CODEC(ZSTD(1)),
tags Array(String) CODEC(ZSTD(1)),
tagsKeys Array(String) CODEC(ZSTD(1)),
tagsValues Array(String) CODEC(ZSTD(1)),
statusCode Int64 CODEC(ZSTD(1)),
references String CODEC(ZSTD(1)),
externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
component Nullable(String) CODEC(ZSTD(1)),
dbSystem Nullable(String) CODEC(ZSTD(1)),
dbName Nullable(String) CODEC(ZSTD(1)),
dbOperation Nullable(String) CODEC(ZSTD(1)),
peerService Nullable(String) CODEC(ZSTD(1)),
INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
INDEX idx_kind kind TYPE minmax GRANULARITY 4,
INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))


@@ -1,21 +1,62 @@
 receivers:
+  tcplog/docker:
+    listen_address: "0.0.0.0:2255"
+    operators:
+      - type: regex_parser
+        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
+        timestamp:
+          parse_from: attributes.timestamp
+          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
+      - type: move
+        from: attributes["body"]
+        to: body
+      - type: remove
+        field: attributes.timestamp
+      # please remove names from below if you want to collect logs from them
+      - type: filter
+        id: signoz_logs_filter
+        expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
+  opencensus:
+    endpoint: 0.0.0.0:55678
   otlp:
     protocols:
       grpc:
         endpoint: 0.0.0.0:4317
       http:
         endpoint: 0.0.0.0:4318
+  jaeger:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:14250
+      thrift_http:
+        endpoint: 0.0.0.0:14268
+      # thrift_compact:
+      # endpoint: 0.0.0.0:6831
+      # thrift_binary:
+      # endpoint: 0.0.0.0:6832
+  hostmetrics:
+    collection_interval: 30s
+    root_path: /hostfs
+    scrapers:
+      cpu: {}
+      load: {}
+      memory: {}
+      disk: {}
+      filesystem: {}
+      network: {}
   prometheus:
     config:
       global:
         scrape_interval: 60s
       scrape_configs:
+        # otel-collector internal metrics
         - job_name: otel-collector
           static_configs:
             - targets:
                 - localhost:8888
               labels:
                 job_name: otel-collector
 processors:
   batch:
     send_batch_size: 10000
@@ -23,11 +64,25 @@ processors:
     timeout: 10s
   resourcedetection:
     # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
-    detectors: [env, system]
+    detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
     timeout: 2s
+  # memory_limiter:
+  # # 80% of maximum memory up to 2G
+  # limit_mib: 1500
+  # # 25% of limit up to 2G
+  # spike_limit_mib: 512
+  # check_interval: 5s
+  #
+  # # 50% of the maximum memory
+  # limit_percentage: 50
+  # # 20% of max memory usage spike expected
+  # spike_limit_percentage: 20
+  # queued_retry:
+  # num_workers: 4
+  # queue_size: 100
+  # retry_on_failure: true
   signozspanmetrics/delta:
     metrics_exporter: clickhousemetricswrite
-    metrics_flush_interval: 60s
     latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
     dimensions_cache_size: 100000
     aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
@@ -50,16 +105,11 @@ processors:
       - name: host.name
      - name: host.type
      - name: container.name
-extensions:
-  health_check:
-    endpoint: 0.0.0.0:13133
-  pprof:
-    endpoint: 0.0.0.0:1777
 exporters:
   clickhousetraces:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
-    use_new_schema: true
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
@@ -68,34 +118,44 @@ exporters:
     endpoint: tcp://clickhouse:9000/signoz_metrics
   clickhousemetricswritev2:
     dsn: tcp://clickhouse:9000/signoz_metrics
+  # logging: {}
   clickhouselogsexporter:
     dsn: tcp://clickhouse:9000/signoz_logs
     timeout: 10s
     use_new_schema: true
-  # debug: {}
+extensions:
+  health_check:
+    endpoint: 0.0.0.0:13133
+  zpages:
+    endpoint: 0.0.0.0:55679
+  pprof:
+    endpoint: 0.0.0.0:1777
 service:
   telemetry:
     logs:
       encoding: json
     metrics:
       address: 0.0.0.0:8888
-  extensions:
-    - health_check
-    - pprof
+  extensions: [health_check, zpages, pprof]
   pipelines:
     traces:
-      receivers: [otlp]
+      receivers: [jaeger, otlp]
       processors: [signozspanmetrics/delta, batch]
       exporters: [clickhousetraces]
     metrics:
       receivers: [otlp]
       processors: [batch]
       exporters: [clickhousemetricswrite, clickhousemetricswritev2]
+    metrics/hostmetrics:
+      receivers: [hostmetrics]
+      processors: [resourcedetection, batch]
+      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
     metrics/prometheus:
       receivers: [prometheus]
       processors: [batch]
       exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
     logs:
-      receivers: [otlp]
+      receivers: [otlp, tcplog/docker]
       processors: [batch]
       exporters: [clickhouselogsexporter]


@@ -12,10 +12,10 @@ alerting:
       - alertmanager:9093
 # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files: []
+rule_files:
   # - "first_rules.yml"
   # - "second_rules.yml"
-# - 'alerts.yml'
+  - 'alerts.yml'
 # A scrape configuration containing exactly one endpoint to scrape:
 # Here it's Prometheus itself.


@@ -0,0 +1,51 @@
server {
    listen 3301;
    server_name _;
    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;
    # to handle uri issue 414 from nginx
    client_max_body_size 24M;
    large_client_header_buffers 8 128k;
    location / {
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }
    location ~ ^/api/(v1|v3)/logs/(tail|livetail){
        proxy_pass http://query-service:8080;
        proxy_http_version 1.1;
        # connection will be closed if no data is read for 600s between successive read operations
        proxy_read_timeout 600s;
        # dont buffer the data send it directly to client.
        proxy_buffering off;
        proxy_cache off;
    }
    location /api {
        proxy_pass http://query-service:8080/api;
        # connection will be closed if no data is read for 600s between successive read operations
        proxy_read_timeout 600s;
    }
    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}


@@ -1,281 +0,0 @@
version: "3"
x-common: &common
networks:
- signoz-net
deploy:
restart_policy:
condition: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
deploy:
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
- zookeeper-1
- zookeeper-2
- zookeeper-3
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
deploy:
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
- clickhouse
- clickhouse-2
- clickhouse-3
- schema-migrator
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
# ports:
# - "2181:2181"
# - "2888:2888"
# - "3888:3888"
volumes:
- ./clickhouse-setup/data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
zookeeper-2:
!!merge <<: *zookeeper-defaults
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
volumes:
- ./clickhouse-setup/data/zookeeper-2:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=2
- ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
zookeeper-3:
!!merge <<: *zookeeper-defaults
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
volumes:
- ./clickhouse-setup/data/zookeeper-3:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=3
- ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
# TODO: needed for schema-migrator to work, remove this redundancy once we have a better solution
hostname: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
clickhouse-2:
!!merge <<: *clickhouse-defaults
hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse-2/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
clickhouse-3:
!!merge <<: *clickhouse-defaults
hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- ./clickhouse-setup/data/alertmanager:/data
depends_on:
- query-service
query-service:
!!merge <<: *db-depend
image: signoz/query-service:0.71.0
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- ./clickhouse-setup/data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.71.0
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:0.111.26
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
deploy:
replicas: 3
depends_on:
- clickhouse
- schema-migrator
- query-service
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:0.111.24
deploy:
restart_policy:
condition: on-failure
delay: 5s
entrypoint: sh
command:
- -c
- "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
depends_on:
- clickhouse
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:
name: signoz-clickhouse-2
clickhouse-3:
name: signoz-clickhouse-3
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1
zookeeper-2:
name: signoz-zookeeper-2
zookeeper-3:
name: signoz-zookeeper-3
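With the networks and named volumes declared above, this file is a regular Swarm stack. A minimal usage sketch, assuming Swarm mode is already initialized and the file is saved as docker-compose.yaml (file name and stack name here are illustrative):

# deploy the HA stack; services are prefixed with the stack name, e.g. signoz_clickhouse
docker stack deploy -c docker-compose.yaml signoz
# watch replicas converge; otel-collector should eventually report 3/3
docker stack services signoz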

View File

@@ -1,209 +0,0 @@
version: "3"
x-common: &common
networks:
- signoz-net
deploy:
restart_policy:
condition: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
deploy:
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
- init-clickhouse
- zookeeper-1
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
deploy:
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
- clickhouse
- schema-migrator
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
# ports:
# - "2181:2181"
# - "2888:2888"
# - "3888:3888"
volumes:
- zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
    # TODO: needed for clickhouse TCP connection
hostname: clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:0.23.7
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
- query-service
query-service:
!!merge <<: *db-depend
image: signoz/query-service:0.71.0
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:0.71.0
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:0.111.26
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
deploy:
replicas: 3
depends_on:
- clickhouse
- schema-migrator
- query-service
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:0.111.24
deploy:
restart_policy:
condition: on-failure
delay: 5s
entrypoint: sh
command:
- -c
- "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
depends_on:
- clickhouse
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1

View File

@@ -1,38 +0,0 @@
version: "3"
x-common: &common
networks:
- signoz-net
extra_hosts:
- host.docker.internal:host-gateway
logging:
options:
max-size: 50m
max-file: "3"
deploy:
restart_policy:
condition: on-failure
services:
hotrod:
<<: *common
image: jaegertracing/example-hotrod:1.61.0
command: [ "all" ]
environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318
load-hotrod:
<<: *common
image: "signoz/locust:1.2.3"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../../../common/locust-scripts:/locust
networks:
signoz-net:
name: signoz-net
external: true

View File

@@ -1,69 +0,0 @@
version: "3"
x-common: &common
networks:
- signoz-net
extra_hosts:
- host.docker.internal:host-gateway
logging:
options:
max-size: 50m
max-file: "3"
deploy:
mode: global
restart_policy:
condition: on-failure
services:
otel-agent:
<<: *common
image: otel/opentelemetry-collector-contrib:0.111.0
command:
- --config=/etc/otel-collector-config.yaml
volumes:
- ./otel-agent-config.yaml:/etc/otel-collector-config.yaml
- /:/hostfs:ro
environment:
- SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
# - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
# Before exposing the ports, make sure the ports are not used by other services
# ports:
# - "4317:4317"
# - "4318:4318"
otel-metrics:
<<: *common
image: otel/opentelemetry-collector-contrib:0.111.0
    user: 0:0 # If you have security concerns, you can replace this with a `UID:GID` that has the necessary permissions for docker.sock
command:
- --config=/etc/otel-collector-config.yaml
volumes:
- ./otel-metrics-config.yaml:/etc/otel-collector-config.yaml
- /var/run/docker.sock:/var/run/docker.sock
environment:
- SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
# - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
# Before exposing the ports, make sure the ports are not used by other services
# ports:
# - "4317:4317"
# - "4318:4318"
deploy:
mode: replicated
replicas: 1
placement:
constraints:
- node.role == manager
logspout:
<<: *common
image: "gliderlabs/logspout:v3.2.14"
command: syslog+tcp://otel-agent:2255
user: root
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- otel-agent
networks:
signoz-net:
name: signoz-net
external: true

View File

@@ -1,102 +0,0 @@
receivers:
hostmetrics:
collection_interval: 30s
root_path: /hostfs
scrapers:
cpu: {}
load: {}
memory: {}
disk: {}
filesystem: {}
network: {}
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-agent
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-agent
tcplog/docker:
listen_address: "0.0.0.0:2255"
operators:
- type: regex_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
timestamp:
parse_from: attributes.timestamp
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: move
from: attributes["body"]
to: body
- type: remove
field: attributes.timestamp
      # remove container names from the filter below if you want to collect their logs
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^(signoz_(logspout|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors:
# - ec2
# - gcp
# - azure
- env
- system
timeout: 2s
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1777
exporters:
otlp:
endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
tls:
insecure: true
headers:
signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
# debug: {}
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- pprof
pipelines:
traces:
receivers: [otlp]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics:
receivers: [otlp]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics/prometheus:
receivers: [prometheus]
processors: [resourcedetection, batch]
exporters: [otlp]
logs:
receivers: [otlp, tcplog/docker]
processors: [resourcedetection, batch]
exporters: [otlp]

View File

@@ -1,103 +0,0 @@
receivers:
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-metrics
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-metrics
# For Docker daemon metrics to be scraped, it must be configured to expose
# Prometheus metrics, as documented here: https://docs.docker.com/config/daemon/prometheus/
# - job_name: docker-daemon
# dockerswarm_sd_configs:
# - host: unix:///var/run/docker.sock
# role: nodes
# relabel_configs:
# - source_labels: [__meta_dockerswarm_node_address]
# target_label: __address__
# replacement: $1:9323
- job_name: "dockerswarm"
dockerswarm_sd_configs:
- host: unix:///var/run/docker.sock
role: tasks
relabel_configs:
- action: keep
regex: running
source_labels:
- __meta_dockerswarm_task_desired_state
- action: keep
regex: true
source_labels:
- __meta_dockerswarm_service_label_signoz_io_scrape
- regex: ([^:]+)(?::\d+)?
replacement: $1
source_labels:
- __address__
target_label: swarm_container_ip
- separator: .
source_labels:
- __meta_dockerswarm_service_name
- __meta_dockerswarm_task_slot
- __meta_dockerswarm_task_id
target_label: swarm_container_name
- target_label: __address__
source_labels:
- swarm_container_ip
- __meta_dockerswarm_service_label_signoz_io_port
separator: ":"
- source_labels:
- __meta_dockerswarm_service_label_signoz_io_path
target_label: __metrics_path__
- source_labels:
- __meta_dockerswarm_service_label_com_docker_stack_namespace
target_label: namespace
- source_labels:
- __meta_dockerswarm_service_name
target_label: service_name
- source_labels:
- __meta_dockerswarm_task_id
target_label: service_instance_id
- source_labels:
- __meta_dockerswarm_node_hostname
target_label: host_name
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
resourcedetection:
detectors:
- env
- system
timeout: 2s
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1777
exporters:
otlp:
endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
tls:
insecure: true
headers:
signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
# debug: {}
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- pprof
pipelines:
metrics:
receivers: [prometheus]
processors: [resourcedetection, batch]
exporters: [otlp]

View File

@@ -1 +0,0 @@
COMPOSE_PROJECT_NAME=signoz

View File

@@ -1,3 +0,0 @@
This data directory is deprecated and will be removed in the future.
Please use the migration script under `scripts/volume-migration` to migrate data from bind mounts to Docker volumes.
The script also renames the project to `signoz` and the network to `signoz-net` (if not already in place).
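For orientation, the core of that migration can be sketched with plain Docker commands; this is an illustrative outline, assuming the bind mount lives at ./clickhouse-setup/data/clickhouse and the target volume is named signoz-clickhouse — prefer the provided script, which also handles the remaining services:

# stop the stack so nothing writes during the copy
docker compose down
# create the named volume and copy the bind-mounted data into it
docker volume create signoz-clickhouse
docker run --rm \
  -v "$PWD/clickhouse-setup/data/clickhouse:/from:ro" \
  -v signoz-clickhouse:/to \
  alpine sh -c 'cp -a /from/. /to/'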

View File

@@ -0,0 +1,35 @@
global:
resolve_timeout: 1m
slack_api_url: 'https://hooks.slack.com/services/xxx'
route:
receiver: 'slack-notifications'
receivers:
- name: 'slack-notifications'
slack_configs:
- channel: '#alerts'
send_resolved: true
icon_url: https://avatars3.githubusercontent.com/u/3380462
title: |-
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
{{" "}}(
{{- with .CommonLabels.Remove .GroupLabels.Names }}
{{- range $index, $label := .SortedPairs -}}
{{ if $index }}, {{ end }}
{{- $label.Name }}="{{ $label.Value -}}"
{{- end }}
{{- end -}}
)
{{- end }}
text: >-
{{ range .Alerts -}}
*Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
*Description:* {{ .Annotations.description }}
*Details:*
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
{{ end }}
{{ end }}
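Before shipping changes to a template like the one above, the configuration can be validated offline with amtool, which is bundled with Alertmanager releases. A sketch, assuming the file is saved as alertmanager.yml:

amtool check-config alertmanager.yml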

View File

@@ -0,0 +1,11 @@
groups:
- name: ExampleCPULoadGroup
rules:
- alert: HighCpuLoad
expr: system_cpu_load_average_1m > 0.1
for: 0m
labels:
severity: warning
annotations:
summary: High CPU load
description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

View File

@@ -10,14 +10,14 @@
         <host>zookeeper-1</host>
         <port>2181</port>
     </node>
-    <node index="2">
+    <!-- <node index="2">
         <host>zookeeper-2</host>
         <port>2181</port>
     </node>
     <node index="3">
         <host>zookeeper-3</host>
         <port>2181</port>
-    </node>
+    </node> -->
 </zookeeper>
 <!-- Configuration of clusters that could be used in Distributed tables.
@@ -58,7 +58,7 @@
             <!-- <priority>1</priority> -->
         </replica>
     </shard>
-    <shard>
+    <!-- <shard>
         <replica>
             <host>clickhouse-2</host>
             <port>9000</port>
@@ -69,7 +69,7 @@
             <host>clickhouse-3</host>
             <port>9000</port>
         </replica>
-    </shard>
+    </shard> -->
         </cluster>
     </remote_servers>
 </clickhouse>

View File

@@ -716,7 +716,7 @@
         asynchronous_metrics - send data from table system.asynchronous_metrics
         status_info - send data from different component from CH, ex: Dictionaries status
     -->
+    <!--
     <prometheus>
         <endpoint>/metrics</endpoint>
         <port>9363</port>
@@ -726,6 +726,7 @@
         <asynchronous_metrics>true</asynchronous_metrics>
         <status_info>true</status_info>
     </prometheus>
+    -->
     <!-- Query log. Used only for queries with setting log_queries = 1. -->
     <query_log>

View File

@@ -0,0 +1,41 @@
<?xml version="1.0"?>
<clickhouse>
<storage_configuration>
<disks>
<default>
<keep_free_space_bytes>10485760</keep_free_space_bytes>
</default>
<s3>
<type>s3</type>
<!-- For S3 cold storage,
if region is us-east-1, endpoint can be https://<bucket-name>.s3.amazonaws.com
if region is not us-east-1, endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
For GCS cold storage,
endpoint should be https://storage.googleapis.com/<bucket-name>/data/
-->
<endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
<access_key_id>ACCESS-KEY-ID</access_key_id>
<secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
<!-- In case of S3, uncomment the below configuration in case you want to read
AWS credentials from the Environment variables if they exist. -->
<!-- <use_environment_credentials>true</use_environment_credentials> -->
<!-- In case of GCS, uncomment the below configuration, since GCS does
not support batch deletion and result in error messages in logs. -->
<!-- <support_batch_delete>false</support_batch_delete> -->
</s3>
</disks>
<policies>
<tiered>
<volumes>
<default>
<disk>default</disk>
</default>
<s3>
<disk>s3</disk>
<perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
</s3>
</volumes>
</tiered>
</policies>
</storage_configuration>
</clickhouse>
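Once this file is mounted (see the commented-out storage.xml lines in the compose files above), the tiered policy moves parts to the s3 disk via table TTLs. A hedged sketch from clickhouse-client — the database, table, and column names below are placeholders, and SigNoz normally manages cold-storage TTLs through its own retention settings:

# attach the tiered policy to a table (illustrative names)
clickhouse-client -q "ALTER TABLE my_db.my_table MODIFY SETTING storage_policy = 'tiered'"
# move parts older than 30 days onto the s3 volume
clickhouse-client -q "ALTER TABLE my_db.my_table MODIFY TTL toDateTime(timestamp) + INTERVAL 30 DAY TO VOLUME 's3'"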

View File

@@ -0,0 +1,123 @@
<?xml version="1.0"?>
<clickhouse>
<!-- See also the files in users.d directory where the settings can be overridden. -->
<!-- Profiles of settings. -->
<profiles>
<!-- Default settings. -->
<default>
<!-- Maximum memory usage for processing single query, in bytes. -->
<max_memory_usage>10000000000</max_memory_usage>
<!-- How to choose between replicas during distributed query processing.
random - choose random replica from set of replicas with minimum number of errors
nearest_hostname - from set of replicas with minimum number of errors, choose replica
with minimum number of different symbols between replica's hostname and local hostname
(Hamming distance).
in_order - first live replica is chosen in specified order.
                 first_or_random - if the first replica has a higher number of errors, pick a random one from the replicas with the minimum number of errors.
-->
<load_balancing>random</load_balancing>
</default>
<!-- Profile that allows only read queries. -->
<readonly>
<readonly>1</readonly>
</readonly>
</profiles>
<!-- Users and ACL. -->
<users>
<!-- If user name was not specified, 'default' user is used. -->
<default>
<!-- See also the files in users.d directory where the password can be overridden.
Password could be specified in plaintext or in SHA256 (in hex format).
If you want to specify password in plaintext (not recommended), place it in 'password' element.
Example: <password>qwerty</password>.
Password could be empty.
If you want to specify SHA256, place it in 'password_sha256_hex' element.
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
place its name in 'server' element inside 'ldap' element.
Example: <ldap><server>my_ldap_server</server></ldap>
If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
place 'kerberos' element instead of 'password' (and similar) elements.
The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
whose initiator's realm matches it.
Example: <kerberos />
Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
How to generate decent password:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
In first line will be password and in second - corresponding SHA256.
How to generate double SHA1:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
In first line will be password and in second - corresponding double SHA1.
-->
<password></password>
<!-- List of networks with open access.
To open access from everywhere, specify:
<ip>::/0</ip>
To open access only from localhost, specify:
<ip>::1</ip>
<ip>127.0.0.1</ip>
Each element of list has one of the following forms:
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
<host> Hostname. Example: server01.clickhouse.com.
To check access, DNS query is performed, and all received addresses compared to peer address.
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
To check access, DNS PTR query is performed for peer address and then regexp is applied.
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
                 It is strongly recommended that the regexp ends with $
All results of DNS requests are cached till server restart.
-->
<networks>
<ip>::/0</ip>
</networks>
<!-- Settings profile for user. -->
<profile>default</profile>
<!-- Quota for user. -->
<quota>default</quota>
<!-- User can create other users and grant rights to them. -->
<!-- <access_management>1</access_management> -->
</default>
</users>
<!-- Quotas. -->
<quotas>
<!-- Name of quota. -->
<default>
<!-- Limits for time interval. You could specify many intervals with different limits. -->
<interval>
<!-- Length of interval. -->
<duration>3600</duration>
<!-- No limits. Just calculate resource usage for time interval. -->
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</clickhouse>
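Following the password instructions in the comments above, an override can be dropped into users.d instead of editing this file. A sketch — the override file name is arbitrary, and the remove attribute relies on ClickHouse's standard config-merging mechanism:

PASSWORD=$(base64 < /dev/urandom | head -c8)
echo "generated password: $PASSWORD"
HASH=$(echo -n "$PASSWORD" | sha256sum | cut -d' ' -f1)
cat > /etc/clickhouse-server/users.d/default-password.xml <<EOF
<clickhouse>
    <users>
        <default>
            <password remove="remove"/>
            <password_sha256_hex>$HASH</password_sha256_hex>
        </default>
    </users>
</clickhouse>
EOF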

View File

@@ -0,0 +1,133 @@
version: "2.4"
include:
- test-app-docker-compose.yaml
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: signoz-clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
tty: true
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
interval: 30s
timeout: 5s
retries: 3
alertmanager:
container_name: signoz-alertmanager
image: signoz/alertmanager:0.23.7
volumes:
- ./data/alertmanager:/data
depends_on:
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
  # Note for maintainers/contributors: if you change the line numbers of the frontend & query-service sections, update them in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` as well.
otel-collector:
container_name: signoz-otel-collector
image: signoz/signoz-otel-collector:0.111.5
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--copy-path=/var/tmp/collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
# user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure

View File

@@ -0,0 +1,67 @@
version: "2.4"
services:
query-service:
hostname: query-service
build:
context: "../../../"
dockerfile: "./pkg/query-service/Dockerfile"
args:
LDFLAGS: ""
TARGETPLATFORM: "${GOOS}/${GOARCH}"
container_name: signoz-query-service
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
command:
[
"-config=/root/config/prometheus.yml",
"--use-logs-new-schema=true"
]
ports:
- "6060:6060"
- "8080:8080"
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
interval: 30s
timeout: 5s
retries: 3
depends_on:
clickhouse:
condition: service_healthy
frontend:
build:
context: "../../../frontend"
dockerfile: "./Dockerfile"
args:
TARGETOS: "${GOOS}"
TARGETPLATFORM: "${GOARCH}"
container_name: signoz-frontend
environment:
- FRONTEND_API_ENDPOINT=http://query-service:8080
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@@ -0,0 +1,296 @@
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
  # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator-sync:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
<<: *clickhouse-defaults
container_name: signoz-clickhouse
hostname: clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
alertmanager:
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
volumes:
- ./data/alertmanager:/data
depends_on:
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
  # Note for maintainers/contributors: if you change the line numbers of the frontend & query-service sections, update them in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` as well.
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.56.0}
container_name: signoz-query-service
command:
[
"-config=/root/config/prometheus.yml",
"--use-logs-new-schema=true"
]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
interval: 30s
timeout: 5s
retries: 3
<<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.56.0}
container_name: signoz-frontend
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator-sync:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
container_name: otel-migrator-sync
command:
- "sync"
- "--dsn=tcp://clickhouse:9000"
- "--up="
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector-migrator-async:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
container_name: otel-migrator-async
command:
- "async"
- "--dsn=tcp://clickhouse:9000"
- "--up="
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator-sync:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.5}
container_name: signoz-otel-collector
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--copy-path=/var/tmp/collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator-sync:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure

View File

@@ -0,0 +1,285 @@
version: "2.4"
include:
- test-app-docker-compose.yaml
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
  # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
hostname: zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
# hostname: zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-3
# hostname: zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
<<: *clickhouse-defaults
container_name: signoz-clickhouse
hostname: clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
- ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
alertmanager:
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
volumes:
- ./data/alertmanager:/data
depends_on:
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
  # Note for maintainers/contributors: if you change the line numbers of the frontend & query-service sections, update them in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` as well.
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.56.0}
container_name: signoz-query-service
command:
[
"-config=/root/config/prometheus.yml",
"-gateway-url=https://api.staging.signoz.cloud",
"--use-logs-new-schema=true"
]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
interval: 30s
timeout: 5s
retries: 3
<<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.56.0}
container_name: signoz-frontend
restart: on-failure
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.5}
container_name: signoz-otel-collector
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--copy-path=/var/tmp/collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure

View File

@@ -0,0 +1,3 @@
include:
- test-app-docker-compose.yaml
- docker-compose-minimal.yaml

View File

@@ -0,0 +1,64 @@
<clickhouse>
<logger>
<!-- Possible levels [1]:
- none (turns off logging)
- fatal
- critical
- error
- warning
- notice
- information
- debug
- trace
[1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
-->
<level>information</level>
<log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
<errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
<!-- Rotation policy
See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
-->
<size>1000M</size>
<count>10</count>
<!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
</logger>
<listen_host>0.0.0.0</listen_host>
<max_connections>4096</max_connections>
<keeper_server>
<tcp_port>9181</tcp_port>
        <!-- Must be unique among all keeper servers -->
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/logs</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<operation_timeout_ms>10000</operation_timeout_ms>
<min_session_timeout_ms>10000</min_session_timeout_ms>
<session_timeout_ms>100000</session_timeout_ms>
<raft_logs_level>information</raft_logs_level>
<compress_logs>false</compress_logs>
<!-- All settings listed in https://github.com/ClickHouse/ClickHouse/blob/master/src/Coordination/CoordinationSettings.h -->
</coordination_settings>
<!-- enable sanity hostname checks for cluster configuration (e.g. if localhost is used with remote endpoints) -->
<hostname_checks_enabled>true</hostname_checks_enabled>
<raft_configuration>
<server>
<id>1</id>
<!-- Internal port and hostname -->
<hostname>clickhouses-keeper-1</hostname>
<port>9234</port>
</server>
<!-- Add more servers here -->
</raft_configuration>
</keeper_server>
</clickhouse>
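Once the keeper is listening on tcp_port 9181, liveness can be probed with ZooKeeper-style four-letter-word commands, assuming they are allowed by four_letter_word_white_list and nc is available:

# 'ruok' should answer 'imok' when the keeper is serving requests
echo ruok | nc localhost 9181
# 'mntr' dumps monitoring counters (znode count, outstanding requests, ...)
echo mntr | nc localhost 9181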

View File

@@ -1,29 +1,85 @@
 receivers:
+  tcplog/docker:
+    listen_address: "0.0.0.0:2255"
+    operators:
+      - type: regex_parser
+        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
+        timestamp:
+          parse_from: attributes.timestamp
+          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
+      - type: move
+        from: attributes["body"]
+        to: body
+      - type: remove
+        field: attributes.timestamp
+      # please remove names from below if you want to collect logs from them
+      - type: filter
+        id: signoz_logs_filter
+        expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
+  opencensus:
+    endpoint: 0.0.0.0:55678
   otlp:
     protocols:
       grpc:
         endpoint: 0.0.0.0:4317
       http:
         endpoint: 0.0.0.0:4318
+  jaeger:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:14250
+      thrift_http:
+        endpoint: 0.0.0.0:14268
+      # thrift_compact:
+      #   endpoint: 0.0.0.0:6831
+      # thrift_binary:
+      #   endpoint: 0.0.0.0:6832
+  hostmetrics:
+    collection_interval: 30s
+    root_path: /hostfs
+    scrapers:
+      cpu: {}
+      load: {}
+      memory: {}
+      disk: {}
+      filesystem: {}
+      network: {}
   prometheus:
     config:
       global:
         scrape_interval: 60s
       scrape_configs:
+        # otel-collector internal metrics
        - job_name: otel-collector
           static_configs:
             - targets:
                 - localhost:8888
               labels:
                 job_name: otel-collector
 processors:
   batch:
     send_batch_size: 10000
     send_batch_max_size: 11000
     timeout: 10s
+  # memory_limiter:
+  #   # 80% of maximum memory up to 2G
+  #   limit_mib: 1500
+  #   # 25% of limit up to 2G
+  #   spike_limit_mib: 512
+  #   check_interval: 5s
+  #
+  #   # 50% of the maximum memory
+  #   limit_percentage: 50
+  #   # 20% of max memory usage spike expected
+  #   spike_limit_percentage: 20
+  # queued_retry:
+  #   num_workers: 4
+  #   queue_size: 100
+  #   retry_on_failure: true
   resourcedetection:
     # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
-    detectors: [env, system]
+    detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
     timeout: 2s
   signozspanmetrics/delta:
     metrics_exporter: clickhousemetricswrite
@@ -50,16 +106,19 @@ processors:
       - name: host.name
       - name: host.type
       - name: container.name
 extensions:
   health_check:
     endpoint: 0.0.0.0:13133
+  zpages:
+    endpoint: 0.0.0.0:55679
   pprof:
     endpoint: 0.0.0.0:1777
 exporters:
   clickhousetraces:
     datasource: tcp://clickhouse:9000/signoz_traces
     low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
+    use_new_schema: true
   clickhousemetricswrite:
     endpoint: tcp://clickhouse:9000/signoz_metrics
     resource_to_telemetry_conversion:
@@ -72,7 +131,8 @@ exporters:
     dsn: tcp://clickhouse:9000/signoz_logs
     timeout: 10s
     use_new_schema: true
-  # debug: {}
+  # logging: {}
 service:
   telemetry:
     logs:
@@ -81,21 +141,26 @@ service:
       address: 0.0.0.0:8888
   extensions:
     - health_check
+    - zpages
     - pprof
   pipelines:
     traces:
-      receivers: [otlp]
+      receivers: [jaeger, otlp]
       processors: [signozspanmetrics/delta, batch]
       exporters: [clickhousetraces]
     metrics:
       receivers: [otlp]
       processors: [batch]
       exporters: [clickhousemetricswrite, clickhousemetricswritev2]
+    metrics/hostmetrics:
+      receivers: [hostmetrics]
+      processors: [resourcedetection, batch]
+      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
     metrics/prometheus:
       receivers: [prometheus]
       processors: [batch]
       exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
     logs:
-      receivers: [otlp]
+      receivers: [otlp, tcplog/docker]
       processors: [batch]
       exporters: [clickhouselogsexporter]
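The tcplog/docker receiver added above expects RFC 5424-style frames as produced by logspout. A hedged way to exercise the parser end to end — the frame below is made up, the container name is chosen so it does not match the drop filter, and port 2255 is only reachable on the compose network unless you publish it:

printf '<14>1 2024-01-01T12:00:00.000Z abc123def456 my-test-app 1 - - hello from nc\n' \
  | nc -w1 localhost 2255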

View File

@@ -0,0 +1 @@
server_endpoint: ws://query-service:4320/v1/opamp

View File

@@ -0,0 +1,25 @@
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: []
remote_read:
- url: tcp://clickhouse:9000/signoz_metrics

View File

@@ -0,0 +1,26 @@
services:
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: [ "all" ]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "signoz/locust:1.2.3"
container_name: load-hotrod
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust

View File

@@ -1,2 +0,0 @@
This directory is deprecated and will be removed in the future.
Please use the new directory for ClickHouse setup scripts instead: `scripts/clickhouse`.

View File

@@ -0,0 +1,16 @@
from locust import HttpUser, task, between


class UserTasks(HttpUser):
    wait_time = between(5, 15)

    @task
    def rachel(self):
        self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")

    @task
    def trom(self):
        self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")

    @task
    def japanese(self):
        self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")

    @task
    def coffee(self):
        self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")

View File

@@ -1,299 +0,0 @@
version: "3"
x-common: &common
networks:
- signoz-net
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
  # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
init-clickhouse:
condition: service_completed_successfully
zookeeper-1:
condition: service_healthy
zookeeper-2:
condition: service_healthy
zookeeper-3:
condition: service_healthy
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: signoz-init-clickhouse
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-1
# ports:
# - "2181:2181"
# - "2888:2888"
# - "3888:3888"
volumes:
- zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
zookeeper-2:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-2
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
volumes:
- zookeeper-2:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=2
- ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
zookeeper-3:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-3
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
volumes:
- zookeeper-3:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=3
- ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
clickhouse-2:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse-2:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
clickhouse-3:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse-3:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.71.0}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "3301:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.71.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
# TODO: support multiple otel-collector replicas. Nginx/Traefik for load balancing?
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.26}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
- --dsn=tcp://clickhouse:9000
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
clickhouse-2:
name: signoz-clickhouse-2
clickhouse-3:
name: signoz-clickhouse-3
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1
zookeeper-2:
name: signoz-zookeeper-2
zookeeper-3:
name: signoz-zookeeper-3
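A note on the x-common, x-clickhouse-defaults, x-zookeeper-defaults, and x-db-depend blocks in the compose file above: they are Compose extension fields shared via YAML anchors (&name) and merge keys (<<: *name); !!merge is just the explicit tag for <<. A small PyYAML sketch with hypothetical keys, showing how a merge key expands on load:

    import yaml  # PyYAML resolves '<<' merge keys with the default safe loader

    doc = """
    x-common: &common
      restart: on-failure
    services:
      demo:
        <<: *common
        image: busybox
    """
    data = yaml.safe_load(doc)
    print(data["services"]["demo"]["restart"])  # -> on-failure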

View File

@@ -1,221 +0,0 @@
version: "3"
x-common: &common
networks:
- signoz-net
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
# adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
init-clickhouse:
condition: service_completed_successfully
zookeeper-1:
condition: service_healthy
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: signoz-init-clickhouse
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-1
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.71.0}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --gateway-url=https://api.staging.signoz.cloud
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.71.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.26}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
- --dsn=tcp://clickhouse:9000
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1

View File

@@ -1,219 +0,0 @@
version: "3"
x-common: &common
networks:
- signoz-net
restart: on-failure
logging:
options:
max-size: 50m
max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
!!merge <<: *common
# adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
labels:
signoz.io/scrape: "true"
signoz.io/port: "9363"
signoz.io/path: "/metrics"
depends_on:
init-clickhouse:
condition: service_completed_successfully
zookeeper-1:
condition: service_healthy
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
!!merge <<: *common
image: bitnami/zookeeper:3.7.1
user: root
labels:
signoz.io/scrape: "true"
signoz.io/port: "9141"
signoz.io/path: "/metrics"
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
x-db-depend: &db-depend
!!merge <<: *common
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
services:
init-clickhouse:
!!merge <<: *common
image: clickhouse/clickhouse-server:24.1.2-alpine
container_name: signoz-init-clickhouse
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
volumes:
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
zookeeper-1:
!!merge <<: *zookeeper-defaults
container_name: signoz-zookeeper-1
# ports:
# - "2181:2181"
# - "2888:2888"
# - "3888:3888"
volumes:
- zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
clickhouse:
!!merge <<: *clickhouse-defaults
container_name: signoz-clickhouse
# ports:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- clickhouse:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
alertmanager:
!!merge <<: *common
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
container_name: signoz-alertmanager
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
volumes:
- alertmanager:/data
depends_on:
query-service:
condition: service_healthy
query-service:
!!merge <<: *db-depend
image: signoz/query-service:${DOCKER_TAG:-0.71.0}
container_name: signoz-query-service
command:
- --config=/root/config/prometheus.yml
- --use-logs-new-schema=true
- --use-trace-new-schema=true
# ports:
# - "3301:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
frontend:
!!merge <<: *common
image: signoz/frontend:${DOCKER_TAG:-0.71.0}
container_name: signoz-frontend
depends_on:
- alertmanager
- query-service
ports:
- "3301:3301"
volumes:
- ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.26}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
depends_on:
query-service:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-sync
command:
- sync
- --dsn=tcp://clickhouse:9000
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
networks:
signoz-net:
name: signoz-net
volumes:
alertmanager:
name: signoz-alertmanager
clickhouse:
name: signoz-clickhouse
sqlite:
name: signoz-sqlite
zookeeper-1:
name: signoz-zookeeper-1

View File

@@ -1,39 +0,0 @@
version: "3"
x-common: &common
networks:
- signoz-net
extra_hosts:
- host.docker.internal:host-gateway
logging:
options:
max-size: 50m
max-file: "3"
restart: on-failure
services:
hotrod:
<<: *common
image: jaegertracing/example-hotrod:1.61.0
container_name: hotrod
command: [ "all" ]
environment:
- OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318 # In case of external SigNoz or cloud, update the endpoint and access token
# - OTEL_OTLP_HEADERS=signoz-access-token=<your-access-token>
load-hotrod:
<<: *common
image: "signoz/locust:1.2.3"
container_name: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../../../common/locust-scripts:/locust
networks:
signoz-net:
name: signoz-net
external: true
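hotrod exports traces to whatever OTEL_EXPORTER_OTLP_ENDPOINT points at (here the host's collector on 4318). Any OTLP-instrumented app can feed the same pipeline; a minimal Python sketch using the official SDK, which reads the same environment variable:

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor
    from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter

    # OTLPSpanExporter honors OTEL_EXPORTER_OTLP_ENDPOINT, defaulting to localhost:4318
    provider = TracerProvider()
    provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
    trace.set_tracer_provider(provider)

    with trace.get_tracer("demo").start_as_current_span("dispatch"):
        pass  # traced work goes here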

View File

@@ -1,43 +0,0 @@
version: "3"
x-common: &common
networks:
- signoz-net
extra_hosts:
- host.docker.internal:host-gateway
logging:
options:
max-size: 50m
max-file: "3"
restart: on-failure
services:
otel-agent:
<<: *common
image: otel/opentelemetry-collector-contrib:0.111.0
command:
- --config=/etc/otel-collector-config.yaml
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- /:/hostfs:ro
- /var/run/docker.sock:/var/run/docker.sock
environment:
- SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux # Replace signoz-host with the actual hostname
# - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
# Before exposing the ports, make sure the ports are not used by other services
# ports:
# - "4317:4317"
# - "4318:4318"
logspout:
<<: *common
image: "gliderlabs/logspout:v3.2.14"
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-agent:2255
depends_on:
- otel-agent
networks:
signoz-net:
name: signoz-net
external: true
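logspout tails every container's stdout/stderr via the Docker socket and forwards it as syslog over TCP to otel-agent:2255, where the tcplog receiver (next file) parses it. A hypothetical smoke test that feeds that receiver one RFC 5424-style line, assuming the port is reachable from the host:

    import socket

    line = '<14>1 2024-11-20T09:19:55.000Z abc123 my-container 0 - - hello from a container\n'
    with socket.create_connection(("localhost", 2255)) as s:
        s.sendall(line.encode())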

View File

@@ -1,139 +0,0 @@
receivers:
hostmetrics:
collection_interval: 30s
root_path: /hostfs
scrapers:
cpu: {}
load: {}
memory: {}
disk: {}
filesystem: {}
network: {}
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
# For Docker daemon metrics to be scraped, it must be configured to expose
# Prometheus metrics, as documented here: https://docs.docker.com/config/daemon/prometheus/
# - job_name: docker-daemon
# static_configs:
# - targets:
# - host.docker.internal:9323
# labels:
# job_name: docker-daemon
- job_name: docker-container
docker_sd_configs:
- host: unix:///var/run/docker.sock
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_docker_container_label_signoz_io_scrape
- regex: true
source_labels:
- __meta_docker_container_label_signoz_io_path
target_label: __metrics_path__
- regex: (.+)
source_labels:
- __meta_docker_container_label_signoz_io_path
target_label: __metrics_path__
- separator: ":"
source_labels:
- __meta_docker_network_ip
- __meta_docker_container_label_signoz_io_port
target_label: __address__
- regex: '/(.*)'
replacement: '$1'
source_labels:
- __meta_docker_container_name
target_label: container_name
- regex: __meta_docker_container_label_signoz_io_(.+)
action: labelmap
replacement: $1
tcplog/docker:
listen_address: "0.0.0.0:2255"
operators:
- type: regex_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
timestamp:
parse_from: attributes.timestamp
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: move
from: attributes["body"]
to: body
- type: remove
field: attributes.timestamp
# remove names from the regex below if you want to collect logs from those containers
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^(signoz-(|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
resourcedetection:
# Using the OTEL_RESOURCE_ATTRIBUTES env var, the env detector adds custom labels.
detectors:
# - ec2
# - gcp
# - azure
- env
- system
timeout: 2s
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1777
exporters:
otlp:
endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
tls:
insecure: true
headers:
signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
# debug: {}
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- pprof
pipelines:
traces:
receivers: [otlp]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics:
receivers: [otlp]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
metrics/prometheus:
receivers: [prometheus]
processors: [resourcedetection, batch]
exporters: [otlp]
logs:
receivers: [otlp, tcplog/docker]
processors: [resourcedetection, batch]
exporters: [otlp]
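The tcplog/docker regex_parser above pulls container_id, container_name, and body out of each logspout line, and the filter then drops logs from SigNoz's own containers. A quick Python check of the field extraction (the timestamp group is simplified to \S+ here for brevity):

    import re

    pattern = re.compile(
        r'^<([0-9]+)>[0-9]+ (?P<timestamp>\S+) (?P<container_id>\S+) (?P<container_name>\S+) '
        r'[0-9]+ - -( (?P<body>.*))?'
    )
    sample = '<14>1 2024-11-20T09:19:55.000Z abc123 hotrod 0 - - GET /dispatch'
    m = pattern.match(sample)
    print(m.group('container_name'), '|', m.group('body'))  # hotrod | GET /dispatch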

View File

@@ -2,11 +2,6 @@
set -o errexit
- # Variables
- BASE_DIR="$(dirname "$(readlink -f "$0")")"
- DOCKER_STANDALONE_DIR="docker"
- DOCKER_SWARM_DIR="docker-swarm" # TODO: Add docker swarm support
# Regular Colors
Black='\033[0;30m' # Black
Red='\[\e[0;31m\]' # Red
@@ -37,11 +32,6 @@ has_cmd() {
command -v "$1" > /dev/null 2>&1
}
- # Check if docker compose plugin is present
- has_docker_compose_plugin() {
-     docker compose version > /dev/null 2>&1
- }
is_mac() {
[[ $OSTYPE == darwin* ]]
}
@@ -193,7 +183,9 @@ install_docker() {
$sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
echo "Installing docker"
$yum_cmd install docker-ce docker-ce-cli containerd.io
fi
}
compose_version () {
@@ -235,6 +227,12 @@ start_docker() {
echo "Starting docker service"
$sudo_cmd systemctl start docker.service
fi
+ # if [[ -z $sudo_cmd ]]; then
+ #     docker ps > /dev/null && true
+ #     if [[ $? -ne 0 ]]; then
+ #         request_sudo
+ #     fi
+ # fi
if [[ -z $sudo_cmd ]]; then
if ! docker ps > /dev/null && true; then
request_sudo
@@ -262,15 +260,12 @@
}
bye() { # Prints a friendly good bye message and exits the script.
- # Switch back to the original directory
- popd > /dev/null 2>&1
if [[ "$?" -ne 0 ]]; then
set +o errexit
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
- echo -e "cd ${DOCKER_STANDALONE_DIR}"
- echo -e "$sudo_cmd $docker_compose_cmd ps -a"
+ echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
@@ -301,6 +296,11 @@ request_sudo() {
if (( $EUID != 0 )); then
sudo_cmd="sudo"
echo -e "Please enter your sudo password, if prompted."
+ # $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
+ # if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
+ #     echo "Need sudo privileges to proceed with the installation."
+ #     exit 1;
+ # fi
if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
echo "Need sudo privileges to proceed with the installation."
exit 1;
@@ -317,7 +317,6 @@ echo -e "👋 Thank you for trying out SigNoz! "
echo ""
sudo_cmd=""
- docker_compose_cmd=""
# Check sudo permissions
if (( $EUID != 0 )); then
@@ -363,8 +362,28 @@ else
SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
fi
+ # echo ""
+ # echo -e "👉 ${RED}Two ways to go forward\n"
+ # echo -e "${RED}1) ClickHouse as database (default)\n"
+ # read -p "⚙️ Enter your preference (1/2):" choice_setup
+ # while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
+ # do
+ #     # echo $choice_setup
+ #     echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
+ #     read -p "⚙️ Enter your preference (1/2): " choice_setup
+ #     # echo $choice_setup
+ # done
+ # if [[ $choice_setup == "1" || $choice_setup == "" ]];then
+ #     setup_type='clickhouse'
+ # fi
setup_type='clickhouse'
+ # echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
# Run bye if failure happens
trap bye EXIT
@@ -436,6 +455,8 @@ if [[ $desired_os -eq 0 ]]; then
send_event "os_not_supported"
fi
+ # check_ports_occupied
# Check if the Docker daemon is installed and available. If not, then install & start Docker for Linux machines. We cannot automatically install Docker Desktop on macOS.
if ! is_command_present docker; then
@@ -465,42 +486,27 @@ if ! is_command_present docker; then
fi
fi
- if has_docker_compose_plugin; then
-     echo "docker compose plugin is present, using it"
-     docker_compose_cmd="docker compose"
# Install docker-compose
- else
-     docker_compose_cmd="docker-compose"
-     if ! is_command_present docker-compose; then
-         request_sudo
-         install_docker_compose
-     fi
+ if ! is_command_present docker-compose; then
+     request_sudo
+     install_docker_compose
fi
start_docker
- # Switch to the Docker Standalone directory
- pushd "${BASE_DIR}/${DOCKER_STANDALONE_DIR}" > /dev/null 2>&1
- # check for open ports, if signoz is not installed
- if is_command_present docker-compose; then
-     if $sudo_cmd $docker_compose_cmd ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
-         echo "SigNoz already installed, skipping the occupied ports check"
-     else
-         check_ports_occupied
-     fi
- fi
+ # $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
- $sudo_cmd $docker_compose_cmd pull
+ $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
- # The $docker_compose_cmd command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
+ # The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
- $sudo_cmd $docker_compose_cmd up --detach --remove-orphans || true
+ $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
wait_for_containers_start 60
echo ""
@@ -510,14 +516,7 @@ if [[ $status_code -ne 200 ]]; then
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
- echo "cd ${DOCKER_STANDALONE_DIR}"
- echo "$sudo_cmd $docker_compose_cmd ps -a"
- echo ""
- echo "Try bringing down the containers and retrying the installation"
- echo "cd ${DOCKER_STANDALONE_DIR}"
- echo "$sudo_cmd $docker_compose_cmd down -v"
- echo ""
+ echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
echo "or reach us on SigNoz for support https://signoz.io/slack"
@@ -538,10 +537,7 @@ else
echo " By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
- echo " To bring down SigNoz and clean volumes:"
- echo ""
- echo "cd ${DOCKER_STANDALONE_DIR}"
- echo "$sudo_cmd $docker_compose_cmd down -v"
+ echo " To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"

View File

@@ -23,9 +23,6 @@ COPY pkg/query-service/templates /root/templates
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service
- # Copy frontend
- COPY frontend/build/ /etc/signoz/web/
# run the binary
ENTRYPOINT ["./query-service"]

View File

@@ -59,7 +59,7 @@ type anomalyQueryParams struct {
// The results obtained from this query are used to compare with predicted values
// and to detect anomalies
CurrentPeriodQuery *v3.QueryRangeParamsV3
- // PastPeriodQuery is the query range params for past period of seasonality
+ // PastPeriodQuery is the query range params for past seasonal period
// Example: For weekly seasonality, (now-1w-5m, now-1w)
//        : For daily seasonality, (now-1d-5m, now-1d)
//        : For hourly seasonality, (now-1h-5m, now-1h)
@@ -74,6 +74,7 @@ type anomalyQueryParams struct {
//        : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
//        : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
PastSeasonQuery *v3.QueryRangeParamsV3
+ // Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
//        : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
@@ -143,13 +144,13 @@ func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonali
switch seasonality {
case SeasonalityWeekly:
	currentGrowthPeriodStart = start - oneWeekOffset
-	currentGrowthPeriodEnd = start
+	currentGrowthPeriodEnd = end
case SeasonalityDaily:
	currentGrowthPeriodStart = start - oneDayOffset
-	currentGrowthPeriodEnd = start
+	currentGrowthPeriodEnd = end
case SeasonalityHourly:
	currentGrowthPeriodStart = start - oneHourOffset
-	currentGrowthPeriodEnd = start
+	currentGrowthPeriodEnd = end
}
currentGrowthQuery := &v3.QueryRangeParamsV3{
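The hunk above changes where the current growth window ends: instead of ending at start (exactly one past season of data), it now ends at end, so the window covers one season of history plus the queried range. In rough Python terms, with offsets in seconds and the constants assumed:

    ONE_HOUR = 3600
    ONE_DAY = 24 * ONE_HOUR
    ONE_WEEK = 7 * ONE_DAY

    def growth_window(start, end, seasonality):
        # after the change: (start - one season, end) rather than (start - one season, start)
        offset = {"hourly": ONE_HOUR, "daily": ONE_DAY, "weekly": ONE_WEEK}[seasonality]
        return start - offset, end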

View File

@@ -194,11 +194,10 @@ func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSi
} }
var sum float64 var sum float64
points := series.Points[startIdx:] points := series.Points[startIdx:]
windowSize := int(math.Min(float64(movingAvgWindowSize), float64(len(points)))) for i := 0; i < movingAvgWindowSize && i < len(points); i++ {
for i := 0; i < windowSize; i++ {
sum += points[i].Value sum += points[i].Value
} }
avg := sum / float64(windowSize) avg := sum / float64(movingAvgWindowSize)
return avg return avg
} }
@@ -227,25 +226,21 @@ func (p *BaseSeasonalProvider) getPredictedSeries(
// plus the average of the current season series // plus the average of the current season series
// minus the mean of the past season series, past2 season series and past3 season series // minus the mean of the past season series, past2 season series and past3 season series
for idx, curr := range series.Points { for idx, curr := range series.Points {
movingAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) predictedValue :=
avg := p.getAvg(currentSeasonSeries) p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) +
mean := p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries)) p.getAvg(currentSeasonSeries) -
predictedValue := movingAvg + avg - mean p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
if predictedValue < 0 { if predictedValue < 0 {
// this should not happen (except when the data has extreme outliers)
// we will use the moving avg of the previous period series in this case
zap.L().Warn("predictedValue is less than 0", zap.Float64("predictedValue", predictedValue), zap.Any("labels", series.Labels))
predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
} }
zap.L().Debug("predictedSeries", zap.L().Info("predictedSeries",
zap.Float64("movingAvg", movingAvg), zap.Float64("movingAvg", p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)),
zap.Float64("avg", avg), zap.Float64("avg", p.getAvg(currentSeasonSeries)),
zap.Float64("mean", mean), zap.Float64("mean", p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))),
zap.Any("labels", series.Labels), zap.Any("labels", series.Labels),
zap.Float64("predictedValue", predictedValue), zap.Float64("predictedValue", predictedValue),
zap.Float64("curr", curr.Value),
) )
predictedSeries.Points = append(predictedSeries.Points, v3.Point{ predictedSeries.Points = append(predictedSeries.Points, v3.Point{
Timestamp: curr.Timestamp, Timestamp: curr.Timestamp,
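Note the removed side of getMovingAvg clamps the window to the points actually available before dividing, while the kept side divides by movingAvgWindowSize even when fewer points were summed, biasing short tails toward zero. A Python sketch of the clamped average together with the prediction rule the comments describe (moving average of the previous period, plus the current-season average, minus the mean of the three past-season averages):

    def moving_avg(values, window_size, start_idx):
        pts = values[start_idx:]
        n = min(window_size, len(pts))  # clamp so short tails average over real points
        return sum(pts[:n]) / n if n else 0.0

    def predict(prev, cur_season, past, past2, past3, window, idx):
        avg = lambda s: sum(s) / len(s) if s else 0.0
        mean3 = (avg(past) + avg(past2) + avg(past3)) / 3
        value = moving_avg(prev, window, idx) + avg(cur_season) - mean3
        # negative predictions (extreme outliers) fall back to the moving average
        return value if value >= 0 else moving_avg(prev, window, idx)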

View File

@@ -12,7 +12,6 @@ import (
"go.signoz.io/signoz/ee/query-service/license" "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage" "go.signoz.io/signoz/ee/query-service/usage"
baseapp "go.signoz.io/signoz/pkg/query-service/app" baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/integrations" "go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline" "go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/cache" "go.signoz.io/signoz/pkg/query-service/cache"
@@ -26,21 +25,22 @@ type APIHandlerOptions struct {
DataConnector interfaces.DataConnector DataConnector interfaces.DataConnector
SkipConfig *basemodel.SkipConfig SkipConfig *basemodel.SkipConfig
PreferSpanMetrics bool PreferSpanMetrics bool
MaxIdleConns int
MaxOpenConns int
DialTimeout time.Duration
AppDao dao.ModelDao AppDao dao.ModelDao
RulesManager *rules.Manager RulesManager *rules.Manager
UsageManager *usage.Manager UsageManager *usage.Manager
FeatureFlags baseint.FeatureLookup FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager LicenseManager *license.Manager
IntegrationsController *integrations.Controller IntegrationsController *integrations.Controller
CloudIntegrationsController *cloudintegrations.Controller
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
Cache cache.Cache Cache cache.Cache
Gateway *httputil.ReverseProxy Gateway *httputil.ReverseProxy
GatewayUrl string
// Querier Influx Interval // Querier Influx Interval
FluxInterval time.Duration FluxInterval time.Duration
UseLogsNewSchema bool UseLogsNewSchema bool
UseTraceNewSchema bool UseLicensesV3 bool
} }
type APIHandler struct { type APIHandler struct {
@@ -55,16 +55,18 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
Reader: opts.DataConnector, Reader: opts.DataConnector,
SkipConfig: opts.SkipConfig, SkipConfig: opts.SkipConfig,
PreferSpanMetrics: opts.PreferSpanMetrics, PreferSpanMetrics: opts.PreferSpanMetrics,
MaxIdleConns: opts.MaxIdleConns,
MaxOpenConns: opts.MaxOpenConns,
DialTimeout: opts.DialTimeout,
AppDao: opts.AppDao, AppDao: opts.AppDao,
RuleManager: opts.RulesManager, RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags, FeatureFlags: opts.FeatureFlags,
IntegrationsController: opts.IntegrationsController, IntegrationsController: opts.IntegrationsController,
CloudIntegrationsController: opts.CloudIntegrationsController,
LogsParsingPipelineController: opts.LogsParsingPipelineController, LogsParsingPipelineController: opts.LogsParsingPipelineController,
Cache: opts.Cache, Cache: opts.Cache,
FluxInterval: opts.FluxInterval, FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema, UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema, UseLicensesV3: opts.UseLicensesV3,
}) })
if err != nil { if err != nil {
@@ -112,6 +114,13 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
// note: add ee override methods first // note: add ee override methods first
// routes available only in ee version // routes available only in ee version
router.HandleFunc("/api/v1/licenses",
am.AdminAccess(ah.listLicenses)).
Methods(http.MethodGet)
router.HandleFunc("/api/v1/licenses",
am.AdminAccess(ah.applyLicense)).
Methods(http.MethodPost)
router.HandleFunc("/api/v1/featureFlags", router.HandleFunc("/api/v1/featureFlags",
am.OpenAccess(ah.getFeatureFlags)). am.OpenAccess(ah.getFeatureFlags)).
@@ -166,33 +175,34 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut) router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut) router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
// v2
router.HandleFunc("/api/v2/licenses",
am.ViewAccess(ah.listLicensesV2)).
Methods(http.MethodGet)
// v3 // v3
router.HandleFunc("/api/v3/licenses", am.ViewAccess(ah.listLicensesV3)).Methods(http.MethodGet) router.HandleFunc("/api/v3/licenses",
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.applyLicenseV3)).Methods(http.MethodPost) am.ViewAccess(ah.listLicensesV3)).
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.refreshLicensesV3)).Methods(http.MethodPut) Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.getActiveLicenseV3)).Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses",
am.AdminAccess(ah.applyLicenseV3)).
Methods(http.MethodPost)
router.HandleFunc("/api/v3/licenses",
am.AdminAccess(ah.refreshLicensesV3)).
Methods(http.MethodPut)
// v4 // v4
router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost) router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
// Gateway // Gateway
router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP)) router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))
ah.APIHandler.RegisterRoutes(router, am) ah.APIHandler.RegisterRoutes(router, am)
} }
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)
router.HandleFunc(
"/api/v1/cloud-integrations/{cloudProvider}/accounts/generate-connection-params",
am.EditAccess(ah.CloudIntegrationsGenerateConnectionParams),
).Methods(http.MethodGet)
}
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) { func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
version := version.GetVersion() version := version.GetVersion()
versionResponse := basemodel.GetVersionResponse{ versionResponse := basemodel.GetVersionResponse{
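The am.OpenAccess/ViewAccess/EditAccess/AdminAccess wrappers gate each route by role (note the gateway route moving between EditAccess and AdminAccess across the two sides of this diff). Conceptually such wrappers are role-checking decorators; a hypothetical Python sketch, not SigNoz's actual middleware:

    from functools import wraps

    def require_role(min_role):
        order = {"open": 0, "viewer": 1, "editor": 2, "admin": 3}
        def decorator(handler):
            @wraps(handler)
            def wrapped(request):
                # reject requests whose user role ranks below min_role
                if order[request.user_role] < order[min_role]:
                    return 403, "forbidden"
                return handler(request)
            return wrapped
        return decorator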

View File

@@ -1,425 +0,0 @@
package api
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconstants "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/dao"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
"go.uber.org/zap"
)
type CloudIntegrationConnectionParamsResponse struct {
IngestionUrl string `json:"ingestion_url,omitempty"`
IngestionKey string `json:"ingestion_key,omitempty"`
SigNozAPIUrl string `json:"signoz_api_url,omitempty"`
SigNozAPIKey string `json:"signoz_api_key,omitempty"`
}
func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
cloudProvider := mux.Vars(r)["cloudProvider"]
if cloudProvider != "aws" {
RespondError(w, basemodel.BadRequest(fmt.Errorf(
"cloud provider not supported: %s", cloudProvider,
)), nil)
return
}
currentUser, err := auth.GetUserFromRequest(r)
if err != nil {
RespondError(w, basemodel.UnauthorizedError(fmt.Errorf(
"couldn't deduce current user: %w", err,
)), nil)
return
}
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), currentUser.OrgId, cloudProvider)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't provision PAT for cloud integration:",
), nil)
return
}
result := CloudIntegrationConnectionParamsResponse{
SigNozAPIKey: apiKey,
}
license, apiErr := ah.LM().GetRepo().GetActiveLicense(r.Context())
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't look for active license",
), nil)
return
}
if license == nil {
// Return the API Key (PAT) even if the rest of the params can not be deduced.
// Params not returned from here will be requested from the user via form inputs.
// This enables gracefully degraded but working experience even for non-cloud deployments.
zap.L().Info("ingestion params and signoz api url can not be deduced since no license was found")
ah.Respond(w, result)
return
}
ingestionUrl, signozApiUrl, apiErr := getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't deduce ingestion url and signoz api url",
), nil)
return
}
result.IngestionUrl = ingestionUrl
result.SigNozAPIUrl = signozApiUrl
gatewayUrl := ah.opts.GatewayUrl
if len(gatewayUrl) > 0 {
ingestionKey, apiErr := getOrCreateCloudProviderIngestionKey(
r.Context(), gatewayUrl, license.Key, cloudProvider,
)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't get or create ingestion key",
), nil)
return
}
result.IngestionKey = ingestionKey
} else {
zap.L().Info("ingestion key can't be deduced since no gateway url has been configured")
}
ah.Respond(w, result)
}
func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId string, cloudProvider string) (
string, *basemodel.ApiError,
) {
integrationPATName := fmt.Sprintf("%s integration", cloudProvider)
integrationUser, apiErr := ah.getOrCreateCloudIntegrationUser(ctx, orgId, cloudProvider)
if apiErr != nil {
return "", apiErr
}
allPats, err := ah.AppDao().ListPATs(ctx)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't list PATs: %w", err.Error(),
))
}
for _, p := range allPats {
if p.UserID == integrationUser.Id && p.Name == integrationPATName {
return p.Token, nil
}
}
zap.L().Info(
"no PAT found for cloud integration, creating a new one",
zap.String("cloudProvider", cloudProvider),
)
newPAT := model.PAT{
Token: generatePATToken(),
UserID: integrationUser.Id,
Name: integrationPATName,
Role: baseconstants.ViewerGroup,
ExpiresAt: 0,
CreatedAt: time.Now().Unix(),
UpdatedAt: time.Now().Unix(),
}
integrationPAT, err := ah.AppDao().CreatePAT(ctx, newPAT)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err.Error(),
))
}
return integrationPAT.Token, nil
}
func (ah *APIHandler) getOrCreateCloudIntegrationUser(
ctx context.Context, orgId string, cloudProvider string,
) (*basemodel.User, *basemodel.ApiError) {
cloudIntegrationUserId := fmt.Sprintf("%s-integration", cloudProvider)
integrationUserResult, apiErr := ah.AppDao().GetUser(ctx, cloudIntegrationUserId)
if apiErr != nil {
return nil, basemodel.WrapApiError(apiErr, "couldn't look for integration user")
}
if integrationUserResult != nil {
return &integrationUserResult.User, nil
}
zap.L().Info(
"cloud integration user not found. Attempting to create the user",
zap.String("cloudProvider", cloudProvider),
)
newUser := &basemodel.User{
Id: cloudIntegrationUserId,
Name: fmt.Sprintf("%s integration", cloudProvider),
Email: fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
CreatedAt: time.Now().Unix(),
OrgId: orgId,
}
viewerGroup, apiErr := dao.DB().GetGroupByName(ctx, baseconstants.ViewerGroup)
if apiErr != nil {
return nil, basemodel.WrapApiError(apiErr, "couldn't get viewer group for creating integration user")
}
newUser.GroupId = viewerGroup.Id
passwordHash, err := auth.PasswordHash(uuid.NewString())
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't hash random password for cloud integration user: %w", err,
))
}
newUser.Password = passwordHash
integrationUser, apiErr := ah.AppDao().CreateUser(ctx, newUser, false)
if apiErr != nil {
return nil, basemodel.WrapApiError(apiErr, "couldn't create cloud integration user")
}
return integrationUser, nil
}
func getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
string, string, *basemodel.ApiError,
) {
url := fmt.Sprintf(
"%s%s",
strings.TrimSuffix(constants.ZeusURL, "/"),
"/v2/deployments/me",
)
type deploymentResponse struct {
Status string `json:"status"`
Error string `json:"error"`
Data struct {
Name string `json:"name"`
ClusterInfo struct {
Region struct {
DNS string `json:"dns"`
} `json:"region"`
} `json:"cluster"`
} `json:"data"`
}
resp, apiErr := requestAndParseResponse[deploymentResponse](
ctx, url, map[string]string{"X-Signoz-Cloud-Api-Key": licenseKey}, nil,
)
if apiErr != nil {
return "", "", basemodel.WrapApiError(
apiErr, "couldn't query for deployment info",
)
}
if resp.Status != "success" {
return "", "", basemodel.InternalError(fmt.Errorf(
"couldn't query for deployment info: status: %s, error: %s",
resp.Status, resp.Error,
))
}
regionDns := resp.Data.ClusterInfo.Region.DNS
deploymentName := resp.Data.Name
if len(regionDns) < 1 || len(deploymentName) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", "", basemodel.InternalError(fmt.Errorf(
"deployment info response not in expected shape. couldn't determine region dns and deployment name",
))
}
ingestionUrl := fmt.Sprintf("https://ingest.%s", regionDns)
signozApiUrl := fmt.Sprintf("https://%s.%s", deploymentName, regionDns)
return ingestionUrl, signozApiUrl, nil
}
type ingestionKey struct {
Name string `json:"name"`
Value string `json:"value"`
// other attributes from gateway response not included here since they are not being used.
}
type ingestionKeysSearchResponse struct {
Status string `json:"status"`
Data []ingestionKey `json:"data"`
Error string `json:"error"`
}
type createIngestionKeyResponse struct {
Status string `json:"status"`
Data ingestionKey `json:"data"`
Error string `json:"error"`
}
func getOrCreateCloudProviderIngestionKey(
ctx context.Context, gatewayUrl string, licenseKey string, cloudProvider string,
) (string, *basemodel.ApiError) {
cloudProviderKeyName := fmt.Sprintf("%s-integration", cloudProvider)
// see if the key already exists
searchResult, apiErr := requestGateway[ingestionKeysSearchResponse](
ctx,
gatewayUrl,
licenseKey,
fmt.Sprintf("/v1/workspaces/me/keys/search?name=%s", cloudProviderKeyName),
nil,
)
if apiErr != nil {
return "", basemodel.WrapApiError(
apiErr, "couldn't search for cloudprovider ingestion key",
)
}
if searchResult.Status != "success" {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't search for cloudprovider ingestion key: status: %s, error: %s",
searchResult.Status, searchResult.Error,
))
}
for _, k := range searchResult.Data {
if k.Name == cloudProviderKeyName {
if len(k.Value) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", basemodel.InternalError(fmt.Errorf(
"ingestion keys search response not as expected",
))
}
return k.Value, nil
}
}
zap.L().Info(
"no existing ingestion key found for cloud integration, creating a new one",
zap.String("cloudProvider", cloudProvider),
)
createKeyResult, apiErr := requestGateway[createIngestionKeyResponse](
ctx, gatewayUrl, licenseKey, "/v1/workspaces/me/keys",
map[string]any{
"name": cloudProviderKeyName,
"tags": []string{"integration", cloudProvider},
},
)
if apiErr != nil {
return "", basemodel.WrapApiError(
apiErr, "couldn't create cloudprovider ingestion key",
)
}
if createKeyResult.Status != "success" {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloudprovider ingestion key: status: %s, error: %s",
createKeyResult.Status, createKeyResult.Error,
))
}
ingestionKey := createKeyResult.Data.Value
if len(ingestionKey) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", basemodel.InternalError(fmt.Errorf(
"ingestion key creation response not as expected",
))
}
return ingestionKey, nil
}
func requestGateway[ResponseType any](
ctx context.Context, gatewayUrl string, licenseKey string, path string, payload any,
) (*ResponseType, *basemodel.ApiError) {
baseUrl := strings.TrimSuffix(gatewayUrl, "/")
reqUrl := fmt.Sprintf("%s%s", baseUrl, path)
headers := map[string]string{
"X-Signoz-Cloud-Api-Key": licenseKey,
"X-Consumer-Username": "lid:00000000-0000-0000-0000-000000000000",
"X-Consumer-Groups": "ns:default",
}
return requestAndParseResponse[ResponseType](ctx, reqUrl, headers, payload)
}
func requestAndParseResponse[ResponseType any](
ctx context.Context, url string, headers map[string]string, payload any,
) (*ResponseType, *basemodel.ApiError) {
reqMethod := http.MethodGet
var reqBody io.Reader
if payload != nil {
reqMethod = http.MethodPost
bodyJson, err := json.Marshal(payload)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't serialize request payload to JSON: %w", err,
))
}
reqBody = bytes.NewBuffer([]byte(bodyJson))
}
req, err := http.NewRequestWithContext(ctx, reqMethod, url, reqBody)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't prepare request: %w", err,
))
}
for k, v := range headers {
req.Header.Set(k, v)
}
client := &http.Client{
Timeout: 10 * time.Second,
}
response, err := client.Do(req)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't make request: %w", err))
}
defer response.Body.Close()
respBody, err := io.ReadAll(response.Body)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't read response: %w", err))
}
var resp ResponseType
err = json.Unmarshal(respBody, &resp)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't unmarshal gateway response into %T", resp,
))
}
return &resp, nil
}
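requestAndParseResponse above is a small generic JSON client: GET by default, POST when a payload is supplied, headers applied verbatim, and the body unmarshalled into the target type. The same flow in Python, for illustration only:

    import json
    import urllib.request

    def request_and_parse(url, headers=None, payload=None, timeout=10):
        data = json.dumps(payload).encode() if payload is not None else None
        req = urllib.request.Request(
            url,
            data=data,
            headers=headers or {},
            method="POST" if payload is not None else "GET",  # mirrors the Go helper
        )
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            return json.loads(resp.read())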

View File

@@ -1,6 +1,7 @@
 package api
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -63,26 +64,55 @@ type ApplyLicenseRequest struct {
 	LicenseKey string `json:"key"`
 }
-func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
-	ah.listLicensesV2(w, r)
+type ListLicenseResponse map[string]interface{}
+func convertLicenseV3ToListLicenseResponse(licensesV3 []*model.LicenseV3) []ListLicenseResponse {
+	listLicenses := []ListLicenseResponse{}
+	for _, license := range licensesV3 {
+		listLicenses = append(listLicenses, license.Data)
+	}
+	return listLicenses
 }
-func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request) {
-	activeLicense, err := ah.LM().GetRepo().GetActiveLicenseV3(r.Context())
-	if err != nil {
-		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
-		return
-	}
-	// return 404 not found if there is no active license
-	if activeLicense == nil {
-		RespondError(w, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no active license found")}, nil)
-		return
-	}
-	// TODO deprecate this when we move away from key for stripe
-	activeLicense.Data["key"] = activeLicense.Key
-	render.Success(w, http.StatusOK, activeLicense.Data)
-}
+func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
+	licenses, apiError := ah.LM().GetLicenses(context.Background())
+	if apiError != nil {
+		RespondError(w, apiError, nil)
+	}
+	ah.Respond(w, licenses)
+}
+func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
+	var l model.License
+	if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
+		RespondError(w, model.BadRequest(err), nil)
+		return
+	}
+	if l.Key == "" {
+		RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
+		return
+	}
+	license, apiError := ah.LM().Activate(r.Context(), l.Key)
+	if apiError != nil {
+		RespondError(w, apiError, nil)
+		return
+	}
+	ah.Respond(w, license)
+}
+func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
+	licenses, apiError := ah.LM().GetLicensesV3(r.Context())
+	if apiError != nil {
+		RespondError(w, apiError, nil)
+		return
+	}
+	ah.Respond(w, convertLicenseV3ToListLicenseResponse(licenses))
+}
 // this function is called by zeus when inserting licenses in the query-service
@@ -188,10 +218,6 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
 func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
 	licensesV2 := []model.License{}
 	for _, l := range licenses {
-		planKeyFromPlanName, ok := model.MapOldPlanKeyToNewPlanName[l.PlanName]
-		if !ok {
-			planKeyFromPlanName = model.Basic
-		}
 		licenseV2 := model.License{
 			Key:          l.Key,
 			ActivationId: "",
@@ -200,7 +226,7 @@ func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
 			ValidationMessage: "",
 			IsCurrent:         l.IsCurrent,
 			LicensePlan: model.LicensePlan{
-				PlanKey:    planKeyFromPlanName,
+				PlanKey:    l.PlanName,
 				ValidFrom:  l.ValidFrom,
 				ValidUntil: l.ValidUntil,
 				Status:     l.Status},
@@ -211,12 +237,24 @@ func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
 }
 func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
-	licensesV3, apierr := ah.LM().GetLicensesV3(r.Context())
-	if apierr != nil {
-		RespondError(w, apierr, nil)
-		return
+	var licenses []model.License
+	if ah.UseLicensesV3 {
+		licensesV3, err := ah.LM().GetLicensesV3(r.Context())
+		if err != nil {
+			RespondError(w, err, nil)
+			return
+		}
+		licenses = convertLicenseV3ToLicenseV2(licensesV3)
+	} else {
+		_licenses, apiError := ah.LM().GetLicenses(r.Context())
+		if apiError != nil {
+			RespondError(w, apiError, nil)
+			return
+		}
+		licenses = _licenses
 	}
-	licenses := convertLicenseV3ToLicenseV2(licensesV3)
 	resp := model.Licenses{
 		TrialStart: -1,
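The applyLicense handler added above accepts a JSON body with a single `key` field and rejects empty keys before handing off to the license manager. A minimal sketch of that decode-and-guard step (the trimmed License struct here is illustrative; the real model.License carries many more fields):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type License struct {
	Key string `json:"key"`
}

func main() {
	// The handler decodes the POST body and rejects an empty key before
	// calling the license manager's Activate.
	body := strings.NewReader(`{"key": "my-license-key"}`)
	var l License
	if err := json.NewDecoder(body).Decode(&l); err != nil || l.Key == "" {
		fmt.Println("bad request: license key is required")
		return
	}
	fmt.Println("would activate:", l.Key)
}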

View File

@@ -7,7 +7,6 @@ import (
 	"github.com/jmoiron/sqlx"
-	"go.signoz.io/signoz/pkg/cache"
 	basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
 	"go.signoz.io/signoz/pkg/query-service/interfaces"
 )
@@ -20,20 +19,19 @@ type ClickhouseReader struct {
 func NewDataConnector(
 	localDB *sqlx.DB,
-	ch clickhouse.Conn,
 	promConfigPath string,
 	lm interfaces.FeatureLookup,
+	maxIdleConns int,
+	maxOpenConns int,
+	dialTimeout time.Duration,
 	cluster string,
 	useLogsNewSchema bool,
-	useTraceNewSchema bool,
-	fluxIntervalForTraceDetail time.Duration,
-	cache cache.Cache,
 ) *ClickhouseReader {
-	chReader := basechr.NewReader(localDB, ch, promConfigPath, lm, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
+	ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
 	return &ClickhouseReader{
-		conn:             ch,
+		conn:             ch.GetConn(),
 		appdb:            localDB,
-		ClickHouseReader: chReader,
+		ClickHouseReader: ch,
 	}
 }

View File

@@ -11,6 +11,7 @@ import (
 	"net"
 	"net/http"
 	_ "net/http/pprof" // http profiler
+	"os"
 	"regexp"
 	"time"
@@ -28,18 +29,15 @@ import (
 	"go.signoz.io/signoz/ee/query-service/integrations/gateway"
 	"go.signoz.io/signoz/ee/query-service/interfaces"
 	"go.signoz.io/signoz/ee/query-service/rules"
-	"go.signoz.io/signoz/pkg/http/middleware"
 	baseauth "go.signoz.io/signoz/pkg/query-service/auth"
+	"go.signoz.io/signoz/pkg/query-service/migrate"
 	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
-	"go.signoz.io/signoz/pkg/signoz"
-	"go.signoz.io/signoz/pkg/web"
 	licensepkg "go.signoz.io/signoz/ee/query-service/license"
 	"go.signoz.io/signoz/ee/query-service/usage"
 	"go.signoz.io/signoz/pkg/query-service/agentConf"
 	baseapp "go.signoz.io/signoz/pkg/query-service/app"
-	"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
 	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
 	baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
 	"go.signoz.io/signoz/pkg/query-service/app/integrations"
@@ -63,23 +61,23 @@ import (
 const AppDbEngine = "sqlite"
 type ServerOptions struct {
-	Config            signoz.Config
-	SigNoz            *signoz.SigNoz
 	PromConfigPath    string
 	SkipTopLvlOpsPath string
 	HTTPHostPort      string
 	PrivateHostPort   string
 	// alert specific params
 	DisableRules      bool
 	RuleRepoURL       string
 	PreferSpanMetrics bool
-	CacheConfigPath            string
-	FluxInterval               string
-	FluxIntervalForTraceDetail string
-	Cluster                    string
-	GatewayUrl                 string
-	UseLogsNewSchema           bool
-	UseTraceNewSchema          bool
+	MaxIdleConns      int
+	MaxOpenConns      int
+	DialTimeout       time.Duration
+	CacheConfigPath   string
+	FluxInterval      string
+	Cluster           string
+	GatewayUrl        string
+	UseLogsNewSchema  bool
+	UseLicensesV3     bool
 }
 // Server runs HTTP api service
@@ -110,22 +108,25 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
 // NewServer creates and initializes Server
 func NewServer(serverOptions *ServerOptions) (*Server, error) {
-	modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore.SQLxDB())
+	modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
 	if err != nil {
 		return nil, err
 	}
-	if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
+	baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)
+	if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
 		return nil, err
 	}
-	if err := preferences.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
+	localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
+	if err != nil {
 		return nil, err
 	}
-	if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
-		return nil, err
-	}
+	localDB.SetMaxOpenConns(10)
 	gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
 	if err != nil {
@@ -133,7 +134,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	}
 	// initiate license manager
-	lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB())
+	lm, err := licensepkg.StartManager("sqlite", localDB, serverOptions.UseLicensesV3)
 	if err != nil {
 		return nil, err
 	}
@@ -142,26 +143,25 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	modelDao.SetFlagProvider(lm)
 	readerReady := make(chan bool)
-	fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
-	if err != nil {
-		return nil, err
-	}
 	var reader interfaces.DataConnector
-	qb := db.NewDataConnector(
-		serverOptions.SigNoz.SQLStore.SQLxDB(),
-		serverOptions.SigNoz.TelemetryStore.ClickHouseDB(),
-		serverOptions.PromConfigPath,
-		lm,
-		serverOptions.Cluster,
-		serverOptions.UseLogsNewSchema,
-		serverOptions.UseTraceNewSchema,
-		fluxIntervalForTraceDetail,
-		serverOptions.SigNoz.Cache,
-	)
-	go qb.Start(readerReady)
-	reader = qb
+	storage := os.Getenv("STORAGE")
+	if storage == "clickhouse" {
+		zap.L().Info("Using ClickHouse as datastore ...")
+		qb := db.NewDataConnector(
+			localDB,
+			serverOptions.PromConfigPath,
+			lm,
+			serverOptions.MaxIdleConns,
+			serverOptions.MaxOpenConns,
+			serverOptions.DialTimeout,
+			serverOptions.Cluster,
+			serverOptions.UseLogsNewSchema,
+		)
+		go qb.Start(readerReady)
+		reader = qb
+	} else {
+		return nil, fmt.Errorf("storage type: %s is not supported in query service", storage)
+	}
 	skipConfig := &basemodel.SkipConfig{}
 	if serverOptions.SkipTopLvlOpsPath != "" {
 		// read skip config
@@ -183,42 +183,41 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	rm, err := makeRulesManager(serverOptions.PromConfigPath,
 		baseconst.GetAlertManagerApiPrefix(),
 		serverOptions.RuleRepoURL,
-		serverOptions.SigNoz.SQLStore.SQLxDB(),
+		localDB,
 		reader,
 		c,
 		serverOptions.DisableRules,
 		lm,
 		serverOptions.UseLogsNewSchema,
-		serverOptions.UseTraceNewSchema,
 	)
 	if err != nil {
 		return nil, err
 	}
+	go func() {
+		err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
+		if err != nil {
+			zap.L().Error("error while running clickhouse migrations", zap.Error(err))
+		}
+	}()
 	// initiate opamp
-	_, err = opAmpModel.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB())
+	_, err = opAmpModel.InitDB(localDB)
 	if err != nil {
 		return nil, err
 	}
-	integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
+	integrationsController, err := integrations.NewController(localDB)
 	if err != nil {
 		return nil, fmt.Errorf(
 			"couldn't create integrations controller: %w", err,
 		)
 	}
-	cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
-	if err != nil {
-		return nil, fmt.Errorf(
-			"couldn't create cloud provider integrations controller: %w", err,
-		)
-	}
 	// ingestion pipelines manager
 	logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
-		serverOptions.SigNoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
+		localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
 	)
 	if err != nil {
 		return nil, err
@@ -226,7 +225,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	// initiate agent config handler
 	agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
-		DB:            serverOptions.SigNoz.SQLStore.SQLxDB(),
+		DB:            localDB,
+		DBEngine:      AppDbEngine,
 		AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
 	})
 	if err != nil {
@@ -234,7 +234,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	}
 	// start the usagemanager
-	usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickHouseDB(), serverOptions.Config.TelemetryStore.ClickHouse.DSN)
+	usageManager, err := usage.New("sqlite", modelDao, lm.GetRepo(), reader.GetConn())
 	if err != nil {
 		return nil, err
 	}
@@ -247,6 +247,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)
 	fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
 	if err != nil {
 		return nil, err
 	}
@@ -255,20 +256,21 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		DataConnector:     reader,
 		SkipConfig:        skipConfig,
 		PreferSpanMetrics: serverOptions.PreferSpanMetrics,
+		MaxIdleConns:      serverOptions.MaxIdleConns,
+		MaxOpenConns:      serverOptions.MaxOpenConns,
+		DialTimeout:       serverOptions.DialTimeout,
 		AppDao:            modelDao,
 		RulesManager:      rm,
 		UsageManager:      usageManager,
 		FeatureFlags:      lm,
 		LicenseManager:    lm,
 		IntegrationsController:        integrationsController,
-		CloudIntegrationsController:   cloudIntegrationsController,
 		LogsParsingPipelineController: logParsingPipelineController,
 		Cache:        c,
 		FluxInterval: fluxInterval,
 		Gateway:      gatewayProxy,
-		GatewayUrl:   serverOptions.GatewayUrl,
 		UseLogsNewSchema:  serverOptions.UseLogsNewSchema,
-		UseTraceNewSchema: serverOptions.UseTraceNewSchema,
+		UseLicensesV3:     serverOptions.UseLicensesV3,
 	}
 	apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -285,7 +287,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		usageManager: usageManager,
 	}
-	httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web)
+	httpServer, err := s.createPublicServer(apiHandler)
 	if err != nil {
 		return nil, err
@@ -311,13 +313,10 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
 	r := baseapp.NewRouter()
-	r.Use(middleware.NewTimeout(zap.L(),
-		s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
-		s.serverOptions.Config.APIServer.Timeout.Default,
-		s.serverOptions.Config.APIServer.Timeout.Max,
-	).Wrap)
-	r.Use(middleware.NewAnalytics(zap.L()).Wrap)
-	r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
+	r.Use(baseapp.LogCommentEnricher)
+	r.Use(setTimeoutMiddleware)
+	r.Use(s.analyticsMiddleware)
+	r.Use(loggingMiddlewarePrivate)
 	apiHandler.RegisterPrivateRoutes(r)
@@ -337,7 +336,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
 	}, nil
 }
-func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
+func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
 	r := baseapp.NewRouter()
@@ -357,18 +356,14 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
 	}
 	am := baseapp.NewAuthMiddleware(getUserFromRequest)
-	r.Use(middleware.NewTimeout(zap.L(),
-		s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
-		s.serverOptions.Config.APIServer.Timeout.Default,
-		s.serverOptions.Config.APIServer.Timeout.Max,
-	).Wrap)
-	r.Use(middleware.NewAnalytics(zap.L()).Wrap)
-	r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)
+	r.Use(baseapp.LogCommentEnricher)
+	r.Use(setTimeoutMiddleware)
+	r.Use(s.analyticsMiddleware)
+	r.Use(loggingMiddleware)
 	apiHandler.RegisterRoutes(r, am)
 	apiHandler.RegisterLogsRoutes(r, am)
 	apiHandler.RegisterIntegrationRoutes(r, am)
-	apiHandler.RegisterCloudIntegrationsRoutes(r, am)
 	apiHandler.RegisterQueryRangeV3Routes(r, am)
 	apiHandler.RegisterInfraMetricsRoutes(r, am)
 	apiHandler.RegisterQueryRangeV4Routes(r, am)
@@ -385,16 +380,36 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
 	handler = handlers.CompressHandler(handler)
-	err := web.AddToRouter(r)
-	if err != nil {
-		return nil, err
-	}
 	return &http.Server{
 		Handler: handler,
 	}, nil
 }
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+// loggingMiddleware is used for logging public api calls
+func loggingMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		route := mux.CurrentRoute(r)
+		path, _ := route.GetPathTemplate()
+		startTime := time.Now()
+		next.ServeHTTP(w, r)
+		zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path))
+	})
+}
+// TODO(remove): Implemented at pkg/http/middleware/logging.go
+// loggingMiddlewarePrivate is used for logging private api calls
+// from internal services like alert manager
+func loggingMiddlewarePrivate(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		route := mux.CurrentRoute(r)
+		path, _ := route.GetPathTemplate()
+		startTime := time.Now()
+		next.ServeHTTP(w, r)
+		zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
+	})
+}
 // TODO(remove): Implemented at pkg/http/middleware/logging.go
 type loggingResponseWriter struct {
 	http.ResponseWriter
@@ -474,29 +489,32 @@ func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}
 		zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
 	}
-	queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)
-	if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
-		if queryInfoResult.MetricsUsed {
+	signozMetricsUsed := false
+	signozLogsUsed := false
+	signozTracesUsed := false
+	if postData != nil {
+		if postData.CompositeQuery != nil {
+			data["queryType"] = postData.CompositeQuery.QueryType
+			data["panelType"] = postData.CompositeQuery.PanelType
+			signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
+		}
+	}
+	if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
+		if signozMetricsUsed {
 			telemetry.GetInstance().AddActiveMetricsUser()
 		}
-		if queryInfoResult.LogsUsed {
+		if signozLogsUsed {
 			telemetry.GetInstance().AddActiveLogsUser()
 		}
-		if queryInfoResult.TracesUsed {
+		if signozTracesUsed {
 			telemetry.GetInstance().AddActiveTracesUser()
 		}
-		data["metricsUsed"] = queryInfoResult.MetricsUsed
-		data["logsUsed"] = queryInfoResult.LogsUsed
-		data["tracesUsed"] = queryInfoResult.TracesUsed
-		data["filterApplied"] = queryInfoResult.FilterApplied
-		data["groupByApplied"] = queryInfoResult.GroupByApplied
-		data["aggregateOperator"] = queryInfoResult.AggregateOperator
-		data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
-		data["numberOfQueries"] = queryInfoResult.NumberOfQueries
-		data["queryType"] = queryInfoResult.QueryType
-		data["panelType"] = queryInfoResult.PanelType
+		data["metricsUsed"] = signozMetricsUsed
+		data["logsUsed"] = signozLogsUsed
+		data["tracesUsed"] = signozTracesUsed
 		userEmail, err := baseauth.GetEmailFromJwt(r.Context())
 		if err == nil {
 			// switch case to set data["screen"] based on the referrer
@@ -563,6 +581,23 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
 	})
 }
+// TODO(remove): Implemented at pkg/http/middleware/timeout.go
+func setTimeoutMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := r.Context()
+		var cancel context.CancelFunc
+		// check if route is not excluded
+		url := r.URL.Path
+		if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
+			ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout)
+			defer cancel()
+		}
+		r = r.WithContext(ctx)
+		next.ServeHTTP(w, r)
+	})
+}
 // initListeners initialises listeners of the server
 func (s *Server) initListeners() error {
 	// listen on public port
@@ -702,8 +737,7 @@ func makeRulesManager(
 	cache cache.Cache,
 	disableRules bool,
 	fm baseint.FeatureLookup,
-	useLogsNewSchema bool,
-	useTraceNewSchema bool) (*baserules.Manager, error) {
+	useLogsNewSchema bool) (*baserules.Manager, error) {
 	// create engine
 	pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -733,9 +767,8 @@ func makeRulesManager(
 		EvalDelay:       baseconst.GetEvalDelay(),
 		PrepareTaskFunc: rules.PrepareTaskFunc,
-		UseLogsNewSchema:    useLogsNewSchema,
-		UseTraceNewSchema:   useTraceNewSchema,
 		PrepareTestRuleFunc: rules.TestNotification,
+		UseLogsNewSchema:    useLogsNewSchema,
 	}
 	// create Manager
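The setTimeoutMiddleware added above wraps each request context with a deadline unless the route is on an exclusion list (streaming endpoints such as log tailing must not be cut off). A runnable sketch of the same pattern, with an illustrative route and timeout standing in for baseconst.TimeoutExcludedRoutes and baseconst.ContextTimeout:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// Illustrative values; the real code reads these from package constants.
var timeoutExcludedRoutes = map[string]bool{"/api/v1/logs/tail": true}

const contextTimeout = 60 * time.Second

func timeoutMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		if !timeoutExcludedRoutes[r.URL.Path] {
			var cancel context.CancelFunc
			ctx, cancel = context.WithTimeout(ctx, contextTimeout)
			defer cancel()
		}
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

func main() {
	h := timeoutMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
	_ = http.ListenAndServe(":8080", h)
}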

View File

@@ -13,9 +13,7 @@ var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
 var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
 var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
 var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")
-// this is set via build time variable
-var ZeusURL = "https://api.signoz.cloud"
+var ZeusURL = GetOrDefaultEnv("ZEUS_URL", "ZeusURL")
 func GetOrDefaultEnv(key string, fallback string) string {
 	v := os.Getenv(key)
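GetOrDefaultEnv presumably returns the fallback when the variable is unset or empty, so ZeusURL can be overridden per environment. A small sketch (the function body below is an assumption, since its tail is cut off in the hunk above):

package main

import (
	"fmt"
	"os"
)

// Assumed shape of the helper: empty env value falls back to the default.
func GetOrDefaultEnv(key string, fallback string) string {
	v := os.Getenv(key)
	if v == "" {
		return fallback
	}
	return v
}

func main() {
	// With ZEUS_URL unset this prints the fallback; exporting
	// ZEUS_URL=https://example.test before startup overrides it.
	fmt.Println(GetOrDefaultEnv("ZEUS_URL", "ZeusURL"))
}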

View File

@@ -1,10 +1,18 @@
 package dao
 import (
-	"github.com/jmoiron/sqlx"
+	"fmt"
 	"go.signoz.io/signoz/ee/query-service/dao/sqlite"
 )
-func InitDao(inputDB *sqlx.DB) (ModelDao, error) {
-	return sqlite.InitDB(inputDB)
+func InitDao(engine, path string) (ModelDao, error) {
+	switch engine {
+	case "sqlite":
+		return sqlite.InitDB(path)
+	default:
+		return nil, fmt.Errorf("qsdb type: %s is not supported in query service", engine)
+	}
 }

View File

@@ -7,6 +7,7 @@ import (
 	basedao "go.signoz.io/signoz/pkg/query-service/dao"
 	basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
 	baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
+	"go.uber.org/zap"
 )
 type modelDao struct {
@@ -28,15 +29,113 @@ func (m *modelDao) checkFeature(key string) error {
 	return m.flags.CheckFeature(key)
 }
+func columnExists(db *sqlx.DB, tableName, columnName string) bool {
+	query := fmt.Sprintf("PRAGMA table_info(%s);", tableName)
+	rows, err := db.Query(query)
+	if err != nil {
+		zap.L().Error("Failed to query table info", zap.Error(err))
+		return false
+	}
+	defer rows.Close()
+	var (
+		cid        int
+		name       string
+		ctype      string
+		notnull    int
+		dflt_value *string
+		pk         int
+	)
+	for rows.Next() {
+		err := rows.Scan(&cid, &name, &ctype, &notnull, &dflt_value, &pk)
+		if err != nil {
+			zap.L().Error("Failed to scan table info", zap.Error(err))
+			return false
+		}
+		if name == columnName {
+			return true
+		}
+	}
+	err = rows.Err()
+	if err != nil {
+		zap.L().Error("Failed to scan table info", zap.Error(err))
+		return false
+	}
+	return false
+}
 // InitDB creates and extends base model DB repository
-func InitDB(inputDB *sqlx.DB) (*modelDao, error) {
-	dao, err := basedsql.InitDB(inputDB)
+func InitDB(dataSourceName string) (*modelDao, error) {
+	dao, err := basedsql.InitDB(dataSourceName)
 	if err != nil {
 		return nil, err
 	}
 	// set package variable so dependent base methods (e.g. AuthCache) will work
 	basedao.SetDB(dao)
 	m := &modelDao{ModelDaoSqlite: dao}
+	table_schema := `
+	PRAGMA foreign_keys = ON;
+	CREATE TABLE IF NOT EXISTS org_domains(
+		id TEXT PRIMARY KEY,
+		org_id TEXT NOT NULL,
+		name VARCHAR(50) NOT NULL UNIQUE,
+		created_at INTEGER NOT NULL,
+		updated_at INTEGER,
+		data TEXT NOT NULL,
+		FOREIGN KEY(org_id) REFERENCES organizations(id)
+	);
+	CREATE TABLE IF NOT EXISTS personal_access_tokens (
+		id INTEGER PRIMARY KEY AUTOINCREMENT,
+		role TEXT NOT NULL,
+		user_id TEXT NOT NULL,
+		token TEXT NOT NULL UNIQUE,
+		name TEXT NOT NULL,
+		created_at INTEGER NOT NULL,
+		expires_at INTEGER NOT NULL,
+		updated_at INTEGER NOT NULL,
+		last_used INTEGER NOT NULL,
+		revoked BOOLEAN NOT NULL,
+		updated_by_user_id TEXT NOT NULL,
+		FOREIGN KEY(user_id) REFERENCES users(id)
+	);
+	`
+	_, err = m.DB().Exec(table_schema)
+	if err != nil {
+		return nil, fmt.Errorf("error in creating tables: %v", err.Error())
+	}
+	if !columnExists(m.DB(), "personal_access_tokens", "role") {
+		_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN role TEXT NOT NULL DEFAULT 'ADMIN';")
+		if err != nil {
+			return nil, fmt.Errorf("error in adding column: %v", err.Error())
+		}
+	}
+	if !columnExists(m.DB(), "personal_access_tokens", "updated_at") {
+		_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_at INTEGER NOT NULL DEFAULT 0;")
+		if err != nil {
+			return nil, fmt.Errorf("error in adding column: %v", err.Error())
+		}
+	}
+	if !columnExists(m.DB(), "personal_access_tokens", "last_used") {
+		_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;")
+		if err != nil {
+			return nil, fmt.Errorf("error in adding column: %v", err.Error())
+		}
+	}
+	if !columnExists(m.DB(), "personal_access_tokens", "revoked") {
+		_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN revoked BOOLEAN NOT NULL DEFAULT FALSE;")
+		if err != nil {
+			return nil, fmt.Errorf("error in adding column: %v", err.Error())
+		}
+	}
+	if !columnExists(m.DB(), "personal_access_tokens", "updated_by_user_id") {
+		_, err = m.DB().Exec("ALTER TABLE personal_access_tokens ADD COLUMN updated_by_user_id TEXT NOT NULL DEFAULT '';")
+		if err != nil {
+			return nil, fmt.Errorf("error in adding column: %v", err.Error())
+		}
+	}
 	return m, nil
 }
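The columnExists/ALTER TABLE sequence above is an idempotent migration: probe PRAGMA table_info and only add a column when it is missing, so restarts against an already-migrated database are no-ops. A condensed sketch of the same pattern (the helper name and schema here are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// addColumnIfMissing probes PRAGMA table_info, then issues ALTER TABLE
// only when the column is absent, mirroring the dao's migration style.
func addColumnIfMissing(db *sqlx.DB, table, column, ddl string) error {
	rows, err := db.Query(fmt.Sprintf("PRAGMA table_info(%s);", table))
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var cid, notnull, pk int
		var name, ctype string
		var dflt *string
		if err := rows.Scan(&cid, &name, &ctype, &notnull, &dflt, &pk); err != nil {
			return err
		}
		if name == column {
			return nil // already migrated
		}
	}
	if err := rows.Err(); err != nil {
		return err
	}
	_, err = db.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s;", table, ddl))
	return err
}

func main() {
	db := sqlx.MustOpen("sqlite3", ":memory:")
	db.MustExec(`CREATE TABLE personal_access_tokens (id INTEGER PRIMARY KEY);`)
	// Safe to run repeatedly: the second call finds the column and returns nil.
	if err := addColumnIfMissing(db, "personal_access_tokens", "role", "role TEXT NOT NULL DEFAULT 'ADMIN'"); err != nil {
		log.Fatal(err)
	}
}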

View File

@@ -2,6 +2,18 @@ package signozio
 type status string
+type ActivationResult struct {
+	Status    status              `json:"status"`
+	Data      *ActivationResponse `json:"data,omitempty"`
+	ErrorType string              `json:"errorType,omitempty"`
+	Error     string              `json:"error,omitempty"`
+}
+
+type ActivationResponse struct {
+	ActivationId string `json:"ActivationId"`
+	PlanDetails  string `json:"PlanDetails"`
+}
+
 type ValidateLicenseResponse struct {
 	Status status                 `json:"status"`
 	Data   map[string]interface{} `json:"data"`
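The new ActivationResult mirrors the control plane's JSON envelope: a status, an optional data object, and error fields. A small sketch of decoding such a payload (the status field is simplified to a plain string here, and the payload values are made up):

package main

import (
	"encoding/json"
	"fmt"
)

type ActivationResponse struct {
	ActivationId string `json:"ActivationId"`
	PlanDetails  string `json:"PlanDetails"`
}

type ActivationResult struct {
	Status    string              `json:"status"`
	Data      *ActivationResponse `json:"data,omitempty"`
	ErrorType string              `json:"errorType,omitempty"`
	Error     string              `json:"error,omitempty"`
}

func main() {
	payload := []byte(`{"status":"success","data":{"ActivationId":"act-123","PlanDetails":"<base64 blob>"}}`)
	var result ActivationResult
	if err := json.Unmarshal(payload, &result); err != nil {
		panic(err)
	}
	fmt.Println(result.Data.ActivationId) // act-123
}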

View File

@@ -10,6 +10,7 @@ import (
 	"time"
 	"github.com/pkg/errors"
+	"go.uber.org/zap"
 	"go.signoz.io/signoz/ee/query-service/constants"
 	"go.signoz.io/signoz/ee/query-service/model"
@@ -38,6 +39,86 @@ func init() {
 	C = New()
 }
+// ActivateLicense sends key to license.signoz.io and gets activation data
+func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
+	licenseReq := map[string]string{
+		"key":    key,
+		"siteId": siteId,
+	}
+	reqString, _ := json.Marshal(licenseReq)
+	httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))
+	if err != nil {
+		zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
+		return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
+	}
+	httpBody, err := io.ReadAll(httpResponse.Body)
+	if err != nil {
+		zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
+		return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
+	}
+	defer httpResponse.Body.Close()
+	// read api request result
+	result := ActivationResult{}
+	err = json.Unmarshal(httpBody, &result)
+	if err != nil {
+		zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
+		return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
+	}
+	switch httpResponse.StatusCode {
+	case 200, 201:
+		return result.Data, nil
+	case 400, 401:
+		return nil, model.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
+	default:
+		return nil, model.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
+	}
+}
+// ValidateLicense validates the license key
+func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
+	validReq := map[string]string{
+		"activationId": activationId,
+	}
+	reqString, _ := json.Marshal(validReq)
+	response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))
+	if err != nil {
+		return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
+	}
+	body, err := io.ReadAll(response.Body)
+	if err != nil {
+		return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
+	}
+	defer response.Body.Close()
+	switch response.StatusCode {
+	case 200, 201:
+		a := ActivationResult{}
+		err = json.Unmarshal(body, &a)
+		if err != nil {
+			return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
+		}
+		return a.Data, nil
+	case 400, 401:
+		return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
+			"bad request error received from license.signoz.io"))
+	default:
+		return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
+			"internal error received from license.signoz.io"))
+	}
+}
 func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
 	// Creating an HTTP client with a timeout for better control

View File

@@ -10,6 +10,7 @@ import (
 	"github.com/jmoiron/sqlx"
 	"github.com/mattn/go-sqlite3"
+	"go.signoz.io/signoz/ee/query-service/license/sqlite"
 	"go.signoz.io/signoz/ee/query-service/model"
 	basemodel "go.signoz.io/signoz/pkg/query-service/model"
 	"go.uber.org/zap"
@@ -27,6 +28,28 @@ func NewLicenseRepo(db *sqlx.DB) Repo {
 	}
 }
+func (r *Repo) InitDB(engine string) error {
+	switch engine {
+	case "sqlite3", "sqlite":
+		return sqlite.InitDB(r.db)
+	default:
+		return fmt.Errorf("unsupported db")
+	}
+}
+func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
+	licenses := []model.License{}
+	query := "SELECT key, activationId, planDetails, validationMessage FROM licenses"
+	err := r.db.Select(&licenses, query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get licenses from db: %v", err)
+	}
+	return licenses, nil
+}
 func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
 	licensesData := []model.LicenseDB{}
 	licenseV3Data := []*model.LicenseV3{}
@@ -58,16 +81,32 @@ func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
 // GetActiveLicense fetches the latest active license from DB.
 // If the license is not present, expect a nil license and a nil error in the output.
 func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
-	activeLicenseV3, err := r.GetActiveLicenseV3(ctx)
+	var err error
+	licenses := []model.License{}
+	query := "SELECT key, activationId, planDetails, validationMessage FROM licenses"
+	err = r.db.Select(&licenses, query)
 	if err != nil {
 		return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
 	}
-	if activeLicenseV3 == nil {
-		return nil, nil
+	var active *model.License
+	for _, l := range licenses {
+		l.ParsePlan()
+		if active == nil &&
+			(l.ValidFrom != 0) &&
+			(l.ValidUntil == -1 || l.ValidUntil > time.Now().Unix()) {
+			active = &l
+		}
+		if active != nil &&
+			l.ValidFrom > active.ValidFrom &&
+			(l.ValidUntil == -1 || l.ValidUntil > time.Now().Unix()) {
+			active = &l
+		}
 	}
-	activeLicenseV2 := model.ConvertLicenseV3ToLicenseV2(activeLicenseV3)
-	return activeLicenseV2, nil
+	return active, nil
 }
 func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error) {
@@ -109,56 +148,50 @@ func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error)
 	return active, nil
 }
-// InsertLicenseV3 inserts a new license v3 in db
-func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {
-	query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
-	// license is the entity of zeus so putting the entire license here without defining schema
-	licenseData, err := json.Marshal(l.Data)
-	if err != nil {
-		return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
+// InsertLicense inserts a new license in db
+func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
+	if l.Key == "" {
+		return fmt.Errorf("insert license failed: license key is required")
 	}
-	_, err = r.db.ExecContext(ctx,
+	query := `INSERT INTO licenses
+		(key, planDetails, activationId, validationmessage)
+		VALUES ($1, $2, $3, $4)`
+	_, err := r.db.ExecContext(ctx,
 		query,
-		l.ID,
 		l.Key,
-		string(licenseData),
-	)
+		l.PlanDetails,
+		l.ActivationId,
+		l.ValidationMessage)
 	if err != nil {
-		if sqliteErr, ok := err.(sqlite3.Error); ok {
-			if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
-				zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
-				return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
-			}
-		}
 		zap.L().Error("error in inserting license data: ", zap.Error(err))
-		return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
+		return fmt.Errorf("failed to insert license in db: %v", err)
 	}
 	return nil
 }
-// UpdateLicenseV3 updates a new license v3 in db
-func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {
-	// the key and id for the license can't change so only update the data here!
-	query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`
-	license, err := json.Marshal(l.Data)
-	if err != nil {
-		return fmt.Errorf("insert license failed: license marshal error")
+// UpdatePlanDetails writes new plan details to the db
+func (r *Repo) UpdatePlanDetails(ctx context.Context,
+	key,
+	planDetails string) error {
+	if key == "" {
+		return fmt.Errorf("update plan details failed: license key is required")
 	}
-	_, err = r.db.ExecContext(ctx,
-		query,
-		license,
-		l.ID,
-	)
+	query := `UPDATE licenses
		SET planDetails = $1,
		updatedAt = $2
		WHERE key = $3`
+	_, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)
 	if err != nil {
-		zap.L().Error("error in updating license data: ", zap.Error(err))
+		zap.L().Error("error in updating license: ", zap.Error(err))
 		return fmt.Errorf("failed to update license in db: %v", err)
 	}
@@ -240,3 +273,59 @@ func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
 	}
 	return nil
 }
+// InsertLicenseV3 inserts a new license v3 in db
+func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {
+	query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
+	// license is the entity of zeus so putting the entire license here without defining schema
+	licenseData, err := json.Marshal(l.Data)
+	if err != nil {
+		return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
+	}
+	_, err = r.db.ExecContext(ctx,
+		query,
+		l.ID,
+		l.Key,
+		string(licenseData),
+	)
+	if err != nil {
+		if sqliteErr, ok := err.(sqlite3.Error); ok {
+			if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
+				zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
+				return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
+			}
+		}
+		zap.L().Error("error in inserting license data: ", zap.Error(err))
+		return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
+	}
+	return nil
+}
+// UpdateLicenseV3 updates a new license v3 in db
+func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {
+	// the key and id for the license can't change so only update the data here!
+	query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`
+	license, err := json.Marshal(l.Data)
+	if err != nil {
+		return fmt.Errorf("insert license failed: license marshal error")
+	}
+	_, err = r.db.ExecContext(ctx,
+		query,
+		license,
+		l.ID,
+	)
+	if err != nil {
+		zap.L().Error("error in updating license data: ", zap.Error(err))
+		return fmt.Errorf("failed to update license in db: %v", err)
+	}
+	return nil
+}
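InsertLicenseV3 above deliberately stores the license payload as an opaque JSON TEXT column rather than a relational schema, so control-plane fields can change without a local migration. A sketch of that round trip (the trimmed LicenseV3 struct and values are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// LicenseV3 is trimmed to the fields the queries above touch.
type LicenseV3 struct {
	ID   string
	Key  string
	Data map[string]interface{}
}

func main() {
	l := LicenseV3{
		ID:   "lic-1",
		Key:  "key-1",
		Data: map[string]interface{}{"plan": map[string]interface{}{"name": "ENTERPRISE"}},
	}
	// Writing: the whole Data map becomes one TEXT value in licenses_v3.data.
	blob, _ := json.Marshal(l.Data)
	fmt.Println(string(blob))

	// Reading is the reverse: unmarshal the TEXT column back into a map.
	var restored map[string]interface{}
	_ = json.Unmarshal(blob, &restored)
	fmt.Println(restored["plan"])
}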

View File

@@ -2,6 +2,7 @@ package license
 import (
 	"context"
+	"fmt"
 	"sync/atomic"
 	"time"
@@ -26,43 +27,88 @@ var LM *Manager
 var validationFrequency = 24 * 60 * time.Minute
 type Manager struct {
 	repo  *Repo
 	mutex sync.Mutex
 	validatorRunning bool
 	// end the license validation, this is important to gracefully
 	// stop validation and protect against inconsistent updates
 	done chan struct{}
 	// terminated waits for the validate go routine to end
 	terminated chan struct{}
 	// last time the license was validated
 	lastValidated int64
 	// keep track of validation failure attempts
 	failedAttempts uint64
 	// keep track of active license and features
+	activeLicense   *model.License
 	activeLicenseV3 *model.LicenseV3
 	activeFeatures  basemodel.FeatureSet
 }
-func StartManager(db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
+func StartManager(dbType string, db *sqlx.DB, useLicensesV3 bool, features ...basemodel.Feature) (*Manager, error) {
 	if LM != nil {
 		return LM, nil
 	}
 	repo := NewLicenseRepo(db)
+	err := repo.InitDB(dbType)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initiate license repo: %v", err)
+	}
 	m := &Manager{
 		repo: &repo,
 	}
-	if err := m.start(features...); err != nil {
-		return m, err
+	if useLicensesV3 {
+		// get active license from the db
+		active, err := m.repo.GetActiveLicense(context.Background())
+		if err != nil {
+			return m, err
+		}
+		// if we have an active license then need to fetch the complete details
+		if active != nil {
+			// fetch the new license structure from control plane
+			licenseV3, apiError := validate.ValidateLicenseV3(active.Key)
+			if apiError != nil {
+				return m, apiError
+			}
+			// insert the licenseV3 in sqlite db
+			apiError = m.repo.InsertLicenseV3(context.Background(), licenseV3)
+			// if the license already exists move ahead.
+			if apiError != nil && apiError.Typ != model.ErrorConflict {
+				return m, apiError
+			}
+		}
+	}
+	if err := m.start(useLicensesV3, features...); err != nil {
+		return m, err
 	}
 	LM = m
 	return m, nil
 }
 // start loads active license in memory and initiates validator
-func (lm *Manager) start(features ...basemodel.Feature) error {
-	return lm.LoadActiveLicenseV3(features...)
+func (lm *Manager) start(useLicensesV3 bool, features ...basemodel.Feature) error {
+	var err error
+	if useLicensesV3 {
+		err = lm.LoadActiveLicenseV3(features...)
+	} else {
+		err = lm.LoadActiveLicense(features...)
+	}
+	return err
 }
 func (lm *Manager) Stop() {
@@ -70,6 +116,31 @@ func (lm *Manager) Stop() {
 	<-lm.terminated
 }
+func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
+	lm.mutex.Lock()
+	defer lm.mutex.Unlock()
+	if l == nil {
+		return
+	}
+	lm.activeLicense = l
+	lm.activeFeatures = append(l.FeatureSet, features...)
+	// set default features
+	setDefaultFeatures(lm)
+	err := lm.InitFeatures(lm.activeFeatures)
+	if err != nil {
+		zap.L().Panic("Couldn't activate features", zap.Error(err))
+	}
+	if !lm.validatorRunning {
+		// we want to make sure only one validator runs,
+		// we already have lock() so good to go
+		lm.validatorRunning = true
+		go lm.Validator(context.Background())
+	}
+}
 func (lm *Manager) SetActiveV3(l *model.LicenseV3, features ...basemodel.Feature) {
 	lm.mutex.Lock()
 	defer lm.mutex.Unlock()
@@ -100,12 +171,34 @@ func setDefaultFeatures(lm *Manager) {
 	lm.activeFeatures = append(lm.activeFeatures, baseconstants.DEFAULT_FEATURE_SET...)
 }
+// LoadActiveLicense loads the most recent active license
+func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
+	active, err := lm.repo.GetActiveLicense(context.Background())
+	if err != nil {
+		return err
+	}
+	if active != nil {
+		lm.SetActive(active, features...)
+	} else {
+		zap.L().Info("No active license found, defaulting to basic plan")
+		// if no active license is found, we default to basic(free) plan with all default features
+		lm.activeFeatures = model.BasicPlan
+		setDefaultFeatures(lm)
+		err := lm.InitFeatures(lm.activeFeatures)
+		if err != nil {
+			zap.L().Error("Couldn't initialize features", zap.Error(err))
+			return err
+		}
+	}
+	return nil
+}
 func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
 	active, err := lm.repo.GetActiveLicenseV3(context.Background())
 	if err != nil {
 		return err
 	}
 	if active != nil {
 		lm.SetActiveV3(active, features...)
 	} else {
@@ -123,6 +216,32 @@ func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
 	return nil
 }
+func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
+	licenses, err := lm.repo.GetLicenses(ctx)
+	if err != nil {
+		return nil, model.InternalError(err)
+	}
+	for _, l := range licenses {
+		l.ParsePlan()
+		if lm.activeLicense != nil && l.Key == lm.activeLicense.Key {
+			l.IsCurrent = true
+		}
+		if l.ValidUntil == -1 {
+			// for subscriptions, there is no end-date as such
+			// but for showing user some validity we default one year timespan
+			l.ValidUntil = l.ValidFrom + 31556926
+		}
+		response = append(response, l)
+	}
+	return
+}
 func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.LicenseV3, apiError *model.ApiError) {
 	licenses, err := lm.repo.GetLicensesV3(ctx)
@@ -134,11 +253,6 @@ func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.License
 		if lm.activeLicenseV3 != nil && l.Key == lm.activeLicenseV3.Key {
 			l.IsCurrent = true
 		}
-		if l.ValidUntil == -1 {
-			// for subscriptions, there is no end-date as such
-			// but for showing user some validity we default one year timespan
-			l.ValidUntil = l.ValidFrom + 31556926
-		}
 		response = append(response, l)
 	}
@@ -146,14 +260,37 @@ func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.License
 }
 // Validator validates license after an epoch of time
-func (lm *Manager) ValidatorV3(ctx context.Context) {
-	zap.L().Info("ValidatorV3 started!")
+func (lm *Manager) Validator(ctx context.Context) {
 	defer close(lm.terminated)
+	tick := time.NewTicker(validationFrequency)
+	defer tick.Stop()
+	lm.Validate(ctx)
+	for {
+		select {
+		case <-lm.done:
+			return
+		default:
+			select {
+			case <-lm.done:
+				return
+			case <-tick.C:
+				lm.Validate(ctx)
+			}
+		}
+	}
+}
+// Validator validates license after an epoch of time
+func (lm *Manager) ValidatorV3(ctx context.Context) {
+	defer close(lm.terminated)
 	tick := time.NewTicker(validationFrequency)
 	defer tick.Stop()
 	lm.ValidateV3(ctx)
 	for {
 		select {
 		case <-lm.done:
@@ -170,6 +307,74 @@ func (lm *Manager) ValidatorV3(ctx context.Context) {
 	}
 }
+// Validate validates the current active license
+func (lm *Manager) Validate(ctx context.Context) (reterr error) {
+	zap.L().Info("License validation started")
+	if lm.activeLicense == nil {
+		return nil
+	}
+	defer func() {
+		lm.mutex.Lock()
+		lm.lastValidated = time.Now().Unix()
+		if reterr != nil {
+			zap.L().Error("License validation completed with error", zap.Error(reterr))
+			atomic.AddUint64(&lm.failedAttempts, 1)
+			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
+				map[string]interface{}{"err": reterr.Error()}, "", true, false)
+		} else {
+			zap.L().Info("License validation completed with no errors")
+		}
+		lm.mutex.Unlock()
+	}()
+	response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
+	if apiError != nil {
+		zap.L().Error("failed to validate license", zap.Error(apiError.Err))
+		return apiError.Err
+	}
+	if response.PlanDetails == lm.activeLicense.PlanDetails {
+		// license plan hasn't changed, nothing to do
+		return nil
+	}
+	if response.PlanDetails != "" {
+		// copy and replace the active license record
+		l := model.License{
+			Key:               lm.activeLicense.Key,
+			CreatedAt:         lm.activeLicense.CreatedAt,
+			PlanDetails:       response.PlanDetails,
+			ValidationMessage: lm.activeLicense.ValidationMessage,
+			ActivationId:      lm.activeLicense.ActivationId,
+		}
+		if err := l.ParsePlan(); err != nil {
+			zap.L().Error("failed to parse updated license", zap.Error(err))
+			return err
+		}
+		// updated plan is parsable, check if plan has changed
+		if lm.activeLicense.PlanDetails != response.PlanDetails {
+			err := lm.repo.UpdatePlanDetails(ctx, lm.activeLicense.Key, response.PlanDetails)
+			if err != nil {
+				// unexpected db write issue but we can let the user continue
+				// and wait for update to work in next cycle.
+				zap.L().Error("failed to validate license", zap.Error(err))
+			}
+		}
+		// activate the updated license plan
+		lm.SetActive(&l)
+	}
+	return nil
+}
-// todo[vikrantgupta25]: check the comparison here between old and new license!
 func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {
 	license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key)
@@ -199,27 +404,10 @@ func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
 		lm.lastValidated = time.Now().Unix()
 		if reterr != nil {
 			zap.L().Error("License validation completed with error", zap.Error(reterr))
 			atomic.AddUint64(&lm.failedAttempts, 1)
-			// default to basic plan if validation fails for three consecutive times
-			if atomic.LoadUint64(&lm.failedAttempts) > 3 {
-				zap.L().Error("License validation completed with error for three consecutive times, defaulting to basic plan", zap.String("license_id", lm.activeLicenseV3.ID), zap.Bool("license_validation", false))
-				lm.activeLicenseV3 = nil
-				lm.activeFeatures = model.BasicPlan
-				setDefaultFeatures(lm)
-				err := lm.InitFeatures(lm.activeFeatures)
-				if err != nil {
-					zap.L().Error("Couldn't initialize features", zap.Error(err))
-				}
-				lm.done <- struct{}{}
-				lm.validatorRunning = false
-			}
 			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
 				map[string]interface{}{"err": reterr.Error()}, "", true, false)
 		} else {
-			// reset the failed attempts counter
-			atomic.StoreUint64(&lm.failedAttempts, 0)
 			zap.L().Info("License validation completed with no errors")
 		}
@@ -234,6 +422,50 @@ func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
 	return nil
 }
+// Activate activates a license key with signoz server
+func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
+	defer func() {
+		if errResponse != nil {
+			userEmail, err := auth.GetEmailFromJwt(ctx)
+			if err == nil {
+				telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
+					map[string]interface{}{"err": errResponse.Err.Error()}, userEmail, true, false)
+			}
+		}
+	}()
+	response, apiError := validate.ActivateLicense(key, "")
+	if apiError != nil {
+		zap.L().Error("failed to activate license", zap.Error(apiError.Err))
+		return nil, apiError
+	}
+	l := &model.License{
+		Key:          key,
+		ActivationId: response.ActivationId,
+		PlanDetails:  response.PlanDetails,
+	}
+	// parse validity and features from the plan details
+	err := l.ParsePlan()
+	if err != nil {
+		zap.L().Error("failed to activate license", zap.Error(err))
+		return nil, model.InternalError(err)
+	}
+	// store the license before activating it
+	err = lm.repo.InsertLicense(ctx, l)
+	if err != nil {
+		zap.L().Error("failed to activate license", zap.Error(err))
+		return nil, model.InternalError(err)
+	}
+	// license is valid, activate it
+	lm.SetActive(l)
+	return l, nil
+}
 func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) {
 	defer func() {
 		if errResponse != nil {
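Validator and ValidatorV3 above share one loop shape: validate once up front, then revalidate on every tick until the done channel is closed, with a nested select so shutdown wins over a pending tick. A self-contained sketch of that loop (names and the short interval are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

// runValidator mirrors the manager's loop: one validation up front, then one
// per tick; the outer select gives the done channel priority over the ticker.
func runValidator(ctx context.Context, done chan struct{}, every time.Duration, validate func(context.Context)) {
	tick := time.NewTicker(every)
	defer tick.Stop()
	validate(ctx)
	for {
		select {
		case <-done:
			return
		default:
			select {
			case <-done:
				return
			case <-tick.C:
				validate(ctx)
			}
		}
	}
}

func main() {
	done := make(chan struct{})
	go runValidator(context.Background(), done, 10*time.Millisecond, func(context.Context) {
		fmt.Println("validated")
	})
	time.Sleep(35 * time.Millisecond)
	close(done) // stops the loop even if a tick is pending
	time.Sleep(5 * time.Millisecond)
}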

View File

@@ -0,0 +1,63 @@
package sqlite
import (
"fmt"
"github.com/jmoiron/sqlx"
)
func InitDB(db *sqlx.DB) error {
var err error
if db == nil {
return fmt.Errorf("invalid db connection")
}
table_schema := `CREATE TABLE IF NOT EXISTS licenses(
key TEXT PRIMARY KEY,
createdAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updatedAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
planDetails TEXT,
activationId TEXT,
validationMessage TEXT,
lastValidated TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS sites(
uuid TEXT PRIMARY KEY,
alias VARCHAR(180) DEFAULT 'PROD',
url VARCHAR(300),
createdAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
`
_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("error in creating licenses table: %s", err.Error())
}
table_schema = `CREATE TABLE IF NOT EXISTS feature_status (
name TEXT PRIMARY KEY,
active bool,
usage INTEGER DEFAULT 0,
usage_limit INTEGER DEFAULT 0,
route TEXT
);`
_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("error in creating feature_status table: %s", err.Error())
}
table_schema = `CREATE TABLE IF NOT EXISTS licenses_v3 (
id TEXT PRIMARY KEY,
key TEXT NOT NULL UNIQUE,
data TEXT
);`
_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("error in creating licenses_v3 table: %s", err.Error())
}
return nil
}
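A sketch of wiring this InitDB up by hand, the way StartManager does through repo.InitDB (assumes the usual sqlx + go-sqlite3 setup inside the repo module; the import path matches the one added to the license repo file above):

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"

	sqlite "go.signoz.io/signoz/ee/query-service/license/sqlite"
)

func main() {
	db, err := sqlx.Open("sqlite3", "./signoz.db")
	if err != nil {
		log.Fatalf("failed to open sqlite db: %v", err)
	}
	defer db.Close()
	// Idempotent: every statement is CREATE TABLE IF NOT EXISTS, so a
	// restart against an existing database is safe.
	if err := sqlite.InitDB(db); err != nil {
		log.Fatal(err)
	}
}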

View File

@@ -13,13 +13,10 @@ import (
"go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0" semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.signoz.io/signoz/ee/query-service/app" "go.signoz.io/signoz/ee/query-service/app"
"go.signoz.io/signoz/pkg/config"
"go.signoz.io/signoz/pkg/config/envprovider"
"go.signoz.io/signoz/pkg/config/fileprovider"
"go.signoz.io/signoz/pkg/query-service/auth" "go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants" baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/migrate"
"go.signoz.io/signoz/pkg/query-service/version" "go.signoz.io/signoz/pkg/query-service/version"
"go.signoz.io/signoz/pkg/signoz"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/insecure"
@@ -97,8 +94,8 @@ func main() {
var cluster string var cluster string
var useLogsNewSchema bool var useLogsNewSchema bool
var useTraceNewSchema bool var useLicensesV3 bool
var cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string var cacheConfigPath, fluxInterval string
var enableQueryServiceLogOTLPExport bool var enableQueryServiceLogOTLPExport bool
var preferSpanMetrics bool var preferSpanMetrics bool
@@ -106,10 +103,9 @@ func main() {
var maxOpenConns int var maxOpenConns int
var dialTimeout time.Duration var dialTimeout time.Duration
var gatewayUrl string var gatewayUrl string
var useLicensesV3 bool
flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs") flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces") flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)") flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)") flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)") flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
@@ -120,11 +116,10 @@ func main() {
 	flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
 	flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
 	flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
-	flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
 	flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
 	flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
 	flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")
-	flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
 
 	flag.Parse()
 
 	loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
@@ -134,43 +129,23 @@ func main() {
 	version.PrintVersion()
 
-	config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
-		Uris: []string{"env:"},
-		ProviderFactories: []config.ProviderFactory{
-			envprovider.NewFactory(),
-			fileprovider.NewFactory(),
-		},
-	}, signoz.DeprecatedFlags{
-		MaxIdleConns: maxIdleConns,
-		MaxOpenConns: maxOpenConns,
-		DialTimeout:  dialTimeout,
-	})
-	if err != nil {
-		zap.L().Fatal("Failed to create config", zap.Error(err))
-	}
-
-	signoz, err := signoz.New(context.Background(), config, signoz.NewProviderConfig())
-	if err != nil {
-		zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
-	}
-
 	serverOptions := &app.ServerOptions{
-		Config:                     config,
-		SigNoz:                     signoz,
-		HTTPHostPort:               baseconst.HTTPHostPort,
-		PromConfigPath:             promConfigPath,
-		SkipTopLvlOpsPath:          skipTopLvlOpsPath,
-		PreferSpanMetrics:          preferSpanMetrics,
-		PrivateHostPort:            baseconst.PrivateHostPort,
-		DisableRules:               disableRules,
-		RuleRepoURL:                ruleRepoURL,
-		CacheConfigPath:            cacheConfigPath,
-		FluxInterval:               fluxInterval,
-		FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
-		Cluster:                    cluster,
-		GatewayUrl:                 gatewayUrl,
-		UseLogsNewSchema:           useLogsNewSchema,
-		UseTraceNewSchema:          useTraceNewSchema,
+		HTTPHostPort:      baseconst.HTTPHostPort,
+		PromConfigPath:    promConfigPath,
+		SkipTopLvlOpsPath: skipTopLvlOpsPath,
+		PreferSpanMetrics: preferSpanMetrics,
+		PrivateHostPort:   baseconst.PrivateHostPort,
+		DisableRules:      disableRules,
+		RuleRepoURL:       ruleRepoURL,
+		MaxIdleConns:      maxIdleConns,
+		MaxOpenConns:      maxOpenConns,
+		DialTimeout:       dialTimeout,
+		CacheConfigPath:   cacheConfigPath,
+		FluxInterval:      fluxInterval,
+		Cluster:           cluster,
+		GatewayUrl:        gatewayUrl,
+		UseLogsNewSchema:  useLogsNewSchema,
+		UseLicensesV3:     useLicensesV3,
 	}
 
 	// Read the jwt secret key
@@ -182,6 +157,12 @@ func main() {
 		zap.L().Info("JWT secret key set successfully.")
 	}
 
+	if err := migrate.Migrate(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
+		zap.L().Error("Failed to migrate", zap.Error(err))
+	} else {
+		zap.L().Info("Migration successful")
+	}
+
 	server, err := app.NewServer(serverOptions)
 	if err != nil {
 		zap.L().Fatal("Failed to create server", zap.Error(err))

---- next file ----

@@ -1,6 +1,7 @@
 package model
 
 import (
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"reflect"
@@ -60,6 +61,37 @@ type LicensePlan struct {
 	Status string `json:"status"`
 }
 
+func (l *License) ParsePlan() error {
+	l.LicensePlan = LicensePlan{}
+
+	planData, err := base64.StdEncoding.DecodeString(l.PlanDetails)
+	if err != nil {
+		return err
+	}
+
+	plan := LicensePlan{}
+	err = json.Unmarshal([]byte(planData), &plan)
+	if err != nil {
+		l.ValidationMessage = "failed to parse plan from license"
+		return errors.Wrap(err, "failed to parse plan from license")
+	}
+
+	l.LicensePlan = plan
+	l.ParseFeatures()
+	return nil
+}
+
+func (l *License) ParseFeatures() {
+	switch l.PlanKey {
+	case Pro:
+		l.FeatureSet = ProPlan
+	case Enterprise:
+		l.FeatureSet = EnterprisePlan
+	default:
+		l.FeatureSet = BasicPlan
+	}
+}
+
 type Licenses struct {
 	TrialStart int64 `json:"trialStart"`
 	TrialEnd   int64 `json:"trialEnd"`
@@ -139,8 +171,8 @@ func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
 	if err != nil {
 		return nil, err
 	}
 
-	// if license status is invalid then default it to basic
-	if status == LicenseStatusInvalid {
+	// if license status is inactive then default it to basic
+	if status == LicenseStatusInactive {
 		planName = PlanNameBasic
 	}
@@ -215,24 +247,3 @@ func NewLicenseV3WithIDAndKey(id string, key string, data map[string]interface{}) (*LicenseV3, error) {
 	licenseDataWithIdAndKey["key"] = key
 	return NewLicenseV3(licenseDataWithIdAndKey)
 }
-
-func ConvertLicenseV3ToLicenseV2(l *LicenseV3) *License {
-	planKeyFromPlanName, ok := MapOldPlanKeyToNewPlanName[l.PlanName]
-	if !ok {
-		planKeyFromPlanName = Basic
-	}
-	return &License{
-		Key:               l.Key,
-		ActivationId:      "",
-		PlanDetails:       "",
-		FeatureSet:        l.Features,
-		ValidationMessage: "",
-		IsCurrent:         l.IsCurrent,
-		LicensePlan: LicensePlan{
-			PlanKey:    planKeyFromPlanName,
-			ValidFrom:  l.ValidFrom,
-			ValidUntil: l.ValidUntil,
-			Status:     l.Status},
-	}
-}
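Note: the added ParsePlan is a two-step decode, base64 then JSON, with ParseFeatures falling back to BasicPlan for unknown plan keys. The runnable sketch below isolates that decode path with standalone types; every JSON tag except "status" (which the diff shows) is an assumption.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// LicensePlan mirrors the struct visible in this diff; only the "status"
// tag is confirmed by the shown code, the rest are assumed for the demo.
type LicensePlan struct {
	PlanKey    string `json:"planKey"`
	ValidFrom  int64  `json:"validFrom"`
	ValidUntil int64  `json:"validUntil"`
	Status     string `json:"status"`
}

func main() {
	// PlanDetails as it would arrive on a license: base64-wrapped JSON.
	planDetails := base64.StdEncoding.EncodeToString(
		[]byte(`{"planKey":"PRO","validFrom":1700000000,"validUntil":1731536000,"status":"VALID"}`),
	)

	// Step 1: base64 decode, as ParsePlan does with l.PlanDetails.
	raw, err := base64.StdEncoding.DecodeString(planDetails)
	if err != nil {
		panic(err)
	}

	// Step 2: JSON unmarshal into the plan struct.
	var plan LicensePlan
	if err := json.Unmarshal(raw, &plan); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", plan)
}
```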

---- next file ----

@@ -17,11 +17,7 @@ var (
 )
 
 var (
-	MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameTeams: Pro, PlanNameEnterprise: Enterprise}
-)
-
-var (
-	LicenseStatusInvalid = "INVALID"
+	LicenseStatusInactive = "INACTIVE"
 )
 
 const DisableUpsell = "DISABLE_UPSELL"
@@ -157,13 +153,6 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.AWSIntegration,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 }
 
 var ProPlan = basemodel.FeatureSet{
@@ -286,13 +275,6 @@ var ProPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.AWSIntegration,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 }
 
 var EnterprisePlan = basemodel.FeatureSet{
@@ -429,11 +411,4 @@ var EnterprisePlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.AWSIntegration,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 }
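Note: all three hunks above drop the same AWSIntegration entry from the plan feature sets. Feature gating over a FeatureSet slice reduces to a linear lookup, so an entry removed from the set simply reads as inactive; a sketch with stand-in types (not the real basemodel package) follows.

```go
package main

import "fmt"

// Feature approximates the shape used in the FeatureSets above; the real
// basemodel definition is not shown in this diff.
type Feature struct {
	Name       string
	Active     bool
	Usage      int64
	UsageLimit int64
	Route      string
}

type FeatureSet []Feature

// isActive does the linear lookup: an entry missing from the set
// (like the removed AWSIntegration) reports as unavailable.
func (fs FeatureSet) isActive(name string) bool {
	for _, f := range fs {
		if f.Name == name {
			return f.Active
		}
	}
	return false
}

func main() {
	basic := FeatureSet{
		{Name: "DISABLE_UPSELL", Active: false, UsageLimit: -1},
	}
	fmt.Println(basic.isActive("AWS_INTEGRATION")) // false: not present on this side of the diff
}
```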

---- next file ----

@@ -26,7 +26,6 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) {
 			opts.FF,
 			opts.Reader,
 			opts.UseLogsNewSchema,
-			opts.UseTraceNewSchema,
 			baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
 		)
@@ -123,7 +122,6 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.ApiError) {
 			opts.FF,
 			opts.Reader,
 			opts.UseLogsNewSchema,
-			opts.UseTraceNewSchema,
 			baserules.WithSendAlways(),
 			baserules.WithSendUnmatched(),
 		)
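Note: both call sites end with variadic options (WithEvalDelay, WithSendAlways, WithSendUnmatched), the usual Go functional-options pattern. The sketch below shows how such options compose; the config fields and constructor are hypothetical stand-ins for the baserules types.

```go
package main

import (
	"fmt"
	"time"
)

type ruleConfig struct {
	evalDelay     time.Duration
	sendAlways    bool
	sendUnmatched bool
}

// Option mutates the config; each WithXxx constructor returns one.
type Option func(*ruleConfig)

func WithEvalDelay(d time.Duration) Option { return func(c *ruleConfig) { c.evalDelay = d } }
func WithSendAlways() Option               { return func(c *ruleConfig) { c.sendAlways = true } }
func WithSendUnmatched() Option            { return func(c *ruleConfig) { c.sendUnmatched = true } }

// newRule applies options in order, as the rule constructors above do
// with their trailing variadic parameters.
func newRule(opts ...Option) ruleConfig {
	cfg := ruleConfig{}
	for _, opt := range opts {
		opt(&cfg)
	}
	return cfg
}

func main() {
	fmt.Printf("%+v\n", newRule(WithEvalDelay(2*time.Minute), WithSendAlways(), WithSendUnmatched()))
}
```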

---- next file ----

@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"os"
 	"regexp"
 	"strings"
 	"sync/atomic"
@@ -45,9 +46,9 @@ type Manager struct {
 	tenantID string
 }
 
-func New(modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn, chUrl string) (*Manager, error) {
+func New(dbType string, modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
 	hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
-	hostNameRegexMatches := hostNameRegex.FindStringSubmatch(chUrl)
+	hostNameRegexMatches := hostNameRegex.FindStringSubmatch(os.Getenv("ClickHouseUrl"))
 
 	tenantID := ""
 	if len(hostNameRegexMatches) == 2 {
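Note: the only change here is where the ClickHouse DSN comes from, a chUrl parameter versus the ClickHouseUrl environment variable; the named-group regex that extracts the tenant hostname is identical on both sides. It behaves like this (runnable, same pattern as the diff; the sample DSN is made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as in New: capture everything between "tcp://" and ":".
	hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)

	// Hypothetical DSN for illustration.
	matches := hostNameRegex.FindStringSubmatch("tcp://clickhouse.default.svc:9000")
	if len(matches) == 2 { // full match + one capture group
		fmt.Println(matches[1]) // clickhouse.default.svc
	}
}
```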

---- next file ----

@@ -13,3 +13,8 @@ if [ "$branch" = "main" ]; then
   echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
   exit 1
 fi
+
+if [ "$branch" = "develop" ]; then
+  echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
+  exit 1
+fi

---- next file ----

@@ -6,7 +6,7 @@
 **Building image**
 
-``docker compose up`
+``docker-compose up`
 
 / This will also run
 or
@@ -19,7 +19,7 @@ docker tag signoz/frontend:latest 7296823551/signoz:latest
 ```
 
 ```
-docker compose up
+docker-compose up
 ```
 
 ## Without Docker

---- next file ----

@@ -1,7 +0,0 @@
-version: "3.9"
-services:
-  web:
-    build: .
-    image: signoz/frontend:latest
-    ports:
-      - "3301:3301"

---- next file ----

@@ -0,0 +1,7 @@
+version: "3.9"
+services:
+  web:
+    build: .
+    image: signoz/frontend:latest
+    ports:
+      - "3301:3301"

Some files were not shown because too many files have changed in this diff.