Compare commits
feat/timez...v0.70.0
238 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 70169986be |  |
|  | fb3b70b729 |  |
|  | 36f91d1993 |  |
|  | 333f90d8ac |  |
|  | ea83a7e62d |  |
|  | 42a5d71d81 |  |
|  | 08e1fd3ca5 |  |
|  | 8cb8beb63f |  |
|  | 35c61045c4 |  |
|  | 7c85befc17 |  |
|  | 813ca8bc23 |  |
|  | 98cdbcd711 |  |
|  | 61a6c21edb |  |
|  | 8d6731c7ba |  |
|  | 6dc7a76853 |  |
|  | cc3d78cd71 |  |
|  | bd0c4beeee |  |
|  | e83e691ef5 |  |
|  | c30c882aae |  |
|  | 001122db2c |  |
|  | b544a54c40 |  |
|  | af6b54aeb4 |  |
|  | 1e61e6c2f6 |  |
|  | 390b04c015 |  |
|  | c56345e1db |  |
|  | 7f25674640 |  |
|  | df5ab64c83 |  |
|  | 726f2b0fa2 |  |
|  | 4a0b0aafbd |  |
|  | 837f434fe9 |  |
|  | 0baf0e9453 |  |
|  | 403043e076 |  |
|  | 7730f76128 |  |
|  | 6e3ffd555d |  |
|  | c565c2b865 |  |
|  | 4ec1e66c7e |  |
|  | 89541862cc |  |
|  | 610f4d43e7 |  |
|  | 044a124cc1 |  |
|  | 0cf9003e3a |  |
|  | 644135a933 |  |
|  | b465f74e4a |  |
|  | e00e365964 |  |
|  | 5c45e1f7b3 |  |
|  | 16e61b45ac |  |
|  | fdcdbf021a |  |
|  | c92ef53e9c |  |
|  | 268f283785 |  |
|  | c574adc634 |  |
|  | 939ab5270e |  |
|  | 42525b6067 |  |
|  | c66cd3ce4e |  |
|  | e9618d64bc |  |
|  | 8e11a988be |  |
|  | 92299e1b08 |  |
|  | bab8c8274c |  |
|  | 265c67e5bd |  |
|  | efc8c95d59 |  |
|  | 5708079c3c |  |
|  | dbe78e55a9 |  |
|  | a60371fb80 |  |
|  | d5b847c091 |  |
|  | c106f1c9a9 |  |
|  | d6bfd95302 |  |
|  | 68ee677630 |  |
|  | 3ff862b483 |  |
|  | f91badbce9 |  |
|  | 2ead4fbb66 |  |
|  | 56b17bcfef |  |
|  | 5839b65f7a |  |
|  | 3787c5ca24 |  |
|  | 458cd28cc2 |  |
|  | 64a4606275 |  |
|  | 505757b971 |  |
|  | 80740f646c |  |
|  | e92d055c30 |  |
|  | 5c546e8efd |  |
|  | ecd50f7232 |  |
|  | 15f85a645f |  |
|  | 366ca3bb3e |  |
|  | 43b0cdbb6a |  |
|  | 4967696da8 |  |
|  | c5938b6c10 |  |
|  | 9feee6ff46 |  |
|  | d48cdbfc4a |  |
|  | dad72dd295 |  |
|  | 28d27bc5c1 |  |
|  | 3e675bb9a5 |  |
|  | 05c9dd68dd |  |
|  | 03fb388cd1 |  |
|  | 196b17dd1e |  |
|  | 93e9d15004 |  |
|  | f11161ddb8 |  |
|  | 50db3cc39f |  |
|  | 6e27df9dcb |  |
|  | 7f6bad67d5 |  |
|  | 825d2dfcbb |  |
|  | 9f6419c2f8 |  |
|  | 421879cf7a |  |
|  | 00abadd429 |  |
|  | 14096f8d53 |  |
|  | d2aa1cf06e |  |
|  | 838192cf5c |  |
|  | 5dfe245f2d |  |
|  | 53b86e4b5c |  |
|  | 5d9a2571df |  |
|  | bef6cc945a |  |
|  | 2c2e248c95 |  |
|  | 2f62a9d36d |  |
|  | 04778b9641 |  |
|  | 26fe5e49e7 |  |
|  | accafbc3ec |  |
|  | 8e7c78e1b1 |  |
|  | 53ebd39f41 |  |
|  | b36ef944cc |  |
|  | fa90fad373 |  |
|  | 77420b9d3a |  |
|  | cecc57e72d |  |
|  | 512adc6471 |  |
|  | 42fefc65be |  |
|  | dcc659907a |  |
|  | b90ed375c2 |  |
|  | a8a3bd3f7d |  |
|  | 7405bfbbee |  |
|  | 67e822e23e |  |
|  | 60dc479a19 |  |
|  | 85cf4f4e2e |  |
|  | 83aa48c721 |  |
|  | 823f84f857 |  |
|  | 8a4d45084d |  |
|  | 5bc6c33899 |  |
|  | 83f6dea2db |  |
|  | 7031c866e8 |  |
|  | 46bc7c7a21 |  |
|  | 6d9741c3a4 |  |
|  | 610a8ec704 |  |
|  | cd9f27ab08 |  |
|  | 14fbb1fcda |  |
|  | 96da21df05 |  |
|  | 8608f02263 |  |
|  | 2701ae5c34 |  |
|  | 951593b0a3 |  |
|  | e6766023dd |  |
|  | bef5b96c5c |  |
|  | b29359dee0 |  |
|  | 9a1cd65b73 |  |
|  | 8ab0c066d6 |  |
|  | b333aa3775 |  |
|  | 8a3319cdf5 |  |
|  | d09c4d947e |  |
|  | 2508e6f9f1 |  |
|  | 1b8213653a |  |
|  | b499b10333 |  |
|  | b35b975798 |  |
|  | 715f8a2363 |  |
|  | 8d1c4491b7 |  |
|  | e3caa6a8f5 |  |
|  | a1059ed949 |  |
|  | 8c46de8eac |  |
|  | 2b5a0ec496 |  |
|  | a9440c010c |  |
|  | f9e7eff357 |  |
|  | 0fbfb6b22b |  |
|  | b25df66381 |  |
|  | 32fa5a403c |  |
|  | f9d4cf19e9 |  |
|  | 81775c7d55 |  |
|  | 8d2666004b |  |
|  | 51baf7f8d3 |  |
|  | 31a2926375 |  |
|  | 8c6225185d |  |
|  | d4458d65ad |  |
|  | 02d8fdb212 |  |
|  | 47d8c9e3e7 |  |
|  | a383c708e3 |  |
|  | 99367be850 |  |
|  | 73bcc2af46 |  |
|  | 43f856c41b |  |
|  | 6384b25af3 |  |
|  | 507c0600cd |  |
|  | 3d092ec2ae |  |
|  | 2b8a610a07 |  |
|  | f7f8bf1867 |  |
|  | 813cd845f4 |  |
|  | 6aee991633 |  |
|  | 2bfd31841e |  |
|  | a320a16556 |  |
|  | 7cd8442e6e |  |
|  | 486632b64e |  |
|  | 328d955a74 |  |
|  | a3e57a1829 |  |
|  | 24ab18d988 |  |
|  | 2e4956c2f7 |  |
|  | b85f7921f4 |  |
|  | 0c2a15d86f |  |
|  | afbba1ed44 |  |
|  | 20f748f9c4 |  |
|  | 96b5e0920f |  |
|  | 7fe4f8cc56 |  |
|  | ed6abe5a95 |  |
|  | a6968d452c |  |
|  | 0c5db1937e |  |
|  | 67058b2a17 |  |
|  | e46d969143 |  |
|  | e4505693b0 |  |
|  | 2dad9a3093 |  |
|  | 7b6bd83e9a |  |
|  | d43adc24ef |  |
|  | 5044861773 |  |
|  | 71d1e12be7 |  |
|  | 5a70123b06 |  |
|  | f410df846a |  |
|  | d7bd72e2aa |  |
|  | 20e64b5102 |  |
|  | 0b03ff07f1 |  |
|  | c01060ccf7 |  |
|  | 57c2326908 |  |
|  | 649560265e |  |
|  | c8d0f7638e |  |
|  | 25484caa4c |  |
|  | 9ccc686c63 |  |
|  | 3ad6ff73df |  |
|  | c93cf1ce95 |  |
|  | a9ced66258 |  |
|  | 98a350692b |  |
|  | d859301d30 |  |
|  | 07c24bcdf3 |  |
|  | a11aadb712 |  |
|  | bc9c7b5f1d |  |
|  | 1bba932d08 |  |
|  | 3f7adeb040 |  |
|  | eb6670980a |  |
|  | 48f3b9cacb |  |
|  | eaf8571fe9 |  |
|  | b10c22223b |  |
|  | cdde369748 |  |
|  | 523cbcd6fc |  |
|  | eeadc021e1 |  |
@@ -3,4 +3,7 @@
.vscode
README.md
deploy
sample-apps

# frontend
node_modules
1  .github/workflows/build.yaml  (vendored)
@@ -3,7 +3,6 @@ name: build-pipeline
on:
  pull_request:
    branches:
      - develop
      - main
      - release/v*
2  .github/workflows/docs.yml  (vendored)
@@ -3,7 +3,7 @@ name: "Update PR labels and Block PR until related docs are shipped for the feat
on:
  pull_request:
    branches:
      - develop
      - main
    types: [opened, edited, labeled, unlabeled]

permissions:
2  .github/workflows/e2e-k3s.yaml  (vendored)
@@ -42,7 +42,7 @@ jobs:
          kubectl create ns sample-application

          # apply hotrod k8s manifest file
          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
          kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml

          # wait for all deployments in sample-application namespace to be READY
          kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
35  .github/workflows/goreleaser.yaml  (vendored, new file)
@@ -0,0 +1,35 @@
name: goreleaser

on:
  push:
    tags:
      - v*
      - histogram-quantile/v*

permissions:
  contents: write

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        workdirs:
          - scripts/clickhouse/histogramquantile
    steps:
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: set-up-go
        uses: actions/setup-go@v5
      - name: run-goreleaser
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser-pro
          version: '~> v2'
          args: release --clean
          workdir: ${{ matrix.workdirs }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
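The workflow above fires only on version tags, so a release build is kicked off by pushing one; a minimal sketch (the tag name `v0.71.0` is illustrative only):

```sh
# Pushing a tag matching v* or histogram-quantile/v* triggers goreleaser
git tag v0.71.0
git push origin v0.71.0
```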
5  .github/workflows/jest-coverage-changes.yml  (vendored)
@@ -2,7 +2,8 @@ name: Jest Coverage - changed files

on:
  pull_request:
    branches: develop
    branches:
      - main

jobs:
  build:
@@ -11,7 +12,7 @@ jobs:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: "refs/heads/develop"
          ref: "refs/heads/main"
          token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication

      - name: Fetch branch
36  .github/workflows/prereleaser.yaml  (vendored, new file)
@@ -0,0 +1,36 @@
name: prereleaser

on:
  # schedule every wednesday 9:30 AM UTC (3pm IST)
  schedule:
    - cron: '30 9 * * 3'

  # allow manual triggering of the workflow by a maintainer
  workflow_dispatch:
    inputs:
      release_type:
        description: "Type of the release"
        type: choice
        required: true
        options:
          - 'patch'
          - 'minor'
          - 'major'

jobs:
  verify:
    uses: signoz/primus.workflows/.github/workflows/github-verify.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
      GITHUB_TEAM_NAME: releaser
      GITHUB_MEMBER_NAME: ${{ github.actor }}
  signoz:
    if: ${{ always() && (needs.verify.result == 'success' || github.event.name == 'schedule') }}
    uses: signoz/primus.workflows/.github/workflows/releaser.yaml@main
    secrets: inherit
    needs: [verify]
    with:
      PRIMUS_REF: main
      PROJECT_NAME: signoz
      RELEASE_TYPE: ${{ inputs.release_type || 'minor' }}
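The `workflow_dispatch` trigger above accepts a `release_type` input, so a maintainer can also start the prereleaser from the GitHub CLI; a sketch, assuming an authenticated `gh` session against this repository:

```sh
# Manually trigger the prereleaser with an explicit release type
gh workflow run prereleaser.yaml -f release_type=patch
```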
12  .github/workflows/push.yaml  (vendored)
@@ -4,7 +4,6 @@ on:
  push:
    branches:
      - main
      - develop
    tags:
      - v*

@@ -58,6 +57,17 @@ jobs:
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Create .env file
        run: |
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
      - name: Setup golang
        uses: actions/setup-go@v4
        with:
34  .github/workflows/releaser.yaml  (vendored, new file)
@@ -0,0 +1,34 @@
name: releaser

on:
  # trigger on new latest release
  release:
    types: [published]

jobs:
  detect:
    if: ${{ !startsWith(github.event.release.tag_name, 'histogram-quantile/') }}
    runs-on: ubuntu-latest
    outputs:
      release_type: ${{ steps.find.outputs.release_type }}
    steps:
      - id: find
        name: find
        run: |
          release_tag=${{ github.event.release.tag_name }}
          patch_number=$(echo $release_tag | awk -F. '{print $3}')
          release_type="minor"
          if [[ $patch_number -ne 0 ]]; then
            release_type="patch"
          fi
          echo "release_type=${release_type}" >> "$GITHUB_OUTPUT"
  charts:
    if: ${{ !startsWith(github.event.release.tag_name, 'histogram-quantile/') }}
    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
    secrets: inherit
    needs: [detect]
    with:
      PRIMUS_REF: main
      GITHUB_REPOSITORY_NAME: charts
      GITHUB_EVENT_NAME: prereleaser
      GITHUB_EVENT_PAYLOAD: "{\"release_type\": \"${{ needs.detect.outputs.release_type }}\"}"
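The `detect` job derives the release type from the third dot-separated component of the tag: a zero patch number means a minor release, anything else a patch. The same logic can be sanity-checked in a plain shell:

```sh
# Mirrors the awk parse in the detect job: v0.70.0 -> minor, v0.70.1 -> patch
for release_tag in v0.70.0 v0.70.1; do
  patch_number=$(echo $release_tag | awk -F. '{print $3}')
  release_type="minor"
  if [[ $patch_number -ne 0 ]]; then
    release_type="patch"
  fi
  echo "$release_tag -> $release_type"
done
```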
1  .github/workflows/sonar.yml  (vendored)
@@ -3,7 +3,6 @@ on:
  pull_request:
    branches:
      - main
      - develop
    paths:
      - 'frontend/**'
defaults:
6  .github/workflows/staging-deployment.yaml  (vendored)
@@ -1,12 +1,12 @@
name: staging-deployment
# Trigger deployment only on push to develop branch
# Trigger deployment only on push to main branch
on:
  push:
    branches:
      - develop
      - main
jobs:
  deploy:
    name: Deploy latest develop branch to staging
    name: Deploy latest main branch to staging
    runs-on: ubuntu-latest
    environment: staging
    permissions:
2  .github/workflows/testing-deployment.yaml  (vendored)
@@ -44,7 +44,7 @@ jobs:
          git add .
          git stash push -m "stashed on $(date --iso-8601=seconds)"
          git fetch origin
          git checkout develop
          git checkout main
          git pull
          # This is added to include the scenerio when new commit in PR is force-pushed
          git branch -D ${GITHUB_BRANCH}
8  .gitignore  (vendored)
@@ -52,7 +52,7 @@ ee/query-service/tests/test-deploy/data/
/deploy/docker/clickhouse-setup/data/
/deploy/docker-swarm/clickhouse-setup/data/
bin/

.local/
*/query-service/queries.active

# e2e
@@ -70,3 +70,9 @@ vendor/

# git-town
.git-branches.toml

# goreleaser
dist/

# ignore user_scripts that is fetched by init-clickhouse
deploy/common/clickhouse/user_scripts/
10  .gitpod.yml
@@ -3,16 +3,10 @@

tasks:
  - name: Run Script to Comment ut required lines
    init: |
      cd ./.scripts
      sh commentLinesForSetup.sh

  - name: Run Docker Images
    init: |
      cd ./deploy
      sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
      # command:
      cd ./deploy/docker
      sudo docker compose up -d

  - name: Run Frontend
    init: |
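The Gitpod task now relies on the Compose v2 plugin syntax (`docker compose`) rather than the standalone `docker-compose` binary; whether the plugin is available can be checked with the standard Docker CLI:

```sh
# Compose v2 ships as a docker CLI plugin; this prints its version if present
docker compose version
```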
CONTRIBUTING.md

@@ -141,9 +141,9 @@ Depending upon your area of expertise & interest, you can choose one or more to

# 3. Develop Frontend 🌚

**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/frontend](https://github.com/SigNoz/signoz/tree/develop/frontend)**
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)**

Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/develop/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/main/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).

## 3.1 Contribute to Frontend with Docker installation of SigNoz

@@ -151,14 +151,14 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)

- run `cd deploy` to move to deploy directory,
- Install signoz locally **without** the frontend,
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L47)
- Add / Uncomment the below configuration to query-service section at [`deploy/docker/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L47)
```
ports:
  - "8080:8080"
```
@@ -167,9 +167,10 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/

- Next run,
```
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
cd deploy/docker
sudo docker compose up -d
```
- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.

If you have backend api exposed via frontend nginx:
```
@@ -186,11 +187,6 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
yarn dev
```

### Important Notes:
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)

**[`^top^`](#contributing-guidelines)**

## 3.2 Contribute to Frontend without installing SigNoz backend

If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.
@@ -216,7 +212,7 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi

# 4. Contribute to Backend (Query-Service) 🌑

**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**
**Need to Update: [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)**

## 4.1 Prerequisites

@@ -242,13 +238,13 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- run `sudo make dev-setup` to configure local setup to run query-service,
- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
<img width="982" alt="develop-frontend" src="https://user-images.githubusercontent.com/52788043/179043977-012be8b0-a2ed-40d1-b2e6-2ab72d7989c0.png">

- Comment out `query-service` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L41)
- Comment out `query-service` section at [`deploy/docker/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L41)
<img width="1068" alt="Screenshot 2022-07-14 at 22 48 07" src="https://user-images.githubusercontent.com/52788043/179044151-a65ba571-db0b-4a16-b64b-ca3fadcf3af0.png">

- add below configuration to `clickhouse` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml)
- add below configuration to `clickhouse` section at [`deploy/docker/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml)
```
ports:
  - 9001:9000
```
@@ -258,9 +254,9 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
- run `cd pkg/query-service/` to move to `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
```
SIGNOZ_LOCAL_DB_PATH="./signoz.db"
SIGNOZ_SQLSTORE_SQLITE_PATH="./signoz.db"
```
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/develop/pkg/query-service/constants/constants.go#L38)
to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/main/pkg/query-service/constants/constants.go#L38)

- Now, install SigNoz locally **without** the `frontend` and `query-service`,
- If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
@@ -294,13 +290,10 @@ docker pull signoz/query-service:develop
```

### Important Note:
The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)

**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)

If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.

<!-- Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE.

@@ -339,7 +332,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
**5.1.1 To install the HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
 | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
```

@@ -362,7 +355,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
**5.1.4 To delete the HotROD sample app:**

```bash
curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
 | HOTROD_NAMESPACE=sample-application bash
```
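The frontend `.env` mentioned above is a plain key=value file; a minimal sketch for the case where query-service is exposed directly on port 8080 (the exact value depends on your setup):

```sh
# frontend/.env
FRONTEND_API_ENDPOINT=http://localhost:8080
```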
37  Makefile
@@ -8,14 +8,16 @@ BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
DEV_LICENSE_SIGNOZ_IO ?= https://staging-license.signoz.io/api/v1
ZEUS_URL ?= https://api.signoz.cloud
DEV_BUILD ?= "" # set to any non-empty value to enable dev build

# Internal variables or constants.
FRONTEND_DIRECTORY ?= frontend
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
EE_QUERY_SERVICE_DIRECTORY ?= ee/query-service
STANDALONE_DIRECTORY ?= deploy/docker/clickhouse-setup
SWARM_DIRECTORY ?= deploy/docker-swarm/clickhouse-setup
STANDALONE_DIRECTORY ?= deploy/docker
SWARM_DIRECTORY ?= deploy/docker-swarm
CH_HISTOGRAM_QUANTILE_DIRECTORY ?= scripts/clickhouse/histogramquantile

GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)

@@ -33,8 +35,9 @@ buildHash=${PACKAGE}/pkg/query-service/version.buildHash
buildTime=${PACKAGE}/pkg/query-service/version.buildTime
gitBranch=${PACKAGE}/pkg/query-service/version.gitBranch
licenseSignozIo=${PACKAGE}/ee/query-service/constants.LicenseSignozIo
zeusURL=${PACKAGE}/ee/query-service/constants.ZeusURL

LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}
LD_FLAGS=-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH} -X ${zeusURL}=${ZEUS_URL}
DEV_LD_FLAGS=-X ${licenseSignozIo}=${DEV_LICENSE_SIGNOZ_IO}

all: build-push-frontend build-push-query-service

@@ -96,12 +99,12 @@ build-query-service-static-arm64:

# Steps to build static binary of query service for all platforms
.PHONY: build-query-service-static-all
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64
build-query-service-static-all: build-query-service-static-amd64 build-query-service-static-arm64 build-frontend-static

# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
# Step to build docker image of query service in amd64 (used in build pipeline)
build-query-service-amd64: build-query-service-static-amd64
build-query-service-amd64: build-query-service-static-amd64 build-frontend-static
	@echo "------------------"
	@echo "--> Building query-service docker image for amd64"
	@echo "------------------"

@@ -140,16 +143,6 @@ dev-setup:
	@echo "--> Local Setup completed"
	@echo "------------------"

run-local:
	@docker-compose -f \
	$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
	up --build -d

down-local:
	@docker-compose -f \
	$(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
	down -v

pull-signoz:
	@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull

@@ -188,4 +181,16 @@ check-no-ee-references:
	fi

test:
	go test ./pkg/query-service/...
	go test ./pkg/...

goreleaser-snapshot:
	@if [[ ${GORELEASER_WORKDIR} ]]; then \
		cd ${GORELEASER_WORKDIR} && \
		goreleaser release --clean --snapshot; \
		cd -; \
	else \
		goreleaser release --clean --snapshot; \
	fi

goreleaser-snapshot-histogram-quantile:
	make GORELEASER_WORKDIR=$(CH_HISTOGRAM_QUANTILE_DIRECTORY) goreleaser-snapshot
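The new `goreleaser-snapshot` targets make it easy to exercise the release build locally without publishing anything; for example, assuming `goreleaser` is installed:

```sh
# Snapshot-build the histogram-quantile binary from the repo root
make goreleaser-snapshot-histogram-quantile

# Or point the generic target at any goreleaser-enabled workdir
make GORELEASER_WORKDIR=scripts/clickhouse/histogramquantile goreleaser-snapshot
```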
README.de-de.md

@@ -13,9 +13,9 @@

<h3 align="center">
  <a href="https://signoz.io/docs"><b>Dokumentation</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.md"><b>Readme auf Englisch </b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.md"><b>Readme auf Englisch </b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe auf Chinesisch</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe auf Portugiesisch</b></a> •
  <a href="https://signoz.io/slack"><b>Slack Community</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
README.md

@@ -17,9 +17,9 @@

<h3 align="center">
  <a href="https://signoz.io/docs"><b>Documentation</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>ReadMe in German</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> •
  <a href="https://signoz.io/slack"><b>Slack Community</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
README.zh-cn.md

@@ -12,9 +12,9 @@

<h3 align="center">
  <a href="https://signoz.io/docs"><b>文档</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.zh-cn.md"><b>中文ReadMe</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.de-de.md"><b>德文ReadMe</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/develop/README.pt-br.md"><b>葡萄牙语ReadMe</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>中文ReadMe</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>德文ReadMe</b></a> •
  <a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>葡萄牙语ReadMe</b></a> •
  <a href="https://signoz.io/slack"><b>Slack 社区</b></a> •
  <a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
80  conf/example.yaml  (new file)
@@ -0,0 +1,80 @@
##################### SigNoz Configuration Example #####################
#
# Do not modify this file
#

##################### Instrumentation #####################
instrumentation:
  logs:
    # The log level to use.
    level: info
  traces:
    # Whether to enable tracing.
    enabled: false
    processors:
      batch:
    exporter:
      otlp:
        endpoint: localhost:4317
  metrics:
    # Whether to enable metrics.
    enabled: true
    readers:
      pull:
        exporter:
          prometheus:
            host: "0.0.0.0"
            port: 9090

##################### Web #####################
web:
  # Whether to enable the web frontend
  enabled: true
  # The prefix to serve web on
  prefix: /
  # The directory containing the static build files.
  directory: /etc/signoz/web

##################### Cache #####################
cache:
  # specifies the caching provider to use.
  provider: memory
  # memory: Uses in-memory caching.
  memory:
    # Time-to-live for cache entries in memory. Specify the duration in ns
    ttl: 60000000000
    # The interval at which the cache will be cleaned up
    cleanupInterval: 1m
  # redis: Uses Redis as the caching backend.
  redis:
    # The hostname or IP address of the Redis server.
    host: localhost
    # The port on which the Redis server is running. Default is usually 6379.
    port: 6379
    # The password for authenticating with the Redis server, if required.
    password:
    # The Redis database number to use
    db: 0

##################### SQLStore #####################
sqlstore:
  # specifies the SQLStore provider to use.
  provider: sqlite
  # The maximum number of open connections to the database.
  max_open_conns: 100
  sqlite:
    # The path to the SQLite database file.
    path: /var/lib/signoz/signoz.db

##################### APIServer #####################
apiserver:
  timeout:
    default: 60s
    max: 600s
    excluded_routes:
      - /api/v1/logs/tail
      - /api/v3/logs/livetail
  logging:
    excluded_routes:
      - /api/v1/health
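One detail worth a sanity check: the in-memory cache `ttl` above is specified in nanoseconds, so the default works out to exactly one minute, matching the human-readable `cleanupInterval`:

```sh
# 60000000000 ns / 1000000000 ns-per-second = 60 s
echo $((60000000000 / 1000000000))   # prints 60
```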
deploy/README.md

@@ -18,65 +18,64 @@ Now run the following command to install:

### Using Docker Compose

If you don't have docker-compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
If you don't have docker compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
to set up docker compose before proceeding with the next steps.

For x86 chip (amd):

```sh
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
cd deploy/docker
docker compose up -d
```

Open http://localhost:3301 in your favourite browser. In couple of minutes, you should see
the data generated from hotrod in SigNoz UI.
Open http://localhost:3301 in your favourite browser.

## Kubernetes

### Using Helm

#### Bring up SigNoz cluster
To start collecting logs and metrics from your infrastructure, run the following command:

```sh
helm repo add signoz https://charts.signoz.io

kubectl create ns platform

helm -n platform install my-release signoz/signoz
cd generator/infra
docker compose up -d
```

To access the UI, you can `port-forward` the frontend service:
To start generating sample traces, run the following command:

```sh
kubectl -n platform port-forward svc/my-release-frontend 3301:3301
cd generator/hotrod
docker compose up -d
```

Open http://localhost:3301 in your favourite browser. Few minutes after you generate load
from the HotROD application, you should see the data generated from hotrod in SigNoz UI.
In a couple of minutes, you should see the data generated from hotrod in SigNoz UI.

#### Test HotROD application with SigNoz
For more details, please refer to the [SigNoz documentation](https://signoz.io/docs/install/docker/).

## Docker Swarm

To install SigNoz using Docker Swarm, run the following command:

```sh
kubectl create ns sample-application

kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
cd deploy/docker-swarm
docker stack deploy -c docker-compose.yaml signoz
```

To generate load:
Open http://localhost:3301 in your favourite browser.

To start collecting logs and metrics from your infrastructure, run the following command:

```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
 --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
 'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
cd generator/infra
docker stack deploy -c docker-compose.yaml infra
```

To stop load:
To start generating sample traces, run the following command:

```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
 --restart='OnFailure' -i --tty --rm --command -- curl \
 http://locust-master:8089/stop
cd generator/hotrod
docker stack deploy -c docker-compose.yaml hotrod
```

In a couple of minutes, you should see the data generated from hotrod in SigNoz UI.

For more details, please refer to the [SigNoz documentation](https://signoz.io/docs/install/docker-swarm/).

## Uninstall/Troubleshoot?

Go to our official documentation site [signoz.io/docs](https://signoz.io/docs) for more.
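After a `docker stack deploy`, it is worth confirming that the services actually converged; a sketch using standard Docker Swarm commands (the `signoz_query-service` name assumes the default `<stack>_<service>` naming):

```sh
# List services in the signoz stack with their replica counts
docker stack services signoz

# Tail the logs of one service if something looks off
docker service logs --tail 50 signoz_query-service
```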
@@ -10,14 +10,14 @@
            <host>zookeeper-1</host>
            <port>2181</port>
        </node>
        <!-- <node index="2">
        <node index="2">
            <host>zookeeper-2</host>
            <port>2181</port>
        </node>
        <node index="3">
            <host>zookeeper-3</host>
            <port>2181</port>
        </node> -->
        </node>
    </zookeeper>

    <!-- Configuration of clusters that could be used in Distributed tables.
@@ -58,7 +58,7 @@
                <!-- <priority>1</priority> -->
            </replica>
        </shard>
        <!-- <shard>
        <shard>
            <replica>
                <host>clickhouse-2</host>
                <port>9000</port>
@@ -69,7 +69,7 @@
                <host>clickhouse-3</host>
                <port>9000</port>
            </replica>
        </shard> -->
        </shard>
    </cluster>
</remote_servers>
</clickhouse>
</clickhouse>
@@ -72,4 +72,4 @@
        </shard> -->
    </cluster>
</remote_servers>
</clickhouse>
</clickhouse>
@@ -370,7 +370,7 @@

    <!-- Path to temporary data for processing hard queries. -->
    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>

    <!-- Disable AuthType plaintext_password and no_password for ACL. -->
    <!-- <allow_plaintext_password>0</allow_plaintext_password> -->
    <!-- <allow_no_password>0</allow_no_password> -->
@@ -652,12 +652,12 @@

    See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
    -->

    <macros>
        <shard>01</shard>
        <replica>example01-01-1</replica>
    </macros>

    <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
@@ -716,7 +716,7 @@
    asynchronous_metrics - send data from table system.asynchronous_metrics
    status_info - send data from different component from CH, ex: Dictionaries status
    -->
    <!--
    <prometheus>
        <endpoint>/metrics</endpoint>
        <port>9363</port>
@@ -726,7 +726,6 @@
        <asynchronous_metrics>true</asynchronous_metrics>
        <status_info>true</status_info>
    </prometheus>
    -->

    <!-- Query log. Used only for queries with setting log_queries = 1. -->
    <query_log>
0  deploy/common/dashboards/.gitkeep  (new file)
@@ -44,7 +44,7 @@ server {
    location /api {
        proxy_pass http://query-service:8080/api;
        # connection will be closed if no data is read for 600s between successive read operations
        proxy_read_timeout 600s;
        proxy_read_timeout 600s;
    }

    location /ws {
@@ -12,10 +12,10 @@ alerting:
      - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
rule_files: []
  # - "first_rules.yml"
  # - "second_rules.yml"
  - 'alerts.yml'
  # - 'alerts.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
0  deploy/docker-swarm/clickhouse-setup/.gitkeep  (new file)
@@ -1,35 +0,0 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
  - name: 'slack-notifications'
    slack_configs:
      - channel: '#alerts'
        send_resolved: true
        icon_url: https://avatars3.githubusercontent.com/u/3380462
        title: |-
          [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
          {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
            {{" "}}(
            {{- with .CommonLabels.Remove .GroupLabels.Names }}
              {{- range $index, $label := .SortedPairs -}}
                {{ if $index }}, {{ end }}
                {{- $label.Name }}="{{ $label.Value -}}"
              {{- end }}
            {{- end -}}
            )
          {{- end }}
        text: >-
          {{ range .Alerts -}}
          *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

          *Description:* {{ .Annotations.description }}

          *Details:*
          {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
          {{ end }}
          {{ end }}
@@ -1,11 +0,0 @@
groups:
  - name: ExampleCPULoadGroup
    rules:
      - alert: HighCpuLoad
        expr: system_cpu_load_average_1m > 0.1
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: High CPU load
          description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
File diff suppressed because it is too large
@@ -1,287 +0,0 @@
version: "3.9"

x-clickhouse-defaults: &clickhouse-defaults
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  deploy:
    restart_policy:
      condition: on-failure
  depends_on:
    - zookeeper-1
    # - zookeeper-2
    # - zookeeper-3
  logging:
    options:
      max-size: 50m
      max-file: "3"
  healthcheck:
    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
    test:
      [
        "CMD",
        "wget",
        "--spider",
        "-q",
        "0.0.0.0:8123/ping"
      ]
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144

x-db-depend: &db-depend
  depends_on:
    - clickhouse
    - otel-collector-migrator
    # - clickhouse-2
    # - clickhouse-3

services:
  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-2:
  #   image: bitnami/zookeeper:3.7.0
  #   hostname: zookeeper-2
  #   user: root
  #   ports:
  #     - "2182:2181"
  #     - "2889:2888"
  #     - "3889:3888"
  #   volumes:
  #     - ./data/zookeeper-2:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=2
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-3:
  #   image: bitnami/zookeeper:3.7.0
  #   hostname: zookeeper-3
  #   user: root
  #   ports:
  #     - "2183:2181"
  #     - "2890:2888"
  #     - "3890:3888"
  #   volumes:
  #     - ./data/zookeeper-3:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=3
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    <<: *clickhouse-defaults
    hostname: clickhouse
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    #   - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/

  # clickhouse-2:
  #   <<: *clickhouse-defaults
  #   hostname: clickhouse-2
  #   ports:
  #     - "9001:9000"
  #     - "8124:8123"
  #     - "9182:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-2/:/var/lib/clickhouse/

  # clickhouse-3:
  #   <<: *clickhouse-defaults
  #   hostname: clickhouse-3
  #   ports:
  #     - "9002:9000"
  #     - "8125:8123"
  #     - "9183:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-3/:/var/lib/clickhouse/

  alertmanager:
    image: signoz/alertmanager:0.23.7
    volumes:
      - ./data/alertmanager:/data
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    depends_on:
      - query-service
    deploy:
      restart_policy:
        condition: on-failure

  query-service:
    image: signoz/query-service:0.56.0
    command:
      [
        "-config=/root/config/prometheus.yml",
        "--use-logs-new-schema=true"
      ]
    # ports:
    #   - "6060:6060" # pprof port
    #   - "8080:8080" # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      restart_policy:
        condition: on-failure
    <<: *db-depend

  frontend:
    image: signoz/frontend:0.56.0
    deploy:
      restart_policy:
        condition: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/signoz-otel-collector:0.111.5
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /:/hostfs:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8888:8888" # OtelCollector internal metrics
      # - "8889:8889" # signoz spanmetrics exposed by the agent
      # - "9411:9411" # Zipkin port
      # - "13133:13133" # Health check extension
      # - "14250:14250" # Jaeger gRPC
      # - "14268:14268" # Jaeger thrift HTTP
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zPages extension
    deploy:
      mode: global
      restart_policy:
        condition: on-failure
    depends_on:
      - clickhouse
      - otel-collector-migrator
      - query-service

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:0.111.5
    deploy:
      restart_policy:
        condition: on-failure
        delay: 5s
    command:
      - "--dsn=tcp://clickhouse:9000"
    depends_on:
      - clickhouse
      # - clickhouse-2
      # - clickhouse-3

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    deploy:
      mode: global
      restart_policy:
        condition: on-failure

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    command: [ "all" ]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
    logging:
      options:
        max-size: 50m
        max-file: "3"

  load-hotrod:
    image: "signoz/locust:1.2.3"
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
@@ -1,31 +0,0 @@
CREATE TABLE IF NOT EXISTS signoz_index (
  timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
  traceID String CODEC(ZSTD(1)),
  spanID String CODEC(ZSTD(1)),
  parentSpanID String CODEC(ZSTD(1)),
  serviceName LowCardinality(String) CODEC(ZSTD(1)),
  name LowCardinality(String) CODEC(ZSTD(1)),
  kind Int32 CODEC(ZSTD(1)),
  durationNano UInt64 CODEC(ZSTD(1)),
  tags Array(String) CODEC(ZSTD(1)),
  tagsKeys Array(String) CODEC(ZSTD(1)),
  tagsValues Array(String) CODEC(ZSTD(1)),
  statusCode Int64 CODEC(ZSTD(1)),
  references String CODEC(ZSTD(1)),
  externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
  externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
  component Nullable(String) CODEC(ZSTD(1)),
  dbSystem Nullable(String) CODEC(ZSTD(1)),
  dbName Nullable(String) CODEC(ZSTD(1)),
  dbOperation Nullable(String) CODEC(ZSTD(1)),
  peerService Nullable(String) CODEC(ZSTD(1)),
  INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
  INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
  INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
  INDEX idx_kind kind TYPE minmax GRANULARITY 4,
  INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
@@ -1,51 +0,0 @@
server {
    listen 3301;
    server_name _;

    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;

    # to handle uri issue 414 from nginx
    client_max_body_size 24M;
    large_client_header_buffers 8 128k;

    location / {
        if ( $uri = '/index.html' ) {
            add_header Cache-Control no-store always;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
    }

    location ~ ^/api/(v1|v3)/logs/(tail|livetail){
        proxy_pass http://query-service:8080;
        proxy_http_version 1.1;

        # connection will be closed if no data is read for 600s between successive read operations
        proxy_read_timeout 600s;

        # dont buffer the data send it directly to client.
        proxy_buffering off;
        proxy_cache off;
    }

    location /api {
        proxy_pass http://query-service:8080/api;
        # connection will be closed if no data is read for 600s between successive read operations
        proxy_read_timeout 600s;
    }

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
282  deploy/docker-swarm/docker-compose.ha.yaml  (new file)
@@ -0,0 +1,282 @@
|
||||
version: "3"
|
||||
x-common: &common
|
||||
networks:
|
||||
- signoz-net
|
||||
deploy:
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
x-clickhouse-defaults: &clickhouse-defaults
|
||||
!!merge <<: *common
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
tty: true
|
||||
deploy:
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
signoz.io/port: "9363"
|
||||
signoz.io/path: "/metrics"
|
||||
depends_on:
|
||||
- zookeeper-1
|
||||
- zookeeper-2
|
||||
- zookeeper-3
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
- wget
|
||||
- --spider
|
||||
- -q
|
||||
- 0.0.0.0:8123/ping
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
x-zookeeper-defaults: &zookeeper-defaults
|
||||
!!merge <<: *common
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
user: root
|
||||
deploy:
|
||||
labels:
|
||||
signoz.io/scrape: "true"
|
||||
signoz.io/port: "9141"
|
||||
signoz.io/path: "/metrics"
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD-SHELL
|
||||
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
x-db-depend: &db-depend
|
||||
!!merge <<: *common
|
||||
depends_on:
|
||||
- clickhouse
|
||||
- clickhouse-2
|
||||
- clickhouse-3
|
||||
- schema-migrator
|
||||
services:
|
||||
init-clickhouse:
|
||||
!!merge <<: *common
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |
|
||||
version="v0.0.1"
|
||||
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
|
||||
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
|
||||
cd /tmp
|
||||
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
|
||||
tar -xvzf histogram-quantile.tar.gz
|
||||
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
|
||||
volumes:
|
||||
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
  zookeeper-1:
    !!merge <<: *zookeeper-defaults
    # ports:
    #   - "2181:2181"
    #   - "2888:2888"
    #   - "3888:3888"
    volumes:
      - ./clickhouse-setup/data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  zookeeper-2:
    !!merge <<: *zookeeper-defaults
    # ports:
    #   - "2182:2181"
    #   - "2889:2888"
    #   - "3889:3888"
    volumes:
      - ./clickhouse-setup/data/zookeeper-2:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=2
      - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  zookeeper-3:
    !!merge <<: *zookeeper-defaults
    # ports:
    #   - "2183:2181"
    #   - "2890:2888"
    #   - "3890:3888"
    volumes:
      - ./clickhouse-setup/data/zookeeper-3:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=3
      - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  clickhouse:
    !!merge <<: *clickhouse-defaults
    # TODO: needed for schema-migrator to work, remove this redundancy once we have a better solution
    hostname: clickhouse
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    #   - "9181:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  clickhouse-2:
    !!merge <<: *clickhouse-defaults
    hostname: clickhouse-2
    # ports:
    #   - "9001:9000"
    #   - "8124:8123"
    #   - "9182:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - ./clickhouse-setup/data/clickhouse-2/:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  clickhouse-3:
    !!merge <<: *clickhouse-defaults
    hostname: clickhouse-3
    # ports:
    #   - "9002:9000"
    #   - "8125:8123"
    #   - "9183:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:0.23.7
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - ./clickhouse-setup/data/alertmanager:/data
    depends_on:
      - query-service
  query-service:
    !!merge <<: *db-depend
    image: signoz/query-service:0.70.0
    command:
      - --config=/root/config/prometheus.yml
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "8080:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - ./clickhouse-setup/data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - localhost:8080/api/v1/health
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:0.70.0
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:0.111.24
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
      - --copy-path=/var/tmp/collector-config.yaml
      - --feature-gates=-pkg.translator.prometheus.NormalizeName
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
    deploy:
      replicas: 3
    depends_on:
      - clickhouse
      - schema-migrator
      - query-service
  schema-migrator:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:0.111.24
    deploy:
      restart_policy:
        condition: on-failure
        delay: 5s
    entrypoint: sh
    command:
      - -c
      - "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
    depends_on:
      - clickhouse
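  # The chained command runs the `sync` migrations first and the `async` ones second;
  # the naming suggests sync migrations must complete before dependent services start
  # writing, while async ones can apply in the background (an assumption from the
  # naming, not stated in this file).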
networks:
  signoz-net:
    name: signoz-net

volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  clickhouse-2:
    name: signoz-clickhouse-2
  clickhouse-3:
    name: signoz-clickhouse-3
  sqlite:
    name: signoz-sqlite
  zookeeper-1:
    name: signoz-zookeeper-1
  zookeeper-2:
    name: signoz-zookeeper-2
  zookeeper-3:
    name: signoz-zookeeper-3
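For context, a swarm stack file like the one above is deployed with `docker stack deploy`; a minimal sketch, with the file and stack names as assumptions rather than anything this diff prescribes:

  # assumes an initialised swarm (docker swarm init) on the manager node
  cd deploy/docker-swarm
  docker stack deploy --compose-file docker-compose.yaml signoz
  docker stack services signoz   # confirm replicas come up and pass healthchecks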
210
deploy/docker-swarm/docker-compose.yaml
Normal file
@@ -0,0 +1,210 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  deploy:
    restart_policy:
      condition: on-failure
  logging:
    options:
      max-size: 50m
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  deploy:
    labels:
      signoz.io/scrape: "true"
      signoz.io/port: "9363"
      signoz.io/path: "/metrics"
  depends_on:
    - init-clickhouse
    - zookeeper-1
  healthcheck:
    test:
      - CMD
      - wget
      - --spider
      - -q
      - 0.0.0.0:8123/ping
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: bitnami/zookeeper:3.7.1
  user: root
  deploy:
    labels:
      signoz.io/scrape: "true"
      signoz.io/port: "9141"
      signoz.io/path: "/metrics"
  healthcheck:
    test:
      - CMD-SHELL
      - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
    interval: 30s
    timeout: 5s
    retries: 3
x-db-depend: &db-depend
  !!merge <<: *common
  depends_on:
    - clickhouse
    - schema-migrator
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:24.1.2-alpine
    command:
      - bash
      - -c
      - |
        version="v0.0.1"
        node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
        node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
        echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
        cd /tmp
        wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
        tar -xvzf histogram-quantile.tar.gz
        mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
    volumes:
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
  zookeeper-1:
    !!merge <<: *zookeeper-defaults
    # ports:
    #   - "2181:2181"
    #   - "2888:2888"
    #   - "3888:3888"
    volumes:
      - zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  clickhouse:
    !!merge <<: *clickhouse-defaults
    # TODO: needed for clickhouse TCP connection
    hostname: clickhouse
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    #   - "9181:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:0.23.7
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      - query-service
  query-service:
    !!merge <<: *db-depend
    image: signoz/query-service:0.70.0
    command:
      - --config=/root/config/prometheus.yml
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "8080:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - localhost:8080/api/v1/health
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:0.70.0
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:0.111.24
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
      - --copy-path=/var/tmp/collector-config.yaml
      - --feature-gates=-pkg.translator.prometheus.NormalizeName
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
    deploy:
      replicas: 3
    depends_on:
      - clickhouse
      - schema-migrator
      - query-service
  schema-migrator:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:0.111.24
    deploy:
      restart_policy:
        condition: on-failure
        delay: 5s
    entrypoint: sh
    command:
      - -c
      - "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
    depends_on:
      - clickhouse
networks:
  signoz-net:
    name: signoz-net

volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  sqlite:
    name: signoz-sqlite
  zookeeper-1:
    name: signoz-zookeeper-1
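Once the stack above is up, a quick reachability check against the OTLP HTTP receiver it publishes; an empty JSON payload should draw a 200 from a healthy collector (a sketch, with localhost assumed to be a swarm node):

  curl -s -o /dev/null -w '%{http_code}\n' \
    -X POST http://localhost:4318/v1/traces \
    -H 'Content-Type: application/json' \
    -d '{}'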
38
deploy/docker-swarm/generator/hotrod/docker-compose.yaml
Normal file
@@ -0,0 +1,38 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  extra_hosts:
    - host.docker.internal:host-gateway
  logging:
    options:
      max-size: 50m
      max-file: "3"
  deploy:
    restart_policy:
      condition: on-failure
services:
  hotrod:
    <<: *common
    image: jaegertracing/example-hotrod:1.61.0
    command: [ "all" ]
    environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318
  load-hotrod:
    <<: *common
    image: "signoz/locust:1.2.3"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
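      # LOCUST_OPTS reading: run headless with 10 concurrent simulated users (-u),
      # spawning 1 new user per second (-r), against ATTACKED_HOST.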
    volumes:
      - ../../../common/locust-scripts:/locust

networks:
  signoz-net:
    name: signoz-net
    external: true
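Because `signoz-net` is declared `external` here, this generator stack attaches to the network created by the main SigNoz stack instead of creating its own, so that stack must already be running. A deployment sketch (the stack name is illustrative):

  docker stack deploy --compose-file docker-compose.yaml hotrod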
69
deploy/docker-swarm/generator/infra/docker-compose.yaml
Normal file
@@ -0,0 +1,69 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  extra_hosts:
    - host.docker.internal:host-gateway
  logging:
    options:
      max-size: 50m
      max-file: "3"
  deploy:
    mode: global
    restart_policy:
      condition: on-failure
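# `mode: global` in the shared block schedules one task per swarm node (the agent
# pattern), so every node ships its own host metrics and logs. Services that must run
# exactly once, such as otel-metrics below, which scrapes via the manager's
# docker.sock, override this with `mode: replicated` and `replicas: 1`.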
services:
  otel-agent:
    <<: *common
    image: otel/opentelemetry-collector-contrib:0.111.0
    command:
      - --config=/etc/otel-collector-config.yaml
    volumes:
      - ./otel-agent-config.yaml:/etc/otel-collector-config.yaml
      - /:/hostfs:ro
    environment:
      - SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
      - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
      # - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
    # Before exposing the ports, make sure the ports are not used by other services
    # ports:
    #   - "4317:4317"
    #   - "4318:4318"
  otel-metrics:
    <<: *common
    image: otel/opentelemetry-collector-contrib:0.111.0
    user: 0:0 # If you have security concerns, you can replace this with your `UID:GID` that has necessary permissions to docker.sock
    command:
      - --config=/etc/otel-collector-config.yaml
    volumes:
      - ./otel-metrics-config.yaml:/etc/otel-collector-config.yaml
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
      - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
      # - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
    # Before exposing the ports, make sure the ports are not used by other services
    # ports:
    #   - "4317:4317"
    #   - "4318:4318"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.role == manager
  logspout:
    <<: *common
    image: "gliderlabs/logspout:v3.2.14"
    command: syslog+tcp://otel-agent:2255
    user: root
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    depends_on:
      - otel-agent

networks:
  signoz-net:
    name: signoz-net
    external: true
102
deploy/docker-swarm/generator/infra/otel-agent-config.yaml
Normal file
@@ -0,0 +1,102 @@
receivers:
  hostmetrics:
    collection_interval: 30s
    root_path: /hostfs
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        - job_name: otel-agent
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-agent
  tcplog/docker:
    listen_address: "0.0.0.0:2255"
    operators:
      - type: regex_parser
        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
        timestamp:
          parse_from: attributes.timestamp
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
      - type: move
        from: attributes["body"]
        to: body
      - type: remove
        field: attributes.timestamp
      # please remove names from below if you want to collect logs from them
      - type: filter
        id: signoz_logs_filter
        expr: 'attributes.container_name matches "^(signoz_(logspout|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
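      # Illustrative logspout syslog line the regex above is meant to parse
      # (all values made up):
      #   <30>1 2024-05-01T10:00:00.123Z 0a1b2c3d4e5f signoz_query-service.1.abc 1234 - - Server started
      # yielding container_id=0a1b2c3d4e5f, container_name=signoz_query-service.1.abc,
      # the timestamp, and "Server started" as the log body.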
processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors:
      # - ec2
      # - gcp
      # - azure
      - env
      - system
    timeout: 2s
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  pprof:
    endpoint: 0.0.0.0:1777
exporters:
  otlp:
    endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
    tls:
      insecure: true
    headers:
      signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
  # debug: {}
service:
  telemetry:
    logs:
      encoding: json
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - pprof
  pipelines:
    traces:
      receivers: [otlp]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    metrics:
      receivers: [otlp]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    metrics/hostmetrics:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    logs:
      receivers: [otlp, tcplog/docker]
      processors: [resourcedetection, batch]
      exporters: [otlp]
103
deploy/docker-swarm/generator/infra/otel-metrics-config.yaml
Normal file
@@ -0,0 +1,103 @@
receivers:
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        - job_name: otel-metrics
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-metrics
        # For Docker daemon metrics to be scraped, it must be configured to expose
        # Prometheus metrics, as documented here: https://docs.docker.com/config/daemon/prometheus/
        # - job_name: docker-daemon
        #   dockerswarm_sd_configs:
        #     - host: unix:///var/run/docker.sock
        #       role: nodes
        #   relabel_configs:
        #     - source_labels: [__meta_dockerswarm_node_address]
        #       target_label: __address__
        #       replacement: $1:9323
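        # A minimal /etc/docker/daemon.json enabling the above (illustrative; older
        # engines may additionally need "experimental": true):
        #   { "metrics-addr": "0.0.0.0:9323" }
        # followed by a daemon restart on every node.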
        - job_name: "dockerswarm"
          dockerswarm_sd_configs:
            - host: unix:///var/run/docker.sock
              role: tasks
          relabel_configs:
            - action: keep
              regex: running
              source_labels:
                - __meta_dockerswarm_task_desired_state
            - action: keep
              regex: true
              source_labels:
                - __meta_dockerswarm_service_label_signoz_io_scrape
            - regex: ([^:]+)(?::\d+)?
              replacement: $1
              source_labels:
                - __address__
              target_label: swarm_container_ip
            - separator: .
              source_labels:
                - __meta_dockerswarm_service_name
                - __meta_dockerswarm_task_slot
                - __meta_dockerswarm_task_id
              target_label: swarm_container_name
            - target_label: __address__
              source_labels:
                - swarm_container_ip
                - __meta_dockerswarm_service_label_signoz_io_port
              separator: ":"
            - source_labels:
                - __meta_dockerswarm_service_label_signoz_io_path
              target_label: __metrics_path__
            - source_labels:
                - __meta_dockerswarm_service_label_com_docker_stack_namespace
              target_label: namespace
            - source_labels:
                - __meta_dockerswarm_service_name
              target_label: service_name
            - source_labels:
                - __meta_dockerswarm_task_id
              target_label: service_instance_id
            - source_labels:
                - __meta_dockerswarm_node_hostname
              target_label: host_name
processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  resourcedetection:
    detectors:
      - env
      - system
    timeout: 2s
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  pprof:
    endpoint: 0.0.0.0:1777
exporters:
  otlp:
    endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
    tls:
      insecure: true
    headers:
      signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
  # debug: {}
service:
  telemetry:
    logs:
      encoding: json
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - pprof
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [resourcedetection, batch]
      exporters: [otlp]
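The `dockerswarm` job above keeps only tasks whose service carries the `signoz.io/scrape` label, then rewrites the scrape address and metrics path from the other `signoz.io/*` labels. A sketch of opting an existing service in from the CLI (service name and port are illustrative):

  docker service update \
    --label-add signoz.io/scrape=true \
    --label-add signoz.io/port=9100 \
    --label-add signoz.io/path=/metrics \
    mystack_my-service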
@@ -1,85 +1,29 @@
receivers:
  tcplog/docker:
    listen_address: "0.0.0.0:2255"
    operators:
      - type: regex_parser
        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
        timestamp:
          parse_from: attributes.timestamp
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
      - type: move
        from: attributes["body"]
        to: body
      - type: remove
        field: attributes.timestamp
      # please remove names from below if you want to collect logs from them
      - type: filter
        id: signoz_logs_filter
        expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
  opencensus:
    endpoint: 0.0.0.0:55678
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_http:
        endpoint: 0.0.0.0:14268
      # thrift_compact:
      #   endpoint: 0.0.0.0:6831
      # thrift_binary:
      #   endpoint: 0.0.0.0:6832
  hostmetrics:
    collection_interval: 30s
    root_path: /hostfs
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        # otel-collector internal metrics
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-collector

processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
    detectors: [env, system]
    timeout: 2s
  signozspanmetrics/delta:
    metrics_exporter: clickhousemetricswrite
@@ -106,19 +50,16 @@ processors:
      - name: host.name
      - name: host.type
      - name: container.name

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/signoz_traces
    low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
    use_new_schema: true
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/signoz_metrics
    resource_to_telemetry_conversion:
@@ -131,8 +72,7 @@ exporters:
    dsn: tcp://clickhouse:9000/signoz_logs
    timeout: 10s
    use_new_schema: true
  # logging: {}

  # debug: {}
service:
  telemetry:
    logs:
@@ -141,26 +81,21 @@ service:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - zpages
    - pprof
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      receivers: [otlp]
      processors: [signozspanmetrics/delta, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
    metrics/hostmetrics:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
    logs:
      receivers: [otlp, tcplog/docker]
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhouselogsexporter]
1
deploy/docker/.env
Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=signoz
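Setting COMPOSE_PROJECT_NAME pins the prefix Compose uses for container, network, and volume names, which would otherwise default to the directory name. The equivalent one-off override, for reference:

  docker compose -p signoz up -d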
3
deploy/docker/clickhouse-setup/.deprecated
Normal file
@@ -0,0 +1,3 @@
This data directory is deprecated and will be removed in the future.
Please use the migration script under `scripts/volume-migration` to migrate data from bind mounts to Docker volumes.
The script also sets the project name to `signoz` and the network name to `signoz-net` (if not already in place).
0
deploy/docker/clickhouse-setup/.gitkeep
Normal file
@@ -1,35 +0,0 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
  - name: 'slack-notifications'
    slack_configs:
      - channel: '#alerts'
        send_resolved: true
        icon_url: https://avatars3.githubusercontent.com/u/3380462
        title: |-
          [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
          {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
            {{" "}}(
            {{- with .CommonLabels.Remove .GroupLabels.Names }}
              {{- range $index, $label := .SortedPairs -}}
                {{ if $index }}, {{ end }}
                {{- $label.Name }}="{{ $label.Value -}}"
              {{- end }}
            {{- end -}}
            )
          {{- end }}
        text: >-
          {{ range .Alerts -}}
          *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}

          *Description:* {{ .Annotations.description }}

          *Details:*
          {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
          {{ end }}
          {{ end }}
@@ -1,11 +0,0 @@
groups:
  - name: ExampleCPULoadGroup
    rules:
      - alert: HighCpuLoad
        expr: system_cpu_load_average_1m > 0.1
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: High CPU load
          description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
@@ -1,41 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
  <storage_configuration>
    <disks>
      <default>
        <keep_free_space_bytes>10485760</keep_free_space_bytes>
      </default>
      <s3>
        <type>s3</type>
        <!-- For S3 cold storage,
             if the region is us-east-1, the endpoint can be https://<bucket-name>.s3.amazonaws.com
             if the region is not us-east-1, the endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
             For GCS cold storage,
             the endpoint should be https://storage.googleapis.com/<bucket-name>/data/
        -->
        <endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
        <access_key_id>ACCESS-KEY-ID</access_key_id>
        <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
        <!-- In case of S3, uncomment the configuration below if you want to read
             AWS credentials from environment variables when they exist. -->
        <!-- <use_environment_credentials>true</use_environment_credentials> -->
        <!-- In case of GCS, uncomment the configuration below, since GCS does
             not support batch deletion, which results in error messages in the logs. -->
        <!-- <support_batch_delete>false</support_batch_delete> -->
      </s3>
    </disks>
    <policies>
      <tiered>
        <volumes>
          <default>
            <disk>default</disk>
          </default>
          <s3>
            <disk>s3</disk>
            <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
          </s3>
        </volumes>
      </tiered>
    </policies>
  </storage_configuration>
</clickhouse>
@@ -1,123 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
  <!-- See also the files in the users.d directory where the settings can be overridden. -->

  <!-- Profiles of settings. -->
  <profiles>
    <!-- Default settings. -->
    <default>
      <!-- Maximum memory usage for processing a single query, in bytes. -->
      <max_memory_usage>10000000000</max_memory_usage>

      <!-- How to choose between replicas during distributed query processing.
           random - choose a random replica from the set of replicas with the minimum number of errors
           nearest_hostname - from the set of replicas with the minimum number of errors, choose the replica
           with the minimum number of different symbols between the replica's hostname and the local hostname
           (Hamming distance).
           in_order - the first live replica is chosen in the specified order.
           first_or_random - if the first replica has a higher number of errors, pick a random one from the replicas with the minimum number of errors.
      -->
      <load_balancing>random</load_balancing>
    </default>

    <!-- Profile that allows only read queries. -->
    <readonly>
      <readonly>1</readonly>
    </readonly>
  </profiles>

  <!-- Users and ACL. -->
  <users>
    <!-- If the user name was not specified, the 'default' user is used. -->
    <default>
      <!-- See also the files in the users.d directory where the password can be overridden.

           The password can be specified in plaintext or in SHA256 (hex format).

           If you want to specify the password in plaintext (not recommended), place it in the 'password' element.
           Example: <password>qwerty</password>.
           The password can be empty.

           If you want to specify SHA256, place it in the 'password_sha256_hex' element.
           Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
           Restriction of SHA256: inability to connect to ClickHouse using the MySQL JS client (as of July 2019).

           If you want to specify double SHA1, place it in the 'password_double_sha1_hex' element.
           Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

           If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
           place its name in the 'server' element inside the 'ldap' element.
           Example: <ldap><server>my_ldap_server</server></ldap>

           If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
           place a 'kerberos' element instead of 'password' (and similar) elements.
           The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
           You can also place a 'realm' element inside the 'kerberos' element to further restrict authentication to only those requests
           whose initiator's realm matches it.
           Example: <kerberos />
           Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>

           How to generate a decent password:
           Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
           The first line will be the password and the second the corresponding SHA256.

           How to generate double SHA1:
           Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
           The first line will be the password and the second the corresponding double SHA1.
      -->
      <password></password>

      <!-- List of networks with open access.

           To open access from everywhere, specify:
           <ip>::/0</ip>

           To open access only from localhost, specify:
           <ip>::1</ip>
           <ip>127.0.0.1</ip>

           Each element of the list has one of the following forms:
           <ip> IP address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
           2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
           <host> Hostname. Example: server01.clickhouse.com.
           To check access, a DNS query is performed, and all received addresses are compared to the peer address.
           <host_regexp> Regular expression for host names. Example: ^server\d\d-\d\d-\d\.clickhouse\.com$
           To check access, a DNS PTR query is performed for the peer address and then the regexp is applied.
           Then, for the result of the PTR query, another DNS query is performed and all received addresses are compared to the peer address.
           It is strongly recommended that the regexp end with $.
           All results of DNS requests are cached until server restart.
      -->
      <networks>
        <ip>::/0</ip>
      </networks>

      <!-- Settings profile for the user. -->
      <profile>default</profile>

      <!-- Quota for the user. -->
      <quota>default</quota>

      <!-- User can create other users and grant rights to them. -->
      <!-- <access_management>1</access_management> -->
    </default>
  </users>

  <!-- Quotas. -->
  <quotas>
    <!-- Name of quota. -->
    <default>
      <!-- Limits for a time interval. You can specify many intervals with different limits. -->
      <interval>
        <!-- Length of the interval. -->
        <duration>3600</duration>

        <!-- No limits. Just calculate resource usage for the time interval. -->
        <queries>0</queries>
        <errors>0</errors>
        <result_rows>0</result_rows>
        <read_rows>0</read_rows>
        <execution_time>0</execution_time>
      </interval>
    </default>
  </quotas>
</clickhouse>
@@ -1,133 +0,0 @@
version: "2.4"

include:
  - test-app-docker-compose.yaml

services:
  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    container_name: signoz-zookeeper-1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-clickhouse
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    tty: true
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
      - ./user_scripts:/var/lib/clickhouse/user_scripts/
    restart: on-failure
    logging:
      options:
        max-size: 50m
        max-file: "3"
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "0.0.0.0:8123/ping"
        ]
      interval: 30s
      timeout: 5s
      retries: 3

  alertmanager:
    container_name: signoz-alertmanager
    image: signoz/alertmanager:0.23.7
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
    container_name: otel-migrator
    command:
      - "--dsn=tcp://clickhouse:9000"
    depends_on:
      clickhouse:
        condition: service_healthy
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
  otel-collector:
    container_name: signoz-otel-collector
    image: signoz/signoz-otel-collector:0.111.5
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--copy-path=/var/tmp/collector-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    # user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /:/hostfs:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8888:8888" # OtelCollector internal metrics
      # - "8889:8889" # signoz spanmetrics exposed by the agent
      # - "9411:9411" # Zipkin port
      # - "13133:13133" # health check extension
      # - "14250:14250" # Jaeger gRPC
      # - "14268:14268" # Jaeger thrift HTTP
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
      otel-collector-migrator:
        condition: service_completed_successfully
      query-service:
        condition: service_healthy

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    container_name: signoz-logspout
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    restart: on-failure
@@ -1,67 +0,0 @@
version: "2.4"

services:
  query-service:
    hostname: query-service
    build:
      context: "../../../"
      dockerfile: "./pkg/query-service/Dockerfile"
      args:
        LDFLAGS: ""
        TARGETPLATFORM: "${GOOS}/${GOARCH}"
    container_name: signoz-query-service
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    command:
      [
        "-config=/root/config/prometheus.yml",
        "--use-logs-new-schema=true"
      ]
    ports:
      - "6060:6060"
      - "8080:8080"
    restart: on-failure
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    depends_on:
      clickhouse:
        condition: service_healthy

  frontend:
    build:
      context: "../../../frontend"
      dockerfile: "./Dockerfile"
      args:
        TARGETOS: "${GOOS}"
        TARGETPLATFORM: "${GOARCH}"
    container_name: signoz-frontend
    environment:
      - FRONTEND_API_ENDPOINT=http://query-service:8080
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
@@ -1,296 +0,0 @@
x-clickhouse-defaults: &clickhouse-defaults
  restart: on-failure
  # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  depends_on:
    - zookeeper-1
    # - zookeeper-2
    # - zookeeper-3
  logging:
    options:
      max-size: 50m
      max-file: "3"
  healthcheck:
    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
    test:
      [
        "CMD",
        "wget",
        "--spider",
        "-q",
        "0.0.0.0:8123/ping"
      ]
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144

x-db-depend: &db-depend
  depends_on:
    clickhouse:
      condition: service_healthy
    otel-collector-migrator-sync:
      condition: service_completed_successfully
    # clickhouse-2:
    #   condition: service_healthy
    # clickhouse-3:
    #   condition: service_healthy

services:
  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    container_name: signoz-zookeeper-1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-2:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-2
  #   hostname: zookeeper-2
  #   user: root
  #   ports:
  #     - "2182:2181"
  #     - "2889:2888"
  #     - "3889:3888"
  #   volumes:
  #     - ./data/zookeeper-2:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=2
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-3:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-3
  #   hostname: zookeeper-3
  #   user: root
  #   ports:
  #     - "2183:2181"
  #     - "2890:2888"
  #     - "3890:3888"
  #   volumes:
  #     - ./data/zookeeper-3:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=3
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    hostname: clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
      - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-2:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-2
  #   hostname: clickhouse-2
  #   ports:
  #     - "9001:9000"
  #     - "8124:8123"
  #     - "9182:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-3:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-3
  #   hostname: clickhouse-3
  #   ports:
  #     - "9002:9000"
  #     - "8125:8123"
  #     - "9183:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  alertmanager:
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:${DOCKER_TAG:-0.56.0}
    container_name: signoz-query-service
    command:
      [
        "-config=/root/config/prometheus.yml",
        "--use-logs-new-schema=true"
      ]
    # ports:
    #   - "6060:6060" # pprof port
    #   - "8080:8080" # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    <<: *db-depend

  frontend:
    image: signoz/frontend:${DOCKER_TAG:-0.56.0}
    container_name: signoz-frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector-migrator-sync:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
    container_name: otel-migrator-sync
    command:
      - "sync"
      - "--dsn=tcp://clickhouse:9000"
      - "--up="
    depends_on:
      clickhouse:
        condition: service_healthy
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy

  otel-collector-migrator-async:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
    container_name: otel-migrator-async
    command:
      - "async"
      - "--dsn=tcp://clickhouse:9000"
      - "--up="
    depends_on:
      clickhouse:
        condition: service_healthy
      otel-collector-migrator-sync:
        condition: service_completed_successfully
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy

  otel-collector:
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.5}
    container_name: signoz-otel-collector
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--copy-path=/var/tmp/collector-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /:/hostfs:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8888:8888" # OtelCollector internal metrics
      # - "8889:8889" # signoz spanmetrics exposed by the agent
      # - "9411:9411" # Zipkin port
      # - "13133:13133" # health check extension
      # - "14250:14250" # Jaeger gRPC
      # - "14268:14268" # Jaeger thrift HTTP
      # - "55678:55678" # OpenCensus receiver
      # - "55679:55679" # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
      otel-collector-migrator-sync:
        condition: service_completed_successfully
      query-service:
        condition: service_healthy

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    container_name: signoz-logspout
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    restart: on-failure
@@ -1,285 +0,0 @@
|
||||
version: "2.4"
|
||||
|
||||
include:
|
||||
- test-app-docker-compose.yaml
|
||||
|
||||
x-clickhouse-defaults: &clickhouse-defaults
|
||||
restart: on-failure
|
||||
# addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
|
||||
image: clickhouse/clickhouse-server:24.1.2-alpine
|
||||
tty: true
|
||||
depends_on:
|
||||
- zookeeper-1
|
||||
# - zookeeper-2
|
||||
# - zookeeper-3
|
||||
logging:
|
||||
options:
|
||||
max-size: 50m
|
||||
max-file: "3"
|
||||
healthcheck:
|
||||
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--spider",
|
||||
"-q",
|
||||
"0.0.0.0:8123/ping"
|
||||
]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
|
||||
x-db-depend: &db-depend
|
||||
depends_on:
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
otel-collector-migrator:
|
||||
condition: service_completed_successfully
|
||||
# clickhouse-2:
|
||||
# condition: service_healthy
|
||||
# clickhouse-3:
|
||||
# condition: service_healthy
|
||||
|
||||
services:
|
||||
|
||||
zookeeper-1:
|
||||
image: bitnami/zookeeper:3.7.1
|
||||
container_name: signoz-zookeeper-1
|
||||
hostname: zookeeper-1
|
||||
user: root
|
||||
ports:
|
||||
- "2181:2181"
|
||||
- "2888:2888"
|
||||
- "3888:3888"
|
||||
volumes:
|
||||
- ./data/zookeeper-1:/bitnami/zookeeper
|
||||
environment:
|
||||
- ZOO_SERVER_ID=1
|
||||
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
|
||||
- ALLOW_ANONYMOUS_LOGIN=yes
|
||||
- ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
# zookeeper-2:
|
||||
# image: bitnami/zookeeper:3.7.0
|
||||
# container_name: signoz-zookeeper-2
|
||||
# hostname: zookeeper-2
|
||||
# user: root
|
||||
# ports:
|
||||
# - "2182:2181"
|
||||
# - "2889:2888"
|
||||
# - "3889:3888"
|
||||
# volumes:
|
||||
# - ./data/zookeeper-2:/bitnami/zookeeper
|
||||
# environment:
|
||||
# - ZOO_SERVER_ID=2
|
||||
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
|
||||
# - ALLOW_ANONYMOUS_LOGIN=yes
|
||||
# - ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
# zookeeper-3:
|
||||
# image: bitnami/zookeeper:3.7.0
|
||||
# container_name: signoz-zookeeper-3
|
||||
# hostname: zookeeper-3
|
||||
# user: root
|
||||
# ports:
|
||||
# - "2183:2181"
|
||||
# - "2890:2888"
|
||||
# - "3890:3888"
|
||||
# volumes:
|
||||
# - ./data/zookeeper-3:/bitnami/zookeeper
|
||||
# environment:
|
||||
# - ZOO_SERVER_ID=3
|
||||
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
|
||||
# - ALLOW_ANONYMOUS_LOGIN=yes
|
||||
# - ZOO_AUTOPURGE_INTERVAL=1
|
||||
|
||||
clickhouse:
|
||||
<<: *clickhouse-defaults
|
||||
container_name: signoz-clickhouse
|
||||
hostname: clickhouse
|
||||
ports:
|
||||
- "9000:9000"
|
||||
- "8123:8123"
|
||||
- "9181:9181"
|
||||
volumes:
|
||||
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
- ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
- ./data/clickhouse/:/var/lib/clickhouse/
|
||||
- ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
|
||||
# clickhouse-2:
|
||||
# <<: *clickhouse-defaults
|
||||
# container_name: signoz-clickhouse-2
|
||||
# hostname: clickhouse-2
|
||||
# ports:
|
||||
# - "9001:9000"
|
||||
# - "8124:8123"
|
||||
# - "9182:9181"
|
||||
# volumes:
|
||||
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
# - ./data/clickhouse-2/:/var/lib/clickhouse/
|
||||
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
|
||||
|
||||
# clickhouse-3:
|
||||
# <<: *clickhouse-defaults
|
||||
# container_name: signoz-clickhouse-3
|
||||
# hostname: clickhouse-3
|
||||
# ports:
|
||||
# - "9002:9000"
|
||||
# - "8125:8123"
|
||||
# - "9183:9181"
|
||||
# volumes:
|
||||
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
|
||||
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
|
||||
# - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
|
||||
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
|
||||
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||
# - ./data/clickhouse-3/:/var/lib/clickhouse/
|
||||
# - ./user_scripts:/var/lib/clickhouse/user_scripts/
|
||||
|
||||
alertmanager:
|
||||
image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
|
||||
container_name: signoz-alertmanager
|
||||
volumes:
|
||||
- ./data/alertmanager:/data
|
||||
depends_on:
|
||||
query-service:
|
||||
condition: service_healthy
|
||||
restart: on-failure
|
||||
command:
|
||||
- --queryService.url=http://query-service:8085
|
||||
- --storage.path=/data
|
||||
|
||||
  # Note for maintainers/contributors who change the line numbers of the Frontend & Query-Service sections: please update the line numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:${DOCKER_TAG:-0.56.0}
    container_name: signoz-query-service
    command:
      [
        "-config=/root/config/prometheus.yml",
        "-gateway-url=https://api.staging.signoz.cloud",
        "--use-logs-new-schema=true"
      ]
    # ports:
    #   - "6060:6060" # pprof port
    #   - "8080:8080" # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
      - KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
    restart: on-failure
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    <<: *db-depend

  frontend:
    image: signoz/frontend:${DOCKER_TAG:-0.56.0}
    container_name: signoz-frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.5}
    container_name: otel-migrator
    command:
      - "--dsn=tcp://clickhouse:9000"
    depends_on:
      clickhouse:
        condition: service_healthy
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy

  otel-collector:
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.5}
    container_name: signoz-otel-collector
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--copy-path=/var/tmp/collector-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /:/hostfs:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777"     # pprof extension
      - "4317:4317"       # OTLP gRPC receiver
      - "4318:4318"       # OTLP HTTP receiver
      # - "8888:8888"     # OtelCollector internal metrics
      # - "8889:8889"     # signoz spanmetrics exposed by the agent
      # - "9411:9411"     # Zipkin port
      # - "13133:13133"   # health check extension
      # - "14250:14250"   # Jaeger gRPC
      # - "14268:14268"   # Jaeger thrift HTTP
      # - "55678:55678"   # OpenCensus receiver
      # - "55679:55679"   # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
      otel-collector-migrator:
        condition: service_completed_successfully
      query-service:
        condition: service_healthy

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    container_name: signoz-logspout
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    restart: on-failure
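The `x-db-depend` block above is a Compose extension field consumed through standard YAML anchors and merge keys: `&db-depend` names the mapping, and `<<: *db-depend` splices it into a service. As a rough sketch of what the merge expands to under standard merge-key semantics (an illustration, not an extra file in the release), the query-service entry above is equivalent to writing:

    query-service:
      image: signoz/query-service:${DOCKER_TAG:-0.56.0}
      # ...the keys spelled out above, plus:
      depends_on:
        clickhouse:
          condition: service_healthy
        otel-collector-migrator:
          condition: service_completed_successfully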
@@ -1,3 +0,0 @@
include:
  - test-app-docker-compose.yaml
  - docker-compose-minimal.yaml
@@ -1,64 +0,0 @@
<clickhouse>
    <logger>
        <!-- Possible levels [1]:

          - none (turns off logging)
          - fatal
          - critical
          - error
          - warning
          - notice
          - information
          - debug
          - trace

          [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
        -->
        <level>information</level>
        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
        <!-- Rotation policy
             See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
          -->
        <size>1000M</size>
        <count>10</count>
        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
    </logger>

    <listen_host>0.0.0.0</listen_host>
    <max_connections>4096</max_connections>

    <keeper_server>
        <tcp_port>9181</tcp_port>

<!-- Must be unique among all keeper serves -->
|
||||
<server_id>1</server_id>
|
||||
|
||||
<log_storage_path>/var/lib/clickhouse/coordination/logs</log_storage_path>
|
||||
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
|
||||
|
||||
<coordination_settings>
|
||||
<operation_timeout_ms>10000</operation_timeout_ms>
|
||||
<min_session_timeout_ms>10000</min_session_timeout_ms>
|
||||
<session_timeout_ms>100000</session_timeout_ms>
|
||||
<raft_logs_level>information</raft_logs_level>
|
||||
<compress_logs>false</compress_logs>
|
||||
<!-- All settings listed in https://github.com/ClickHouse/ClickHouse/blob/master/src/Coordination/CoordinationSettings.h -->
|
||||
</coordination_settings>
|
||||
|
||||
<!-- enable sanity hostname checks for cluster configuration (e.g. if localhost is used with remote endpoints) -->
|
||||
<hostname_checks_enabled>true</hostname_checks_enabled>
|
||||
<raft_configuration>
|
||||
<server>
|
||||
<id>1</id>
|
||||
|
||||
<!-- Internal port and hostname -->
|
||||
<hostname>clickhouses-keeper-1</hostname>
|
||||
<port>9234</port>
|
||||
</server>
|
||||
|
||||
<!-- Add more servers here -->
|
||||
|
||||
</raft_configuration>
|
||||
</keeper_server>
|
||||
</clickhouse>
|
||||
@@ -1 +0,0 @@
server_endpoint: ws://query-service:4320/v1/opamp
@@ -1,25 +0,0 @@
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - 'alerts.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: []

remote_read:
  - url: tcp://clickhouse:9000/signoz_metrics
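Note that `scrape_configs` above is shipped empty: metrics are read back from ClickHouse through `remote_read` rather than scraped by this config. If an extra scrape target were ever wanted, a standard Prometheus job entry would look roughly like the sketch below (the job name and target are placeholders, not part of the shipped file):

    scrape_configs:
      - job_name: example-app      # placeholder job name
        static_configs:
          - targets:
              - example-app:9090   # placeholder host:port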
@@ -1,26 +0,0 @@
services:
  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: [ "all" ]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
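(In `LOCUST_OPTS` above, `--headless` runs Locust without its web UI, `-u 10` simulates 10 concurrent users, and `-r 1` spawns one new user per second against `ATTACKED_HOST`.)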
2  deploy/docker/clickhouse-setup/user_scripts/.deprecated  Normal file
@@ -0,0 +1,2 @@
This directory is deprecated and will be removed in the future.
Please use the new directory for ClickHouse setup scripts, `scripts/clickhouse`, instead.
@@ -1,16 +0,0 @@
from locust import HttpUser, task, between

class UserTasks(HttpUser):
    wait_time = between(5, 15)

    @task
    def rachel(self):
        self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")

    @task
    def trom(self):
        self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")

    @task
    def japanese(self):
        self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")

    @task
    def coffee(self):
        self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")
300  deploy/docker/docker-compose.ha.yaml  Normal file
@@ -0,0 +1,300 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  restart: on-failure
  logging:
    options:
      max-size: 50m
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  # adding a non-LTS version due to this fix: https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9363"
    signoz.io/path: "/metrics"
  depends_on:
    init-clickhouse:
      condition: service_completed_successfully
    zookeeper-1:
      condition: service_healthy
    zookeeper-2:
      condition: service_healthy
    zookeeper-3:
      condition: service_healthy
  healthcheck:
    test:
      - CMD
      - wget
      - --spider
      - -q
      - 0.0.0.0:8123/ping
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: bitnami/zookeeper:3.7.1
  user: root
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9141"
    signoz.io/path: "/metrics"
  healthcheck:
    test:
      - CMD-SHELL
      - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
    interval: 30s
    timeout: 5s
    retries: 3
x-db-depend: &db-depend
  !!merge <<: *common
  depends_on:
    clickhouse:
      condition: service_healthy
    schema-migrator-sync:
      condition: service_completed_successfully
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-init-clickhouse
    command:
      - bash
      - -c
      - |
        version="v0.0.1"
        node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
        node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
        echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
        cd /tmp
        wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
        tar -xvzf histogram-quantile.tar.gz
        mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
    volumes:
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
  zookeeper-1:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-1
    # ports:
    #   - "2181:2181"
    #   - "2888:2888"
    #   - "3888:3888"
    volumes:
      - zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  zookeeper-2:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-2
    # ports:
    #   - "2182:2181"
    #   - "2889:2888"
    #   - "3889:3888"
    volumes:
      - zookeeper-2:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=2
      - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  zookeeper-3:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-3
    # ports:
    #   - "2183:2181"
    #   - "2890:2888"
    #   - "3890:3888"
    volumes:
      - zookeeper-3:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=3
      - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  clickhouse:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    #   - "9181:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  clickhouse-2:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse-2
    # ports:
    #   - "9001:9000"
    #   - "8124:8123"
    #   - "9182:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse-2:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  clickhouse-3:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse-3
    # ports:
    #   - "9002:9000"
    #   - "8125:8123"
    #   - "9183:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse-3:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
  query-service:
    !!merge <<: *db-depend
    image: signoz/query-service:${DOCKER_TAG:-0.70.0}
    container_name: signoz-query-service
    command:
      - --config=/root/config/prometheus.yml
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "3301:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - localhost:8080/api/v1/health
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:${DOCKER_TAG:-0.70.0}
    container_name: signoz-frontend
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
      - --copy-path=/var/tmp/collector-config.yaml
      - --feature-gates=-pkg.translator.prometheus.NormalizeName
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
    depends_on:
      clickhouse:
        condition: service_healthy
      schema-migrator-sync:
        condition: service_completed_successfully
      query-service:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
    container_name: schema-migrator-sync
    command:
      - sync
      - --dsn=tcp://clickhouse:9000
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
    container_name: schema-migrator-async
    command:
      - async
      - --dsn=tcp://clickhouse:9000
      - --up=
networks:
  signoz-net:
    name: signoz-net

volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  clickhouse-2:
    name: signoz-clickhouse-2
  clickhouse-3:
    name: signoz-clickhouse-3
  sqlite:
    name: signoz-sqlite
  zookeeper-1:
    name: signoz-zookeeper-1
  zookeeper-2:
    name: signoz-zookeeper-2
  zookeeper-3:
    name: signoz-zookeeper-3
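On the `TODO` above about running multiple otel-collector replicas: with the Compose v2 CLI this could be sketched roughly as below. This is an assumption, not part of this release — `container_name` and the fixed host port mappings would have to be dropped, and the TODO's suggested Nginx/Traefik load balancer would still be needed in front of the OTLP ports:

    otel-collector:
      !!merge <<: *db-depend
      image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
      deploy:
        replicas: 2   # hypothetical replica count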
222  deploy/docker/docker-compose.testing.yaml  Normal file
@@ -0,0 +1,222 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  restart: on-failure
  logging:
    options:
      max-size: 50m
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  # adding a non-LTS version due to this fix: https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9363"
    signoz.io/path: "/metrics"
  depends_on:
    init-clickhouse:
      condition: service_completed_successfully
    zookeeper-1:
      condition: service_healthy
  healthcheck:
    test:
      - CMD
      - wget
      - --spider
      - -q
      - 0.0.0.0:8123/ping
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: bitnami/zookeeper:3.7.1
  user: root
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9141"
    signoz.io/path: "/metrics"
  healthcheck:
    test:
      - CMD-SHELL
      - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
    interval: 30s
    timeout: 5s
    retries: 3
x-db-depend: &db-depend
  !!merge <<: *common
  depends_on:
    clickhouse:
      condition: service_healthy
    schema-migrator-sync:
      condition: service_completed_successfully
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-init-clickhouse
    command:
      - bash
      - -c
      - |
        version="v0.0.1"
        node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
        node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
        echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
        cd /tmp
        wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
        tar -xvzf histogram-quantile.tar.gz
        mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
    volumes:
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
  zookeeper-1:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-1
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  clickhouse:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
  query-service:
    !!merge <<: *db-depend
    image: signoz/query-service:${DOCKER_TAG:-0.70.0}
    container_name: signoz-query-service
    command:
      - --config=/root/config/prometheus.yml
      - --gateway-url=https://api.staging.signoz.cloud
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "8080:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
      - KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - localhost:8080/api/v1/health
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:${DOCKER_TAG:-0.70.0}
    container_name: signoz-frontend
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
      - --copy-path=/var/tmp/collector-config.yaml
      - --feature-gates=-pkg.translator.prometheus.NormalizeName
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
    depends_on:
      query-service:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
    container_name: schema-migrator-sync
    command:
      - sync
      - --dsn=tcp://clickhouse:9000
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
    container_name: schema-migrator-async
    command:
      - async
      - --dsn=tcp://clickhouse:9000
      - --up=
networks:
  signoz-net:
    name: signoz-net

volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  sqlite:
    name: signoz-sqlite
  zookeeper-1:
    name: signoz-zookeeper-1
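Compared with the standalone `docker-compose.yaml` that follows, this testing variant differs mainly in pointing query-service at the staging gateway (`--gateway-url=https://api.staging.signoz.cloud`), adding the `KAFKA_SPAN_EVAL` toggle, and exposing the ZooKeeper and ClickHouse ports on the host.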
220  deploy/docker/docker-compose.yaml  Normal file
@@ -0,0 +1,220 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  restart: on-failure
  logging:
    options:
      max-size: 50m
      max-file: "3"
x-clickhouse-defaults: &clickhouse-defaults
  !!merge <<: *common
  # adding a non-LTS version due to this fix: https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9363"
    signoz.io/path: "/metrics"
  depends_on:
    init-clickhouse:
      condition: service_completed_successfully
    zookeeper-1:
      condition: service_healthy
  healthcheck:
    test:
      - CMD
      - wget
      - --spider
      - -q
      - 0.0.0.0:8123/ping
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144
x-zookeeper-defaults: &zookeeper-defaults
  !!merge <<: *common
  image: bitnami/zookeeper:3.7.1
  user: root
  labels:
    signoz.io/scrape: "true"
    signoz.io/port: "9141"
    signoz.io/path: "/metrics"
  healthcheck:
    test:
      - CMD-SHELL
      - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
    interval: 30s
    timeout: 5s
    retries: 3
x-db-depend: &db-depend
  !!merge <<: *common
  depends_on:
    clickhouse:
      condition: service_healthy
    schema-migrator-sync:
      condition: service_completed_successfully
services:
  init-clickhouse:
    !!merge <<: *common
    image: clickhouse/clickhouse-server:24.1.2-alpine
    container_name: signoz-init-clickhouse
    command:
      - bash
      - -c
      - |
        version="v0.0.1"
        node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
        node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
        echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
        cd /tmp
        wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
        tar -xvzf histogram-quantile.tar.gz
        mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
    volumes:
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
  zookeeper-1:
    !!merge <<: *zookeeper-defaults
    container_name: signoz-zookeeper-1
    # ports:
    #   - "2181:2181"
    #   - "2888:2888"
    #   - "3888:3888"
    volumes:
      - zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1
      - ZOO_ENABLE_PROMETHEUS_METRICS=yes
      - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
  clickhouse:
    !!merge <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    # ports:
    #   - "9000:9000"
    #   - "8123:8123"
    #   - "9181:9181"
    volumes:
      - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
      - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
      - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
      - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      - clickhouse:/var/lib/clickhouse/
      # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  alertmanager:
    !!merge <<: *common
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
    container_name: signoz-alertmanager
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data
    volumes:
      - alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
  query-service:
    !!merge <<: *db-depend
    image: signoz/query-service:${DOCKER_TAG:-0.70.0}
    container_name: signoz-query-service
    command:
      - --config=/root/config/prometheus.yml
      - --use-logs-new-schema=true
      - --use-trace-new-schema=true
    # ports:
    #   - "3301:8080" # signoz port
    #   - "6060:6060" # pprof port
    volumes:
      - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
      - ../common/dashboards:/root/config/dashboards
      - sqlite:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    healthcheck:
      test:
        - CMD
        - wget
        - --spider
        - -q
        - localhost:8080/api/v1/health
      interval: 30s
      timeout: 5s
      retries: 3
  frontend:
    !!merge <<: *common
    image: signoz/frontend:${DOCKER_TAG:-0.70.0}
    container_name: signoz-frontend
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
      - --copy-path=/var/tmp/collector-config.yaml
      - --feature-gates=-pkg.translator.prometheus.NormalizeName
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777" # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
    depends_on:
      query-service:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
    container_name: schema-migrator-sync
    command:
      - sync
      - --dsn=tcp://clickhouse:9000
      - --up=
    depends_on:
      clickhouse:
        condition: service_healthy
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
    container_name: schema-migrator-async
    command:
      - async
      - --dsn=tcp://clickhouse:9000
      - --up=
networks:
  signoz-net:
    name: signoz-net

volumes:
  alertmanager:
    name: signoz-alertmanager
  clickhouse:
    name: signoz-clickhouse
  sqlite:
    name: signoz-sqlite
  zookeeper-1:
    name: signoz-zookeeper-1
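All of these compose files resolve image tags with Compose's `${VAR:-default}` substitution, so tags can be pinned without editing the file itself. A minimal sketch of an optional `.env` file placed next to the compose file (the values shown are simply the defaults already used above):

    DOCKER_TAG=0.70.0
    OTELCOL_TAG=0.111.24
    ALERTMANAGER_TAG=0.23.7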
39  deploy/docker/generator/hotrod/docker-compose.yaml  Normal file
@@ -0,0 +1,39 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  extra_hosts:
    - host.docker.internal:host-gateway
  logging:
    options:
      max-size: 50m
      max-file: "3"
  restart: on-failure
services:
  hotrod:
    <<: *common
    image: jaegertracing/example-hotrod:1.61.0
    container_name: hotrod
    command: [ "all" ]
    environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318 # In case of external SigNoz or cloud, update the endpoint and access token
      # - OTEL_OTLP_HEADERS=signoz-access-token=<your-access-token>
  load-hotrod:
    <<: *common
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../../../common/locust-scripts:/locust

networks:
  signoz-net:
    name: signoz-net
    external: true
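For the commented `OTEL_OTLP_HEADERS` line above: pointing HotROD at an external SigNoz or SigNoz Cloud instead of the local host gateway would look roughly like the sketch below, where the region and token are placeholders to be filled in:

    environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=https://ingest.<region>.signoz.cloud:443   # placeholder region
      - OTEL_OTLP_HEADERS=signoz-access-token=<your-access-token>              # placeholder token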
43  deploy/docker/generator/infra/docker-compose.yaml  Normal file
@@ -0,0 +1,43 @@
version: "3"
x-common: &common
  networks:
    - signoz-net
  extra_hosts:
    - host.docker.internal:host-gateway
  logging:
    options:
      max-size: 50m
      max-file: "3"
  restart: on-failure
services:
  otel-agent:
    <<: *common
    image: otel/opentelemetry-collector-contrib:0.111.0
    command:
      - --config=/etc/otel-collector-config.yaml
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - /:/hostfs:ro
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux # Replace signoz-host with the actual hostname
      # - SIGNOZ_ACCESS_TOKEN="<your-access-token>"
    # Before exposing the ports, make sure the ports are not used by other services
    # ports:
    #   - "4317:4317"
    #   - "4318:4318"
  logspout:
    <<: *common
    image: "gliderlabs/logspout:v3.2.14"
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-agent:2255
    depends_on:
      - otel-agent

networks:
  signoz-net:
    name: signoz-net
    external: true
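Note that `signoz-net` is declared `external: true` here: this generator stack does not create the network, it assumes it already exists — in practice, that the core SigNoz compose stack (which creates `signoz-net`) is already up, or that the network was created beforehand.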
139  deploy/docker/generator/infra/otel-collector-config.yaml  Normal file
@@ -0,0 +1,139 @@
receivers:
  hostmetrics:
    collection_interval: 30s
    root_path: /hostfs
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-collector
        # For Docker daemon metrics to be scraped, it must be configured to expose
        # Prometheus metrics, as documented here: https://docs.docker.com/config/daemon/prometheus/
        # - job_name: docker-daemon
        #   static_configs:
        #     - targets:
        #         - host.docker.internal:9323
        #       labels:
        #         job_name: docker-daemon
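The commented `docker-daemon` job above only works if the Docker daemon itself is configured to expose Prometheus metrics. A minimal sketch of the corresponding `/etc/docker/daemon.json`, following the Docker documentation linked in the comment (the bind address is an assumption; choose one appropriate for the host):

    {
      "metrics-addr": "0.0.0.0:9323"
    }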
        - job_name: docker-container
          docker_sd_configs:
            - host: unix:///var/run/docker.sock
          relabel_configs:
            - action: keep
              regex: true
              source_labels:
                - __meta_docker_container_label_signoz_io_scrape
            - regex: true
              source_labels:
                - __meta_docker_container_label_signoz_io_path
              target_label: __metrics_path__
            - regex: (.+)
              source_labels:
                - __meta_docker_container_label_signoz_io_path
              target_label: __metrics_path__
            - separator: ":"
              source_labels:
                - __meta_docker_network_ip
                - __meta_docker_container_label_signoz_io_port
              target_label: __address__
            - regex: '/(.*)'
              replacement: '$1'
              source_labels:
                - __meta_docker_container_name
              target_label: container_name
            - regex: __meta_docker_container_label_signoz_io_(.+)
              action: labelmap
              replacement: $1
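The relabel rules above keep only containers that opt in via labels, then derive the scrape address, path, and extra labels from them. The compose files in this release set exactly those labels, for example on the ClickHouse service:

    labels:
      signoz.io/scrape: "true"
      signoz.io/port: "9363"
      signoz.io/path: "/metrics"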
  tcplog/docker:
    listen_address: "0.0.0.0:2255"
    operators:
      - type: regex_parser
        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
        timestamp:
          parse_from: attributes.timestamp
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
      - type: move
        from: attributes["body"]
        to: body
      - type: remove
        field: attributes.timestamp
      # remove container names from the filter below if you want to collect their logs
      - type: filter
        id: signoz_logs_filter
        expr: 'attributes.container_name matches "^(signoz-(|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors:
      # - ec2
      # - gcp
      # - azure
      - env
      - system
    timeout: 2s
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  pprof:
    endpoint: 0.0.0.0:1777
exporters:
  otlp:
    endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
    tls:
      insecure: true
    headers:
      signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
  # debug: {}
service:
  telemetry:
    logs:
      encoding: json
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - pprof
  pipelines:
    traces:
      receivers: [otlp]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    metrics:
      receivers: [otlp]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    metrics/hostmetrics:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [resourcedetection, batch]
      exporters: [otlp]
    logs:
      receivers: [otlp, tcplog/docker]
      processors: [resourcedetection, batch]
      exporters: [otlp]
@@ -1,62 +1,21 @@
receivers:
  tcplog/docker:
    listen_address: "0.0.0.0:2255"
    operators:
      - type: regex_parser
        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
        timestamp:
          parse_from: attributes.timestamp
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
      - type: move
        from: attributes["body"]
        to: body
      - type: remove
        field: attributes.timestamp
      # remove container names from the filter below if you want to collect their logs
      - type: filter
        id: signoz_logs_filter
        expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
  opencensus:
    endpoint: 0.0.0.0:55678
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_http:
        endpoint: 0.0.0.0:14268
      # thrift_compact:
      #   endpoint: 0.0.0.0:6831
      # thrift_binary:
      #   endpoint: 0.0.0.0:6832
  hostmetrics:
    collection_interval: 30s
    root_path: /hostfs
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        # otel-collector internal metrics
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-collector

processors:
  batch:
    send_batch_size: 10000
@@ -64,25 +23,11 @@ processors:
    timeout: 10s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
    detectors: [env, system]
    timeout: 2s
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
  signozspanmetrics/delta:
    metrics_exporter: clickhousemetricswrite
    metrics_flush_interval: 60s
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
    dimensions_cache_size: 100000
    aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
@@ -105,11 +50,16 @@ processors:
      - name: host.name
      - name: host.type
      - name: container.name

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  pprof:
    endpoint: 0.0.0.0:1777
exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/signoz_traces
    low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
    use_new_schema: true
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/signoz_metrics
    resource_to_telemetry_conversion:
@@ -118,44 +68,34 @@ exporters:
    endpoint: tcp://clickhouse:9000/signoz_metrics
  clickhousemetricswritev2:
    dsn: tcp://clickhouse:9000/signoz_metrics
  # logging: {}
  clickhouselogsexporter:
    dsn: tcp://clickhouse:9000/signoz_logs
    timeout: 10s
    use_new_schema: true
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

# debug: {}
service:
  telemetry:
    logs:
      encoding: json
    metrics:
      address: 0.0.0.0:8888
  extensions: [health_check, zpages, pprof]
  extensions:
    - health_check
    - pprof
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      receivers: [otlp]
      processors: [signozspanmetrics/delta, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
    metrics/hostmetrics:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite, clickhousemetricswritev2]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
    logs:
      receivers: [otlp, tcplog/docker]
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhouselogsexporter]
@@ -2,6 +2,11 @@

set -o errexit

# Variables
BASE_DIR="$(dirname "$(readlink -f "$0")")"
DOCKER_STANDALONE_DIR="docker"
DOCKER_SWARM_DIR="docker-swarm" # TODO: Add docker swarm support

# Regular Colors
Black='\033[0;30m'  # Black
Red='\[\e[0;31m\]'  # Red
@@ -32,6 +37,11 @@ has_cmd() {
    command -v "$1" > /dev/null 2>&1
}

# Check if docker compose plugin is present
has_docker_compose_plugin() {
    docker compose version > /dev/null 2>&1
}

is_mac() {
    [[ $OSTYPE == darwin* ]]
}
@@ -183,9 +193,7 @@ install_docker() {
        $sudo_cmd yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
        echo "Installing docker"
        $yum_cmd install docker-ce docker-ce-cli containerd.io

    fi

}

compose_version () {
@@ -222,17 +230,11 @@ start_docker() {
    echo -e "🐳 Starting Docker ...\n"
    if [[ $os == "Mac" ]]; then
        open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
    else
        if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
            echo "Starting docker service"
            $sudo_cmd systemctl start docker.service
        fi
        # if [[ -z $sudo_cmd ]]; then
        #     docker ps > /dev/null && true
        #     if [[ $? -ne 0 ]]; then
        #         request_sudo
        #     fi
        # fi
        if [[ -z $sudo_cmd ]]; then
            if ! docker ps > /dev/null && true; then
                request_sudo
@@ -260,12 +262,15 @@ wait_for_containers_start() {
}

bye() {  # Prints a friendly good bye message and exits the script.
    # Switch back to the original directory
    popd > /dev/null 2>&1
    if [[ "$?" -ne 0 ]]; then
    set +o errexit

    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""
    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
    echo -e "cd ${DOCKER_STANDALONE_DIR}"
    echo -e "$sudo_cmd $docker_compose_cmd ps -a"

    echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
    echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
@@ -296,11 +301,6 @@ request_sudo() {
    if (( $EUID != 0 )); then
        sudo_cmd="sudo"
        echo -e "Please enter your sudo password, if prompted."
        # $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null
        # if [[ $? -ne 0 ]] && ! $sudo_cmd -v; then
        #     echo "Need sudo privileges to proceed with the installation."
        #     exit 1;
        # fi
        if ! $sudo_cmd -l | grep -e "NOPASSWD: ALL" > /dev/null && ! $sudo_cmd -v; then
            echo "Need sudo privileges to proceed with the installation."
            exit 1;
@@ -317,6 +317,7 @@ echo -e "👋 Thank you for trying out SigNoz! "
echo ""

sudo_cmd=""
docker_compose_cmd=""

# Check sudo permissions
if (( $EUID != 0 )); then
@@ -362,28 +363,8 @@ else
    SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | $digest_cmd | grep -E -o '[a-zA-Z0-9]{64}')
fi

# echo ""

# echo -e "👉 ${RED}Two ways to go forward\n"
# echo -e "${RED}1) ClickHouse as database (default)\n"
# read -p "⚙️ Enter your preference (1/2):" choice_setup

# while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
# do
#     # echo $choice_setup
#     echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
#     read -p "⚙️ Enter your preference (1/2): " choice_setup
#     # echo $choice_setup
# done

# if [[ $choice_setup == "1" || $choice_setup == "" ]];then
#     setup_type='clickhouse'
# fi

setup_type='clickhouse'

# echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"

# Run bye if failure happens
trap bye EXIT

@@ -455,8 +436,6 @@ if [[ $desired_os -eq 0 ]]; then
    send_event "os_not_supported"
fi

# check_ports_occupied

# Check if the Docker daemon is installed and available. If not, install and start Docker for Linux machines. We cannot automatically install Docker Desktop on macOS.
if ! is_command_present docker; then

@@ -486,27 +465,42 @@ if ! is_command_present docker; then
    fi
fi

if has_docker_compose_plugin; then
    echo "docker compose plugin is present, using it"
    docker_compose_cmd="docker compose"
# Install docker-compose
if ! is_command_present docker-compose; then
    request_sudo
    install_docker_compose
else
    docker_compose_cmd="docker-compose"
    if ! is_command_present docker-compose; then
        request_sudo
        install_docker_compose
    fi
fi

start_docker

# $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
# Switch to the Docker Standalone directory
pushd "${BASE_DIR}/${DOCKER_STANDALONE_DIR}" > /dev/null 2>&1

# check for open ports, if signoz is not installed
if is_command_present docker-compose; then
    if $sudo_cmd $docker_compose_cmd ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
        echo "SigNoz already installed, skipping the occupied ports check"
    else
        check_ports_occupied
    fi
fi

echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
$sudo_cmd $docker_compose_cmd pull

echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# The $docker_compose_cmd command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
$sudo_cmd $docker_compose_cmd up --detach --remove-orphans || true

wait_for_containers_start 60
echo ""
@@ -516,7 +510,14 @@ if [[ $status_code -ne 200 ]]; then
    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""

    echo -e "$sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
    echo "cd ${DOCKER_STANDALONE_DIR}"
    echo "$sudo_cmd $docker_compose_cmd ps -a"
    echo ""

    echo "Try bringing down the containers and retrying the installation"
    echo "cd ${DOCKER_STANDALONE_DIR}"
    echo "$sudo_cmd $docker_compose_cmd down -v"
    echo ""

    echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
    echo "or reach us on SigNoz for support https://signoz.io/slack"
@@ -534,10 +535,13 @@ else
    echo ""
    echo -e "🟢 Your frontend is running on http://localhost:3301"
    echo ""
    echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
|
||||
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
|
||||
|
||||
echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
|
||||
echo "ℹ️ To bring down SigNoz and clean volumes:"
|
||||
echo ""
|
||||
echo "cd ${DOCKER_STANDALONE_DIR}"
|
||||
echo "$sudo_cmd $docker_compose_cmd down -v"
|
||||
|
||||
echo ""
|
||||
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
|
||||
@@ -552,7 +556,7 @@ else
|
||||
do
|
||||
read -rp 'Email: ' email
|
||||
done
|
||||
|
||||
|
||||
send_event "identify_successful_installation"
|
||||
fi
|
||||
|
||||
|
||||
@@ -23,6 +23,9 @@ COPY pkg/query-service/templates /root/templates
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service

# Copy frontend
COPY frontend/build/ /etc/signoz/web/

# run the binary
ENTRYPOINT ["./query-service"]

@@ -59,7 +59,7 @@ type anomalyQueryParams struct {
// The results obtained from this query are used to compare with predicted values
// and to detect anomalies
CurrentPeriodQuery *v3.QueryRangeParamsV3
// PastPeriodQuery is the query range params for past seasonal period
// PastPeriodQuery is the query range params for past period of seasonality
// Example: For weekly seasonality, (now-1w-5m, now-1w)
//        : For daily seasonality, (now-1d-5m, now-1d)
//        : For hourly seasonality, (now-1h-5m, now-1h)
@@ -74,7 +74,6 @@ type anomalyQueryParams struct {
//        : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
//        : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
PastSeasonQuery *v3.QueryRangeParamsV3

// Past2SeasonQuery is the query range params for past 2 seasonal period to the current season
// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
//        : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
@@ -144,13 +143,13 @@ func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonali
switch seasonality {
case SeasonalityWeekly:
currentGrowthPeriodStart = start - oneWeekOffset
currentGrowthPeriodEnd = end
currentGrowthPeriodEnd = start
case SeasonalityDaily:
currentGrowthPeriodStart = start - oneDayOffset
currentGrowthPeriodEnd = end
currentGrowthPeriodEnd = start
case SeasonalityHourly:
currentGrowthPeriodStart = start - oneHourOffset
currentGrowthPeriodEnd = end
currentGrowthPeriodEnd = start
}

currentGrowthQuery := &v3.QueryRangeParamsV3{
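The hunk above narrows the growth window so it ends at `start` rather than `end`. A minimal sketch of the resulting windows, assuming epoch-millisecond timestamps and offset constants named as in the diff (everything else here is illustrative):

```go
package main

import "fmt"

const (
	oneHourOffset int64 = 60 * 60 * 1000
	oneDayOffset  int64 = 24 * oneHourOffset
	oneWeekOffset int64 = 7 * oneDayOffset
)

func main() {
	// start/end of the current query range, in epoch milliseconds.
	start, end := int64(1_700_000_000_000), int64(1_700_000_300_000)

	// Past period for weekly seasonality: the same range, one season back.
	pastStart, pastEnd := start-oneWeekOffset, end-oneWeekOffset

	// Growth window after the fix: the full season *preceding* start,
	// i.e. (start-1w, start) instead of the old (start-1w, end).
	growthStart, growthEnd := start-oneWeekOffset, start

	fmt.Println(pastStart, pastEnd, growthStart, growthEnd)
}
```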
@@ -194,10 +194,11 @@ func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSi
}
var sum float64
points := series.Points[startIdx:]
for i := 0; i < movingAvgWindowSize && i < len(points); i++ {
windowSize := int(math.Min(float64(movingAvgWindowSize), float64(len(points))))
for i := 0; i < windowSize; i++ {
sum += points[i].Value
}
avg := sum / float64(movingAvgWindowSize)
avg := sum / float64(windowSize)
return avg
}
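The change above fixes the average's denominator: when fewer than `movingAvgWindowSize` points remain, the sum is now divided by the number of points actually summed. A standalone sketch of the corrected behaviour (the `Point` type is a stand-in for `v3.Point`, and the zero-length guard is an addition for safety):

```go
package main

import (
	"fmt"
	"math"
)

type Point struct{ Value float64 }

// movingAvg averages at most windowSize points starting at startIdx,
// dividing by the points actually used rather than the nominal window.
func movingAvg(points []Point, windowSize, startIdx int) float64 {
	points = points[startIdx:]
	n := int(math.Min(float64(windowSize), float64(len(points))))
	if n == 0 {
		return 0 // added guard, not in the diff
	}
	var sum float64
	for i := 0; i < n; i++ {
		sum += points[i].Value
	}
	return sum / float64(n)
}

func main() {
	pts := []Point{{2}, {4}, {6}}
	// Window of 7 over only 3 points: the old code returned 12/7,
	// the fixed code returns 12/3 = 4.
	fmt.Println(movingAvg(pts, 7, 0))
}
```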
@@ -226,21 +227,25 @@ func (p *BaseSeasonalProvider) getPredictedSeries(
// plus the average of the current season series
// minus the mean of the past season series, past2 season series and past3 season series
for idx, curr := range series.Points {
predictedValue :=
p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) +
p.getAvg(currentSeasonSeries) -
p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
movingAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
avg := p.getAvg(currentSeasonSeries)
mean := p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))
predictedValue := movingAvg + avg - mean

if predictedValue < 0 {
// this should not happen (except when the data has extreme outliers)
// we will use the moving avg of the previous period series in this case
zap.L().Warn("predictedValue is less than 0", zap.Float64("predictedValue", predictedValue), zap.Any("labels", series.Labels))
predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}

zap.L().Info("predictedSeries",
zap.Float64("movingAvg", p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)),
zap.Float64("avg", p.getAvg(currentSeasonSeries)),
zap.Float64("mean", p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))),
zap.L().Debug("predictedSeries",
zap.Float64("movingAvg", movingAvg),
zap.Float64("avg", avg),
zap.Float64("mean", mean),
zap.Any("labels", series.Labels),
zap.Float64("predictedValue", predictedValue),
zap.Float64("curr", curr.Value),
)
predictedSeries.Points = append(predictedSeries.Points, v3.Point{
Timestamp: curr.Timestamp,
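Spelled out, the prediction above is a three-term estimate. A compact sketch of that formula and the negative-value fallback, usable inside any package (the inputs mirror the diff's intermediate variables; the implementation is only illustrative):

```go
// predicted combines a short-term trend with a seasonal correction:
// the moving average of the previous period, shifted by how far the
// current season's average sits above the mean of the past three seasons.
func predicted(movingAvg, currentSeasonAvg, pastSeasonsMean float64) float64 {
	p := movingAvg + currentSeasonAvg - pastSeasonsMean
	if p < 0 {
		// Extreme outliers can push the estimate negative; fall back to
		// the moving average alone, as the handler above does.
		p = movingAvg
	}
	return p
}
```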
@@ -12,6 +12,7 @@ import (
"go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/integrations"
"go.signoz.io/signoz/pkg/query-service/app/logparsingpipeline"
"go.signoz.io/signoz/pkg/query-service/cache"
@@ -34,13 +35,14 @@ type APIHandlerOptions struct {
FeatureFlags baseint.FeatureLookup
LicenseManager *license.Manager
IntegrationsController *integrations.Controller
CloudIntegrationsController *cloudintegrations.Controller
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
Cache cache.Cache
Gateway *httputil.ReverseProxy
// Querier Influx Interval
FluxInterval time.Duration
UseLogsNewSchema bool
UseLicensesV3 bool
FluxInterval time.Duration
UseLogsNewSchema bool
UseTraceNewSchema bool
}

type APIHandler struct {
@@ -62,11 +64,12 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
RuleManager: opts.RulesManager,
FeatureFlags: opts.FeatureFlags,
IntegrationsController: opts.IntegrationsController,
CloudIntegrationsController: opts.CloudIntegrationsController,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
Cache: opts.Cache,
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseLicensesV3: opts.UseLicensesV3,
UseTraceNewSchema: opts.UseTraceNewSchema,
})

if err != nil {
@@ -181,23 +184,16 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
Methods(http.MethodGet)

// v3
router.HandleFunc("/api/v3/licenses",
am.ViewAccess(ah.listLicensesV3)).
Methods(http.MethodGet)

router.HandleFunc("/api/v3/licenses",
am.AdminAccess(ah.applyLicenseV3)).
Methods(http.MethodPost)

router.HandleFunc("/api/v3/licenses",
am.AdminAccess(ah.refreshLicensesV3)).
Methods(http.MethodPut)
router.HandleFunc("/api/v3/licenses", am.ViewAccess(ah.listLicensesV3)).Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.applyLicenseV3)).Methods(http.MethodPost)
router.HandleFunc("/api/v3/licenses", am.AdminAccess(ah.refreshLicensesV3)).Methods(http.MethodPut)
router.HandleFunc("/api/v3/licenses/active", am.ViewAccess(ah.getActiveLicenseV3)).Methods(http.MethodGet)

// v4
router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)

// Gateway
router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP))
router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.EditAccess(ah.ServeGatewayHTTP))

ah.APIHandler.RegisterRoutes(router, am)
@@ -95,7 +95,7 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
license, apiError := ah.LM().Activate(r.Context(), l.Key)
license, apiError := ah.LM().ActivateV3(r.Context(), l.Key)
if apiError != nil {
RespondError(w, apiError, nil)
return
@@ -115,6 +115,23 @@ func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, convertLicenseV3ToListLicenseResponse(licenses))
}

func (ah *APIHandler) getActiveLicenseV3(w http.ResponseWriter, r *http.Request) {
activeLicense, err := ah.LM().GetRepo().GetActiveLicenseV3(r.Context())
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
// return 404 not found if there is no active license
if activeLicense == nil {
RespondError(w, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no active license found")}, nil)
return
}

// TODO deprecate this when we move away from key for stripe
activeLicense.Data["key"] = activeLicense.Key
render.Success(w, http.StatusOK, activeLicense.Data)
}

// this function is called by zeus when inserting licenses in the query-service
func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
var licenseKey ApplyLicenseRequest
@@ -218,6 +235,10 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
licensesV2 := []model.License{}
for _, l := range licenses {
planKeyFromPlanName, ok := model.MapOldPlanKeyToNewPlanName[l.PlanName]
if !ok {
planKeyFromPlanName = model.Basic
}
licenseV2 := model.License{
Key: l.Key,
ActivationId: "",
@@ -226,7 +247,7 @@ func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
ValidationMessage: "",
IsCurrent: l.IsCurrent,
LicensePlan: model.LicensePlan{
PlanKey: l.PlanName,
PlanKey: planKeyFromPlanName,
ValidFrom: l.ValidFrom,
ValidUntil: l.ValidUntil,
Status: l.Status},
@@ -237,24 +258,12 @@ func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
}

func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {

var licenses []model.License

if ah.UseLicensesV3 {
licensesV3, err := ah.LM().GetLicensesV3(r.Context())
if err != nil {
RespondError(w, err, nil)
return
}
licenses = convertLicenseV3ToLicenseV2(licensesV3)
} else {
_licenses, apiError := ah.LM().GetLicenses(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
licenses = _licenses
licensesV3, apierr := ah.LM().GetLicensesV3(r.Context())
if apierr != nil {
RespondError(w, apierr, nil)
return
}
licenses := convertLicenseV3ToLicenseV2(licensesV3)

resp := model.Licenses{
TrialStart: -1,
@@ -7,6 +7,7 @@ import (

"github.com/jmoiron/sqlx"

"go.signoz.io/signoz/pkg/cache"
basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
"go.signoz.io/signoz/pkg/query-service/interfaces"
)
@@ -26,8 +27,11 @@ func NewDataConnector(
dialTimeout time.Duration,
cluster string,
useLogsNewSchema bool,
useTraceNewSchema bool,
fluxIntervalForTraceDetail time.Duration,
cache cache.Cache,
) *ClickhouseReader {
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema)
ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
return &ClickhouseReader{
conn: ch.GetConn(),
appdb: localDB,
@@ -29,15 +29,18 @@ import (
"go.signoz.io/signoz/ee/query-service/integrations/gateway"
"go.signoz.io/signoz/ee/query-service/interfaces"
"go.signoz.io/signoz/ee/query-service/rules"
"go.signoz.io/signoz/pkg/http/middleware"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/migrate"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/signoz"
"go.signoz.io/signoz/pkg/web"

licensepkg "go.signoz.io/signoz/ee/query-service/license"
"go.signoz.io/signoz/ee/query-service/usage"

"go.signoz.io/signoz/pkg/query-service/agentConf"
baseapp "go.signoz.io/signoz/pkg/query-service/app"
"go.signoz.io/signoz/pkg/query-service/app/cloudintegrations"
"go.signoz.io/signoz/pkg/query-service/app/dashboards"
baseexplorer "go.signoz.io/signoz/pkg/query-service/app/explorer"
"go.signoz.io/signoz/pkg/query-service/app/integrations"
@@ -61,23 +64,26 @@ import (
const AppDbEngine = "sqlite"

type ServerOptions struct {
Config signoz.Config
SigNoz *signoz.SigNoz
PromConfigPath string
SkipTopLvlOpsPath string
HTTPHostPort string
PrivateHostPort string
// alert specific params
DisableRules bool
RuleRepoURL string
PreferSpanMetrics bool
MaxIdleConns int
MaxOpenConns int
DialTimeout time.Duration
CacheConfigPath string
FluxInterval string
Cluster string
GatewayUrl string
UseLogsNewSchema bool
UseLicensesV3 bool
DisableRules bool
RuleRepoURL string
PreferSpanMetrics bool
MaxIdleConns int
MaxOpenConns int
DialTimeout time.Duration
CacheConfigPath string
FluxInterval string
FluxIntervalForTraceDetail string
Cluster string
GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
}

// Server runs HTTP api service
@@ -108,25 +114,22 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {

// NewServer creates and initializes Server
func NewServer(serverOptions *ServerOptions) (*Server, error) {

modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
modelDao, err := dao.InitDao(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}

baseexplorer.InitWithDSN(baseconst.RELATIONAL_DATASOURCE_PATH)

if err := preferences.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
if err := baseexplorer.InitWithDSN(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}

localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)

if err != nil {
if err := preferences.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}

localDB.SetMaxOpenConns(10)
if err := dashboards.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB()); err != nil {
return nil, err
}

gatewayProxy, err := gateway.NewProxy(serverOptions.GatewayUrl, gateway.RoutePrefix)
if err != nil {
@@ -134,7 +137,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}

// initiate license manager
lm, err := licensepkg.StartManager("sqlite", localDB, serverOptions.UseLicensesV3)
lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}
@@ -143,12 +146,17 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
modelDao.SetFlagProvider(lm)
readerReady := make(chan bool)

fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
if err != nil {
return nil, err
}

var reader interfaces.DataConnector
storage := os.Getenv("STORAGE")
if storage == "clickhouse" {
zap.L().Info("Using ClickHouse as datastore ...")
qb := db.NewDataConnector(
localDB,
serverOptions.SigNoz.SQLStore.SQLxDB(),
serverOptions.PromConfigPath,
lm,
serverOptions.MaxIdleConns,
@@ -156,6 +164,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.DialTimeout,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
fluxIntervalForTraceDetail,
serverOptions.SigNoz.Cache,
)
go qb.Start(readerReady)
reader = qb
@@ -183,41 +194,42 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
rm, err := makeRulesManager(serverOptions.PromConfigPath,
baseconst.GetAlertManagerApiPrefix(),
serverOptions.RuleRepoURL,
localDB,
serverOptions.SigNoz.SQLStore.SQLxDB(),
reader,
c,
serverOptions.DisableRules,
lm,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
)

if err != nil {
return nil, err
}

go func() {
err = migrate.ClickHouseMigrate(reader.GetConn(), serverOptions.Cluster)
if err != nil {
zap.L().Error("error while running clickhouse migrations", zap.Error(err))
}
}()

// initiate opamp
_, err = opAmpModel.InitDB(localDB)
_, err = opAmpModel.InitDB(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, err
}

integrationsController, err := integrations.NewController(localDB)
integrationsController, err := integrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, fmt.Errorf(
"couldn't create integrations controller: %w", err,
)
}

cloudIntegrationsController, err := cloudintegrations.NewController(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
return nil, fmt.Errorf(
"couldn't create cloud provider integrations controller: %w", err,
)
}

// ingestion pipelines manager
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
localDB, "sqlite", integrationsController.GetPipelinesForInstalledIntegrations,
serverOptions.SigNoz.SQLStore.SQLxDB(), integrationsController.GetPipelinesForInstalledIntegrations,
)
if err != nil {
return nil, err
@@ -225,8 +237,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

// initiate agent config handler
agentConfMgr, err := agentConf.Initiate(&agentConf.ManagerOptions{
DB: localDB,
DBEngine: AppDbEngine,
DB: serverOptions.SigNoz.SQLStore.SQLxDB(),
AgentFeatures: []agentConf.AgentFeature{logParsingPipelineController},
})
if err != nil {
@@ -234,7 +245,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}

// start the usagemanager
usageManager, err := usage.New("sqlite", modelDao, lm.GetRepo(), reader.GetConn())
usageManager, err := usage.New(modelDao, lm.GetRepo(), reader.GetConn())
if err != nil {
return nil, err
}
@@ -247,7 +258,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey)

fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)

if err != nil {
return nil, err
}
@@ -265,12 +275,13 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
FeatureFlags: lm,
LicenseManager: lm,
IntegrationsController: integrationsController,
CloudIntegrationsController: cloudIntegrationsController,
LogsParsingPipelineController: logParsingPipelineController,
Cache: c,
FluxInterval: fluxInterval,
Gateway: gatewayProxy,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseLicensesV3: serverOptions.UseLicensesV3,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
}

apiHandler, err := api.NewAPIHandler(apiOpts)
@@ -287,7 +298,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
usageManager: usageManager,
}

httpServer, err := s.createPublicServer(apiHandler)
httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web)

if err != nil {
return nil, err
@@ -313,10 +324,13 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,

r := baseapp.NewRouter()

r.Use(baseapp.LogCommentEnricher)
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddlewarePrivate)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
s.serverOptions.Config.APIServer.Timeout.Max,
).Wrap)
r.Use(middleware.NewAnalytics(zap.L()).Wrap)
r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)

apiHandler.RegisterPrivateRoutes(r)

@@ -336,7 +350,7 @@ func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server,
}, nil
}

func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {

r := baseapp.NewRouter()

@@ -356,14 +370,18 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
}
am := baseapp.NewAuthMiddleware(getUserFromRequest)

r.Use(baseapp.LogCommentEnricher)
r.Use(setTimeoutMiddleware)
r.Use(s.analyticsMiddleware)
r.Use(loggingMiddleware)
r.Use(middleware.NewTimeout(zap.L(),
s.serverOptions.Config.APIServer.Timeout.ExcludedRoutes,
s.serverOptions.Config.APIServer.Timeout.Default,
s.serverOptions.Config.APIServer.Timeout.Max,
).Wrap)
r.Use(middleware.NewAnalytics(zap.L()).Wrap)
r.Use(middleware.NewLogging(zap.L(), s.serverOptions.Config.APIServer.Logging.ExcludedRoutes).Wrap)

apiHandler.RegisterRoutes(r, am)
apiHandler.RegisterLogsRoutes(r, am)
apiHandler.RegisterIntegrationRoutes(r, am)
apiHandler.RegisterCloudIntegrationsRoutes(r, am)
apiHandler.RegisterQueryRangeV3Routes(r, am)
apiHandler.RegisterInfraMetricsRoutes(r, am)
apiHandler.RegisterQueryRangeV4Routes(r, am)
@@ -380,36 +398,16 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e

handler = handlers.CompressHandler(handler)

err := web.AddToRouter(r)
if err != nil {
return nil, err
}

return &http.Server{
Handler: handler,
}, nil
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddleware is used for logging public api calls
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path))
})
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
// loggingMiddlewarePrivate is used for logging private api calls
// from internal services like alert manager
func loggingMiddlewarePrivate(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
route := mux.CurrentRoute(r)
path, _ := route.GetPathTemplate()
startTime := time.Now()
next.ServeHTTP(w, r)
zap.L().Info(path, zap.Duration("timeTaken", time.Since(startTime)), zap.String("path", path), zap.Bool("tprivatePort", true))
})
}

// TODO(remove): Implemented at pkg/http/middleware/logging.go
type loggingResponseWriter struct {
http.ResponseWriter
@@ -489,32 +487,29 @@ func extractQueryRangeData(path string, r *http.Request) (map[string]interface{}
zap.L().Error("error while matching the trace explorer: ", zap.Error(err))
}

signozMetricsUsed := false
signozLogsUsed := false
signozTracesUsed := false
if postData != nil {
queryInfoResult := telemetry.GetInstance().CheckQueryInfo(postData)

if postData.CompositeQuery != nil {
data["queryType"] = postData.CompositeQuery.QueryType
data["panelType"] = postData.CompositeQuery.PanelType

signozLogsUsed, signozMetricsUsed, signozTracesUsed = telemetry.GetInstance().CheckSigNozSignals(postData)
}
}

if signozMetricsUsed || signozLogsUsed || signozTracesUsed {
if signozMetricsUsed {
if (queryInfoResult.MetricsUsed || queryInfoResult.LogsUsed || queryInfoResult.TracesUsed) && (queryInfoResult.FilterApplied) {
if queryInfoResult.MetricsUsed {
telemetry.GetInstance().AddActiveMetricsUser()
}
if signozLogsUsed {
if queryInfoResult.LogsUsed {
telemetry.GetInstance().AddActiveLogsUser()
}
if signozTracesUsed {
if queryInfoResult.TracesUsed {
telemetry.GetInstance().AddActiveTracesUser()
}
data["metricsUsed"] = signozMetricsUsed
data["logsUsed"] = signozLogsUsed
data["tracesUsed"] = signozTracesUsed
data["metricsUsed"] = queryInfoResult.MetricsUsed
data["logsUsed"] = queryInfoResult.LogsUsed
data["tracesUsed"] = queryInfoResult.TracesUsed
data["filterApplied"] = queryInfoResult.FilterApplied
data["groupByApplied"] = queryInfoResult.GroupByApplied
data["aggregateOperator"] = queryInfoResult.AggregateOperator
data["aggregateAttributeKey"] = queryInfoResult.AggregateAttributeKey
data["numberOfQueries"] = queryInfoResult.NumberOfQueries
data["queryType"] = queryInfoResult.QueryType
data["panelType"] = queryInfoResult.PanelType

userEmail, err := baseauth.GetEmailFromJwt(r.Context())
if err == nil {
// switch case to set data["screen"] based on the referrer
@@ -581,23 +576,6 @@ func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
})
}

// TODO(remove): Implemented at pkg/http/middleware/timeout.go
func setTimeoutMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var cancel context.CancelFunc
// check if route is not excluded
url := r.URL.Path
if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout)
defer cancel()
}

r = r.WithContext(ctx)
next.ServeHTTP(w, r)
})
}

// initListeners initialises listeners of the server
func (s *Server) initListeners() error {
// listen on public port
@@ -737,7 +715,8 @@ func makeRulesManager(
cache cache.Cache,
disableRules bool,
fm baseint.FeatureLookup,
useLogsNewSchema bool) (*baserules.Manager, error) {
useLogsNewSchema bool,
useTraceNewSchema bool) (*baserules.Manager, error) {

// create engine
pqle, err := pqle.FromConfigPath(promConfigPath)
@@ -767,8 +746,9 @@ func makeRulesManager(
EvalDelay: baseconst.GetEvalDelay(),

PrepareTaskFunc: rules.PrepareTaskFunc,
PrepareTestRuleFunc: rules.TestNotification,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
PrepareTestRuleFunc: rules.TestNotification,
}

// create Manager
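The analytics hunk above replaces three ad-hoc booleans with a single result from `CheckQueryInfo`. The telemetry package itself isn't part of this diff, so the following struct is only inferred from the call sites; field names and types are assumptions, not the actual definition:

```go
// Inferred shape of the CheckQueryInfo result, reconstructed from how it is
// read in extractQueryRangeData above (hypothetical, for orientation only).
type QueryInfoResult struct {
	LogsUsed              bool
	MetricsUsed           bool
	TracesUsed            bool
	FilterApplied         bool
	GroupByApplied        bool
	AggregateOperator     string
	AggregateAttributeKey string
	NumberOfQueries       int
	QueryType             string
	PanelType             string
}
```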
@@ -13,7 +13,9 @@ var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")
var ZeusURL = GetOrDefaultEnv("ZEUS_URL", "ZeusURL")

// this is set via build time variable
var ZeusURL = "https://api.signoz.cloud"

func GetOrDefaultEnv(key string, fallback string) string {
v := os.Getenv(key)
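Since `ZeusURL` is no longer read from the environment, overriding it now happens at compile time. A sketch of how such a build-time override is typically wired with Go's linker flags; the import path matches the `ee/query-service/constants` package seen elsewhere in this diff, but the exact build invocation is an assumption:

```go
// In the constants package:
var ZeusURL = "https://api.signoz.cloud" // default, replaceable at link time

// At build time (shown as a comment; hypothetical invocation):
//
//   go build -ldflags \
//     "-X go.signoz.io/signoz/ee/query-service/constants.ZeusURL=https://zeus.example" \
//     ./ee/query-service
```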
@@ -1,18 +1,10 @@
package dao

import (
"fmt"

"github.com/jmoiron/sqlx"
"go.signoz.io/signoz/ee/query-service/dao/sqlite"
)

func InitDao(engine, path string) (ModelDao, error) {

switch engine {
case "sqlite":
return sqlite.InitDB(path)
default:
return nil, fmt.Errorf("qsdb type: %s is not supported in query service", engine)
}

func InitDao(inputDB *sqlx.DB) (ModelDao, error) {
return sqlite.InitDB(inputDB)
}
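With the engine switch gone, callers hand `InitDao` an already-open `*sqlx.DB` instead of an engine name and file path. A minimal caller sketch; the sqlite driver import and DB path are illustrative assumptions, and SigNoz's real wiring goes through its SQLStore as the server hunks above show:

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3" // assumed driver, for illustration only

	"go.signoz.io/signoz/ee/query-service/dao"
)

func main() {
	// One shared handle, opened by the caller; InitDao no longer decides
	// which engine or file to use.
	db, err := sqlx.Open("sqlite3", "/var/lib/signoz/signoz.db")
	if err != nil {
		log.Fatal(err)
	}
	modelDao, err := dao.InitDao(db)
	if err != nil {
		log.Fatal(err)
	}
	_ = modelDao
}
```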
@@ -65,8 +65,8 @@ func columnExists(db *sqlx.DB, tableName, columnName string) bool {
}

// InitDB creates and extends base model DB repository
func InitDB(dataSourceName string) (*modelDao, error) {
dao, err := basedsql.InitDB(dataSourceName)
func InitDB(inputDB *sqlx.DB) (*modelDao, error) {
dao, err := basedsql.InitDB(inputDB)
if err != nil {
return nil, err
}
@@ -2,18 +2,6 @@ package signozio

type status string

type ActivationResult struct {
Status status `json:"status"`
Data *ActivationResponse `json:"data,omitempty"`
ErrorType string `json:"errorType,omitempty"`
Error string `json:"error,omitempty"`
}

type ActivationResponse struct {
ActivationId string `json:"ActivationId"`
PlanDetails string `json:"PlanDetails"`
}

type ValidateLicenseResponse struct {
Status status `json:"status"`
Data map[string]interface{} `json:"data"`
@@ -10,7 +10,6 @@ import (
"time"

"github.com/pkg/errors"
"go.uber.org/zap"

"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
@@ -39,86 +38,6 @@ func init() {
C = New()
}

// ActivateLicense sends key to license.signoz.io and gets activation data
func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
licenseReq := map[string]string{
"key": key,
"siteId": siteId,
}

reqString, _ := json.Marshal(licenseReq)
httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))

if err != nil {
zap.L().Error("failed to connect to license.signoz.io", zap.Error(err))
return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
}

httpBody, err := io.ReadAll(httpResponse.Body)
if err != nil {
zap.L().Error("failed to read activation response from license.signoz.io", zap.Error(err))
return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
}

defer httpResponse.Body.Close()

// read api request result
result := ActivationResult{}
err = json.Unmarshal(httpBody, &result)
if err != nil {
zap.L().Error("failed to marshal activation response from license.signoz.io", zap.Error(err))
return nil, model.InternalError(errors.Wrap(err, "failed to marshal license activation response"))
}

switch httpResponse.StatusCode {
case 200, 201:
return result.Data, nil
case 400, 401:
return nil, model.BadRequest(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
default:
return nil, model.InternalError(fmt.Errorf(fmt.Sprintf("failed to activate: %s", result.Error)))
}

}

// ValidateLicense validates the license key
func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
validReq := map[string]string{
"activationId": activationId,
}

reqString, _ := json.Marshal(validReq)
response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))

if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
}

body, err := io.ReadAll(response.Body)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
}

defer response.Body.Close()

switch response.StatusCode {
case 200, 201:
a := ActivationResult{}
err = json.Unmarshal(body, &a)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
}
return a.Data, nil
case 400, 401:
return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
"bad request error received from license.signoz.io"))
default:
return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
"internal error received from license.signoz.io"))
}

}

func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {

// Creating an HTTP client with a timeout for better control
@@ -28,13 +28,8 @@ func NewLicenseRepo(db *sqlx.DB) Repo {
}
}

func (r *Repo) InitDB(engine string) error {
switch engine {
case "sqlite3", "sqlite":
return sqlite.InitDB(r.db)
default:
return fmt.Errorf("unsupported db")
}
func (r *Repo) InitDB(inputDB *sqlx.DB) error {
return sqlite.InitDB(inputDB)
}

func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
@@ -78,9 +73,7 @@ func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
return licenseV3Data, nil
}

// GetActiveLicense fetches the latest active license from DB.
// If the license is not present, expect a nil license and a nil error in the output.
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
func (r *Repo) GetActiveLicenseV2(ctx context.Context) (*model.License, *basemodel.ApiError) {
var err error
licenses := []model.License{}

@@ -109,6 +102,21 @@ func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel
return active, nil
}

// GetActiveLicense fetches the latest active license from DB.
// If the license is not present, expect a nil license and a nil error in the output.
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
activeLicenseV3, err := r.GetActiveLicenseV3(ctx)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
}

if activeLicenseV3 == nil {
return nil, nil
}
activeLicenseV2 := model.ConvertLicenseV3ToLicenseV2(activeLicenseV3)
return activeLicenseV2, nil
}

func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error) {
var err error
licenses := []model.LicenseDB{}
@@ -51,13 +51,13 @@ type Manager struct {
activeFeatures basemodel.FeatureSet
}

func StartManager(dbType string, db *sqlx.DB, useLicensesV3 bool, features ...basemodel.Feature) (*Manager, error) {
func StartManager(db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
if LM != nil {
return LM, nil
}

repo := NewLicenseRepo(db)
err := repo.InitDB(dbType)
err := repo.InitDB(db)

if err != nil {
return nil, fmt.Errorf("failed to initiate license repo: %v", err)
@@ -67,31 +67,7 @@ func StartManager(dbType string, db *sqlx.DB, useLicensesV3 bool, features ...ba
repo: &repo,
}

if useLicensesV3 {
// get active license from the db
active, err := m.repo.GetActiveLicense(context.Background())
if err != nil {
return m, err
}

// if we have an active license then need to fetch the complete details
if active != nil {
// fetch the new license structure from control plane
licenseV3, apiError := validate.ValidateLicenseV3(active.Key)
if apiError != nil {
return m, apiError
}

// insert the licenseV3 in sqlite db
apiError = m.repo.InsertLicenseV3(context.Background(), licenseV3)
// if the license already exists move ahead.
if apiError != nil && apiError.Typ != model.ErrorConflict {
return m, apiError
}
}
}

if err := m.start(useLicensesV3, features...); err != nil {
if err := m.start(features...); err != nil {
return m, err
}
LM = m
@@ -99,16 +75,8 @@ func StartManager(dbType string, db *sqlx.DB, useLicensesV3 bool, features ...ba
}

// start loads active license in memory and initiates validator
func (lm *Manager) start(useLicensesV3 bool, features ...basemodel.Feature) error {

var err error
if useLicensesV3 {
err = lm.LoadActiveLicenseV3(features...)
} else {
err = lm.LoadActiveLicense(features...)
}

return err
func (lm *Manager) start(features ...basemodel.Feature) error {
return lm.LoadActiveLicenseV3(features...)
}

func (lm *Manager) Stop() {
@@ -116,31 +84,6 @@ func (lm *Manager) Stop() {
<-lm.terminated
}

func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
lm.mutex.Lock()
defer lm.mutex.Unlock()

if l == nil {
return
}

lm.activeLicense = l
lm.activeFeatures = append(l.FeatureSet, features...)
// set default features
setDefaultFeatures(lm)

err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.L().Panic("Couldn't activate features", zap.Error(err))
}
if !lm.validatorRunning {
// we want to make sure only one validator runs,
// we already have lock() so good to go
lm.validatorRunning = true
go lm.Validator(context.Background())
}

}
func (lm *Manager) SetActiveV3(l *model.LicenseV3, features ...basemodel.Feature) {
lm.mutex.Lock()
defer lm.mutex.Unlock()
@@ -171,29 +114,6 @@ func setDefaultFeatures(lm *Manager) {
lm.activeFeatures = append(lm.activeFeatures, baseconstants.DEFAULT_FEATURE_SET...)
}

// LoadActiveLicense loads the most recent active license
func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
active, err := lm.repo.GetActiveLicense(context.Background())
if err != nil {
return err
}
if active != nil {
lm.SetActive(active, features...)
} else {
zap.L().Info("No active license found, defaulting to basic plan")
// if no active license is found, we default to basic(free) plan with all default features
lm.activeFeatures = model.BasicPlan
setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.L().Error("Couldn't initialize features", zap.Error(err))
return err
}
}

return nil
}

func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
active, err := lm.repo.GetActiveLicenseV3(context.Background())
if err != nil {
@@ -253,38 +173,20 @@ func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.License
if lm.activeLicenseV3 != nil && l.Key == lm.activeLicenseV3.Key {
l.IsCurrent = true
}
if l.ValidUntil == -1 {
// for subscriptions, there is no end-date as such
// but for showing user some validity we default one year timespan
l.ValidUntil = l.ValidFrom + 31556926
}
response = append(response, l)
}

return response, nil
}

// Validator validates license after an epoch of time
func (lm *Manager) Validator(ctx context.Context) {
defer close(lm.terminated)
tick := time.NewTicker(validationFrequency)
defer tick.Stop()

lm.Validate(ctx)

for {
select {
case <-lm.done:
return
default:
select {
case <-lm.done:
return
case <-tick.C:
lm.Validate(ctx)
}
}

}
}

// Validator validates license after an epoch of time
func (lm *Manager) ValidatorV3(ctx context.Context) {
zap.L().Info("ValidatorV3 started!")
defer close(lm.terminated)
tick := time.NewTicker(validationFrequency)
defer tick.Stop()
@@ -307,74 +209,6 @@ func (lm *Manager) ValidatorV3(ctx context.Context) {
}
}

// Validate validates the current active license
func (lm *Manager) Validate(ctx context.Context) (reterr error) {
zap.L().Info("License validation started")
if lm.activeLicense == nil {
return nil
}

defer func() {
lm.mutex.Lock()

lm.lastValidated = time.Now().Unix()
if reterr != nil {
zap.L().Error("License validation completed with error", zap.Error(reterr))
atomic.AddUint64(&lm.failedAttempts, 1)
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
map[string]interface{}{"err": reterr.Error()}, "", true, false)
} else {
zap.L().Info("License validation completed with no errors")
}

lm.mutex.Unlock()
}()

response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
if apiError != nil {
zap.L().Error("failed to validate license", zap.Error(apiError.Err))
return apiError.Err
}

if response.PlanDetails == lm.activeLicense.PlanDetails {
// license plan hasnt changed, nothing to do
return nil
}

if response.PlanDetails != "" {

// copy and replace the active license record
l := model.License{
Key: lm.activeLicense.Key,
CreatedAt: lm.activeLicense.CreatedAt,
PlanDetails: response.PlanDetails,
ValidationMessage: lm.activeLicense.ValidationMessage,
ActivationId: lm.activeLicense.ActivationId,
}

if err := l.ParsePlan(); err != nil {
zap.L().Error("failed to parse updated license", zap.Error(err))
return err
}

// updated plan is parsable, check if plan has changed
if lm.activeLicense.PlanDetails != response.PlanDetails {
err := lm.repo.UpdatePlanDetails(ctx, lm.activeLicense.Key, response.PlanDetails)
if err != nil {
// unexpected db write issue but we can let the user continue
// and wait for update to work in next cycle.
zap.L().Error("failed to validate license", zap.Error(err))
}
}

// activate the update license plan
lm.SetActive(&l)
}

return nil
}

// todo[vikrantgupta25]: check the comparison here between old and new license!
func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {

license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key)
@@ -422,50 +256,6 @@ func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
return nil
}

// Activate activates a license key with signoz server
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
defer func() {
if errResponse != nil {
userEmail, err := auth.GetEmailFromJwt(ctx)
if err == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
map[string]interface{}{"err": errResponse.Err.Error()}, userEmail, true, false)
}
}
}()

response, apiError := validate.ActivateLicense(key, "")
if apiError != nil {
zap.L().Error("failed to activate license", zap.Error(apiError.Err))
return nil, apiError
}

l := &model.License{
Key: key,
ActivationId: response.ActivationId,
PlanDetails: response.PlanDetails,
}

// parse validity and features from the plan details
err := l.ParsePlan()

if err != nil {
zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err)
}

// store the license before activating it
err = lm.repo.InsertLicense(ctx, l)
if err != nil {
zap.L().Error("failed to activate license", zap.Error(err))
return nil, model.InternalError(err)
}

// license is valid, activate it
lm.SetActive(l)
return l, nil
}

func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) {
defer func() {
if errResponse != nil {
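Taken together, the manager hunks collapse the V2/V3 split into a single code path: starting the manager now needs only the shared DB handle. A before/after sketch of the call site (a fragment, not a full program; the `SQLxDB` accessor is taken from the server changes elsewhere in this diff):

```go
// Before: engine name, DB handle, and a feature flag selected V2 vs V3.
// lm, err := licensepkg.StartManager("sqlite", localDB, useLicensesV3)

// After: one handle, one (V3-only) licensing path.
lm, err := licensepkg.StartManager(serverOptions.SigNoz.SQLStore.SQLxDB())
if err != nil {
	return nil, err
}
```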
@@ -13,10 +13,14 @@ import (
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.signoz.io/signoz/ee/query-service/app"
"go.signoz.io/signoz/pkg/config"
"go.signoz.io/signoz/pkg/config/envprovider"
"go.signoz.io/signoz/pkg/config/fileprovider"
"go.signoz.io/signoz/pkg/query-service/auth"
baseconst "go.signoz.io/signoz/pkg/query-service/constants"
"go.signoz.io/signoz/pkg/query-service/migrate"
"go.signoz.io/signoz/pkg/query-service/version"
"go.signoz.io/signoz/pkg/signoz"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"

@@ -94,8 +98,8 @@ func main() {
var cluster string

var useLogsNewSchema bool
var useLicensesV3 bool
var cacheConfigPath, fluxInterval string
var useTraceNewSchema bool
var cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string
var enableQueryServiceLogOTLPExport bool
var preferSpanMetrics bool

@@ -103,9 +107,10 @@ func main() {
var maxOpenConns int
var dialTimeout time.Duration
var gatewayUrl string
var useLicensesV3 bool

flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
@@ -116,10 +121,11 @@ func main() {
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)")
flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)")
flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)")
flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)")
flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')")
flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)")

flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
flag.Parse()

loggerMgr := initZapLog(enableQueryServiceLogOTLPExport)
@@ -129,23 +135,42 @@ func main() {

version.PrintVersion()

config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{
Uris: []string{"env:"},
ProviderFactories: []config.ProviderFactory{
envprovider.NewFactory(),
fileprovider.NewFactory(),
},
})
if err != nil {
zap.L().Fatal("Failed to create config", zap.Error(err))
}

signoz, err := signoz.New(context.Background(), config, signoz.NewProviderConfig())
if err != nil {
zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
}

serverOptions := &app.ServerOptions{
HTTPHostPort: baseconst.HTTPHostPort,
PromConfigPath: promConfigPath,
SkipTopLvlOpsPath: skipTopLvlOpsPath,
PreferSpanMetrics: preferSpanMetrics,
PrivateHostPort: baseconst.PrivateHostPort,
DisableRules: disableRules,
RuleRepoURL: ruleRepoURL,
MaxIdleConns: maxIdleConns,
MaxOpenConns: maxOpenConns,
DialTimeout: dialTimeout,
CacheConfigPath: cacheConfigPath,
FluxInterval: fluxInterval,
Cluster: cluster,
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
UseLicensesV3: useLicensesV3,
Config: config,
SigNoz: signoz,
HTTPHostPort: baseconst.HTTPHostPort,
PromConfigPath: promConfigPath,
SkipTopLvlOpsPath: skipTopLvlOpsPath,
PreferSpanMetrics: preferSpanMetrics,
PrivateHostPort: baseconst.PrivateHostPort,
DisableRules: disableRules,
RuleRepoURL: ruleRepoURL,
MaxIdleConns: maxIdleConns,
MaxOpenConns: maxOpenConns,
DialTimeout: dialTimeout,
CacheConfigPath: cacheConfigPath,
FluxInterval: fluxInterval,
FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
Cluster: cluster,
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
}

// Read the jwt secret key
@@ -157,7 +182,7 @@ func main() {
zap.L().Info("JWT secret key set successfully.")
}

if err := migrate.Migrate(baseconst.RELATIONAL_DATASOURCE_PATH); err != nil {
if err := migrate.Migrate(signoz.SQLStore.SQLxDB()); err != nil {
zap.L().Error("Failed to migrate", zap.Error(err))
} else {
zap.L().Info("Migration successful")
@@ -247,3 +247,24 @@ func NewLicenseV3WithIDAndKey(id string, key string, data map[string]interface{}
licenseDataWithIdAndKey["key"] = key
return NewLicenseV3(licenseDataWithIdAndKey)
}

func ConvertLicenseV3ToLicenseV2(l *LicenseV3) *License {
planKeyFromPlanName, ok := MapOldPlanKeyToNewPlanName[l.PlanName]
if !ok {
planKeyFromPlanName = Basic
}
return &License{
Key: l.Key,
ActivationId: "",
PlanDetails: "",
FeatureSet: l.Features,
ValidationMessage: "",
IsCurrent: l.IsCurrent,
LicensePlan: LicensePlan{
PlanKey: planKeyFromPlanName,
ValidFrom: l.ValidFrom,
ValidUntil: l.ValidUntil,
Status: l.Status},
}

}
@@ -16,6 +16,10 @@ var (
PlanNameBasic = "BASIC"
)

var (
MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameTeams: Pro, PlanNameEnterprise: Enterprise}
)

var (
LicenseStatusInactive = "INACTIVE"
)
@@ -26,6 +26,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
)

@@ -122,6 +123,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
@@ -46,7 +46,7 @@ type Manager struct {
tenantID string
}

func New(dbType string, modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
func New(modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
hostNameRegexMatches := hostNameRegex.FindStringSubmatch(os.Getenv("ClickHouseUrl"))
@@ -13,8 +13,3 @@ if [ "$branch" = "main" ]; then
echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
exit 1
fi

if [ "$branch" = "develop" ]; then
echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
exit 1
fi
@@ -6,7 +6,7 @@

**Building image**

`docker-compose up`
`docker compose up`
This will also run

or
@@ -19,7 +19,7 @@ docker tag signoz/frontend:latest 7296823551/signoz:latest
```

```
docker-compose up
docker compose up
```

## Without Docker
frontend/docker-compose.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
version: "3.9"
services:
  web:
    build: .
    image: signoz/frontend:latest
    ports:
      - "3301:3301"
@@ -1,7 +0,0 @@
version: "3.9"
services:
  web:
    build: .
    image: signoz/frontend:latest
    ports:
      - "3301:3301"
@@ -24,7 +24,7 @@ const config: Config.InitialOptions = {
'^.+\\.(js|jsx)$': 'babel-jest',
},
transformIgnorePatterns: [
'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color)/)',
'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color|api)/)',
],
setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
testPathIgnorePatterns: ['/node_modules/', '/public/'],
@@ -21,7 +21,7 @@
|
||||
"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
|
||||
"commitlint": "commitlint --edit $1",
|
||||
"test": "jest --coverage",
|
||||
"test:changedsince": "jest --changedSince=develop --coverage --silent"
|
||||
"test:changedsince": "jest --changedSince=main --coverage --silent"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=16.15.0"
|
||||
@@ -40,9 +40,11 @@
 		"@monaco-editor/react": "^4.3.1",
 		"@radix-ui/react-tabs": "1.0.4",
 		"@radix-ui/react-tooltip": "1.0.7",
-		"@sentry/react": "7.102.1",
-		"@sentry/webpack-plugin": "2.16.0",
+		"@sentry/react": "8.41.0",
+		"@sentry/webpack-plugin": "2.22.6",
 		"@signozhq/design-tokens": "1.1.4",
+		"@tanstack/react-table": "8.20.6",
+		"@tanstack/react-virtual": "3.11.2",
 		"@uiw/react-md-editor": "3.23.5",
 		"@visx/group": "3.3.0",
 		"@visx/shape": "3.5.0",
@@ -76,7 +78,7 @@
 		"fontfaceobserver": "2.3.0",
 		"history": "4.10.1",
 		"html-webpack-plugin": "5.5.0",
-		"http-proxy-middleware": "2.0.7",
+		"http-proxy-middleware": "3.0.3",
 		"i18next": "^21.6.12",
 		"i18next-browser-languagedetector": "^6.1.3",
 		"i18next-http-backend": "^1.3.2",
@@ -128,7 +130,7 @@
 		"uuid": "^8.3.2",
 		"web-vitals": "^0.2.4",
 		"webpack": "5.94.0",
-		"webpack-dev-server": "^4.15.1",
+		"webpack-dev-server": "^4.15.2",
 		"webpack-retry-chunk-load-plugin": "3.1.1",
 		"xstate": "^4.31.0"
 	},
@@ -153,6 +155,7 @@
 		"@babel/preset-typescript": "^7.21.4",
 		"@commitlint/cli": "^16.3.0",
 		"@commitlint/config-conventional": "^16.2.4",
+		"@faker-js/faker": "9.3.0",
 		"@jest/globals": "^27.5.1",
 		"@playwright/test": "^1.22.0",
 		"@testing-library/jest-dom": "5.16.5",
@@ -241,6 +244,9 @@
 		"semver": "7.5.4",
 		"xml2js": "0.5.0",
 		"phin": "^3.7.1",
-		"body-parser": "1.20.3"
+		"body-parser": "1.20.3",
+		"http-proxy-middleware": "3.0.3",
+		"cross-spawn": "7.0.5",
+		"cookie": "^0.7.1"
 	}
 }
1  frontend/public/Icons/broom.svg  Normal file
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="none"><path fill="#A65F3E" d="M8.04 10.331a.41.41 0 0 1-.414-.414.4.4 0 0 1 .121-.292l8.071-8.071a.414.414 0 1 1 .585.585l-8.07 8.071a.4.4 0 0 1-.293.121"/><path fill="#A65F3E" d="M16.11 1.5c.09 0 .178.034.245.101a.35.35 0 0 1 0 .492l-8.07 8.07a.346.346 0 0 1-.49 0 .35.35 0 0 1 0-.49l8.07-8.072a.35.35 0 0 1 .245-.101m0-.133a.48.48 0 0 0-.338.14L7.7 9.578a.47.47 0 0 0-.14.34.475.475 0 0 0 .478.478c.13 0 .25-.05.34-.14l8.07-8.071a.48.48 0 0 0-.339-.818"/><path fill="#FFE082" d="m1.701 12.438 3.89 3.889c.873-.963 1.62-2.057 2.023-3.313.03-.091.034-.24.128-.359.451-.566 1.865-2.008.706-3.167-1.106-1.106-2.438.227-2.994.686-.17.14-.384.228-.606.276-1.493.326-3.034 1.869-3.147 1.988"/><path fill="#FFE082" d="M8.385 8.577a.62.62 0 0 1 .393-.085c.098.018.237.135.38.28.144.143.28.304.32.408s-.005.242-.005.242c-.116.23-.383.69-.6.624-.24-.074-.482-.305-.66-.479a1.5 1.5 0 0 1-.276-.328c-.096-.177.008-.324.129-.447.086-.082.232-.17.319-.215"/><path fill="#F9C248" d="M8.327 8.975c.116.11.21.243.339.338.252.185.455.097.62-.052.049-.044.122-.1.17-.055a.1.1 0 0 1 .025.051.45.45 0 0 1-.045.273 1.3 1.3 0 0 1-.433.529c-.032.022-.07.044-.11.032a.12.12 0 0 1-.056-.045c-.207-.244-.37-.533-.626-.724-.103-.076-.364-.132-.298-.303.1-.262.317-.137.414-.044"/><path fill="#F9C248" d="M7.614 13.014c.028-.091.033-.24.127-.359.515-.645 1.223-1.38 1.145-2.275-.01-.123-.169-.75-.342-.514-.04.052-.024.315-.03.379-.1 1.172-1.02 1.821-1.19 2.024s-.164.393-.31.695a5 5 0 0 1-.61.947c-.379.47-.825.88-1.286 1.27a.8.8 0 0 0-.203.217c-.131.241.153.406.305.558l.369.368c.873-.961 1.62-2.055 2.025-3.31"/><path fill="#E2A610" d="M5.537 15.809c-.1-.157-.242-.3-.317-.458a.24.24 0 0 1-.03-.123c.01-.08.13-.15.187-.198q.129-.108.254-.22c.162-.149.314-.314.419-.509.017-.031.032-.07.016-.102-.035-.065-.238.152-.275.186-.105.092-.208.187-.318.272-.146.113-.422.304-.618.213-.1-.046-.19-.169-.263-.249-.084-.094-.164-.191-.252-.283a17 17 0 0 0-.592-.582c-.05-.046-.06-.066-.003-.122a10 10 0 0 0 .546-.58c.022-.025.044-.067.017-.09-.018-.015-.048-.004-.07.007-.26.138-.467.354-.692.544-.055.046-.214-.13-.249-.158-.092-.073-.154-.102-.046-.21.484-.49.972-.946 1.554-1.323.107-.07.22-.14.28-.253-.01-.03-.054-.026-.085-.015-.807.29-1.89 1.291-1.983 1.38-.162.158-.454-.206-.885-.481-.147-.094 0-.235.038-.279.26-.307.603-.642.603-.642-.127.013-.956.76-1.054.873-.084.097-.17.184-.175.318a.52.52 0 0 0 .107.325c.77 1.05 2.586 2.794 3.23 3.253.384.274.502.224.659.068a.35.35 0 0 0 .105-.3.65.65 0 0 0-.108-.263"/><path fill="#A65F3E" d="M8.835 10.176s-.438-.825-1.017-1.074c0 0-.02-.054.02-.1.052-.057.12-.07.157-.046.427.265.812.619 1.007 1.102.039.093-.131.23-.167.118"/><path fill="#F44336" d="M7.64 12.88c-.528-.818-1.63-1.937-2.46-2.524-.066-.046.204-.204.272-.156a9.7 9.7 0 0 1 2.31 2.398c.045.067-.091.33-.123.282M8.193 12.078c-.506-.71-1.521-1.738-2.238-2.312-.062-.05.182-.22.232-.181.755.602 1.668 1.499 2.18 2.263.037.057-.138.282-.174.23"/></svg>
22  frontend/public/Icons/construction.svg  Normal file
@@ -0,0 +1,22 @@
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M4.02102 14.418C4.02102 14.418 3.82659 14.6658 3.32106 14.6658C2.81553 14.6658 2.62109 14.418 2.62109 14.418V5.74512H4.02102V14.418Z" fill="#9E9E9E"/>
+<path d="M4.02102 14.4179C4.02102 14.4179 3.82659 14.6657 3.32106 14.6657C2.81553 14.6657 2.62109 14.4179 2.62109 14.4179V11.077C2.62109 11.077 2.76331 10.8059 3.28328 10.8059C3.80325 10.8059 4.02102 11.077 4.02102 11.077V14.4179Z" fill="#BDBDBD"/>
+<path d="M13.3765 14.418C13.3765 14.418 13.1821 14.6658 12.6765 14.6658C12.171 14.6658 11.9766 14.418 11.9766 14.418V5.74512H13.3765V14.418Z" fill="#9E9E9E"/>
+<path d="M13.3765 14.4179C13.3765 14.4179 13.1821 14.6657 12.6765 14.6657C12.171 14.6657 11.9766 14.4179 11.9766 14.4179V11.077C11.9766 11.077 12.1188 10.8059 12.6387 10.8059C13.1587 10.8059 13.3765 11.077 13.3765 11.077V14.4179Z" fill="#BDBDBD"/>
+<path d="M14.3623 9.98379H1.63633C1.46856 9.98379 1.33301 9.84825 1.33301 9.68048V5.79624C1.33301 5.62847 1.46856 5.49292 1.63633 5.49292H14.3623C14.5301 5.49292 14.6656 5.62847 14.6656 5.79624V9.68048C14.6656 9.84825 14.5301 9.98379 14.3623 9.98379Z" fill="#9E9E9E"/>
+<path d="M14.3623 5.71509H1.63633C1.46856 5.71509 1.33301 5.85064 1.33301 6.01841V9.90264C1.33301 10.0704 1.46856 10.206 1.63633 10.206H14.3623C14.5301 10.206 14.6656 10.0704 14.6656 9.90264V6.01841C14.6656 5.85064 14.5301 5.71509 14.3623 5.71509Z" fill="#FFD600"/>
+<path d="M2.82515 5.71509L1.33301 7.20723V9.40712L5.02504 5.71509H2.82515Z" fill="#212121"/>
+<path d="M7.01027 5.71509L2.52051 10.206H4.72039L9.21016 5.71509H7.01027Z" fill="#212121"/>
+<path d="M11.1969 5.71509L6.70605 10.206H8.90594L13.3957 5.71509H11.1969Z" fill="#212121"/>
+<path d="M14.6658 6.43188L10.8916 10.2061H13.0915L14.6658 8.63177V6.43188Z" fill="#212121"/>
+<path d="M13.5322 5.9095H11.8223C11.6623 5.9095 11.5445 5.80506 11.5823 5.6984L11.7012 4.95288H13.6511L13.77 5.6984C13.81 5.80617 13.6922 5.9095 13.5322 5.9095Z" fill="#E2A610"/>
+<path d="M12.6768 4.93514C13.4567 4.93514 14.0889 4.3029 14.0889 3.52299C14.0889 2.74308 13.4567 2.11084 12.6768 2.11084C11.8969 2.11084 11.2646 2.74308 11.2646 3.52299C11.2646 4.3029 11.8969 4.93514 12.6768 4.93514Z" fill="#FFCA28"/>
+<path d="M12.6761 4.56629C13.2523 4.56629 13.7194 4.0992 13.7194 3.52301C13.7194 2.94683 13.2523 2.47974 12.6761 2.47974C12.0999 2.47974 11.6328 2.94683 11.6328 3.52301C11.6328 4.0992 12.0999 4.56629 12.6761 4.56629Z" fill="#FF5722"/>
+<path d="M13.652 4.96417H11.7021C11.7021 4.96417 11.7088 4.65308 12.2666 4.65308C12.8243 4.65308 13.0932 4.65308 13.0932 4.65308C13.6832 4.65308 13.652 4.96417 13.652 4.96417Z" fill="#FFCA28"/>
+<path d="M12.7998 3.02315L13.0665 2.67872C13.0787 2.66317 13.1031 2.67539 13.0987 2.69428L12.9954 3.11759C12.9709 3.21647 13.0465 3.31202 13.1487 3.3098L13.5842 3.30313C13.6042 3.30313 13.6098 3.3298 13.592 3.33869L13.1965 3.52201C13.1042 3.56534 13.0776 3.68311 13.142 3.762L13.4187 4.09865C13.4309 4.1142 13.4142 4.13531 13.3965 4.12642L13.0065 3.93199C12.9154 3.88644 12.8065 3.93866 12.7843 4.03865L12.6932 4.46529C12.6887 4.48529 12.6609 4.48529 12.6576 4.46529L12.5665 4.03865C12.5454 3.93866 12.4354 3.88644 12.3443 3.93199L11.9543 4.12642C11.9365 4.13531 11.9188 4.11309 11.9321 4.09865L12.2087 3.762C12.2732 3.68311 12.2465 3.56534 12.1543 3.52201L11.7588 3.33869C11.741 3.3298 11.7465 3.30313 11.7665 3.30313L12.2021 3.3098C12.3043 3.31091 12.3798 3.21647 12.3554 3.11759L12.2521 2.69428C12.2476 2.67539 12.2721 2.66317 12.2843 2.67872L12.5509 3.02315C12.6165 3.10314 12.7376 3.10314 12.7998 3.02315Z" fill="#FFD5CA"/>
+<path d="M4.17575 5.9095H2.46584C2.30584 5.9095 2.18807 5.80506 2.22585 5.6984L2.34473 4.95288H4.29463L4.41351 5.6984C4.45351 5.80617 4.33574 5.9095 4.17575 5.9095Z" fill="#E2A610"/>
+<path d="M3.3223 4.93514C4.10221 4.93514 4.73445 4.3029 4.73445 3.52299C4.73445 2.74308 4.10221 2.11084 3.3223 2.11084C2.5424 2.11084 1.91016 2.74308 1.91016 3.52299C1.91016 4.3029 2.5424 4.93514 3.3223 4.93514Z" fill="#FFCA28"/>
+<path d="M3.3216 4.56629C3.89779 4.56629 4.36488 4.0992 4.36488 3.52301C4.36488 2.94683 3.89779 2.47974 3.3216 2.47974C2.74541 2.47974 2.27832 2.94683 2.27832 3.52301C2.27832 4.0992 2.74541 4.56629 3.3216 4.56629Z" fill="#FF5722"/>
+<path d="M4.29658 4.96417H2.34668C2.34668 4.96417 2.35335 4.65308 2.91109 4.65308C3.46884 4.65308 3.73772 4.65308 3.73772 4.65308C4.32769 4.65308 4.29658 4.96417 4.29658 4.96417Z" fill="#FFCA28"/>
+<path d="M3.44337 3.02315L3.71002 2.67872C3.72224 2.66317 3.74669 2.67539 3.74224 2.69428L3.63892 3.11759C3.61447 3.21647 3.69002 3.31202 3.79224 3.3098L4.22777 3.30313C4.24777 3.30313 4.25333 3.3298 4.23555 3.33869L3.84002 3.52201C3.7478 3.56534 3.72113 3.68311 3.78557 3.762L4.06223 4.09865C4.07445 4.1142 4.05778 4.13531 4.04001 4.12642L3.65003 3.93199C3.55892 3.88644 3.45004 3.93866 3.42782 4.03865L3.33671 4.46529C3.33227 4.48529 3.30449 4.48529 3.30116 4.46529L3.21005 4.03865C3.18894 3.93866 3.07894 3.88644 2.98784 3.93199L2.59786 4.12642C2.58008 4.13531 2.56231 4.11309 2.57564 4.09865L2.85229 3.762C2.91673 3.68311 2.89007 3.56534 2.79785 3.52201L2.40231 3.33869C2.38454 3.3298 2.39009 3.30313 2.41009 3.30313L2.84562 3.3098C2.94784 3.31091 3.02339 3.21647 2.99895 3.11759L2.89562 2.69428C2.89118 2.67539 2.91562 2.66317 2.92784 2.67872L3.19449 3.02315C3.26005 3.10314 3.38115 3.10314 3.44337 3.02315Z" fill="#FFD5CA"/>
+</svg>
Some files were not shown because too many files have changed in this diff.